/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
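 *
 * For illustration only (this is not the driver's attach code), an HBA
 * that exports the property as an integer could be queried roughly as:
 *
 *	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
 *	    DDI_PROP_DONTPASS, "interconnect-type", -1);
 *
 * with the result then compared against the INTERCONNECT_* values; the
 * names devi/itype above are hypothetical and the actual lookup
 * mechanism used by the driver may differ.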
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect_type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict \
	ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void *sd_state				= NULL;
int sd_io_time				= SD_IO_TIME;
int sd_failfast_enable			= 1;
int sd_ua_retry_count			= SD_UA_RETRY_COUNT;
int sd_report_pfa			= 1;
int sd_max_throttle			= SD_MAX_THROTTLE;
int sd_min_throttle			= SD_MIN_THROTTLE;
int sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable		= TRUE;

int sd_retry_on_reservation_conflict	= 1;
int sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
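 *
 * For example (an illustrative reading of the fields in the structure
 * that follows), nlun[t] holds the count of luns currently attached to
 * target t on the controller identified by pdip.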
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
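 *
 * For example (an illustrative reading of the first entry below), the line
 *     { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties }
 * matches a unit whose inquiry vid/pid string begins with
 * "SEAGATE ST34371FC"; the SD_CONF_BSET_THROTTLE flag then selects the
 * throttle value from seagate_properties for that unit.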
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED,
	    &sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540", SD_CONF_BSET_NOCACHE, NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
"*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 716 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 717 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL }, 718 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 719 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 720 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 721 &symbios_properties }, 722 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 723 &lsi_properties_scsi }, 724 #if defined(__i386) || defined(__amd64) 725 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 726 | SD_CONF_BSET_READSUB_BCD 727 | SD_CONF_BSET_READ_TOC_ADDR_BCD 728 | SD_CONF_BSET_NO_READ_HEADER 729 | SD_CONF_BSET_READ_CD_XD4), NULL }, 730 731 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 732 | SD_CONF_BSET_READSUB_BCD 733 | SD_CONF_BSET_READ_TOC_ADDR_BCD 734 | SD_CONF_BSET_NO_READ_HEADER 735 | SD_CONF_BSET_READ_CD_XD4), NULL }, 736 #endif /* __i386 || __amd64 */ 737 #endif /* sparc NON-fibre or NON-sparc platforms */ 738 739 #if (defined(SD_PROP_TST)) 740 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 741 | SD_CONF_BSET_CTYPE 742 | SD_CONF_BSET_NRR_COUNT 743 | SD_CONF_BSET_FAB_DEVID 744 | SD_CONF_BSET_NOCACHE 745 | SD_CONF_BSET_BSY_RETRY_COUNT 746 | SD_CONF_BSET_PLAYMSF_BCD 747 | SD_CONF_BSET_READSUB_BCD 748 | SD_CONF_BSET_READ_TOC_TRK_BCD 749 | SD_CONF_BSET_READ_TOC_ADDR_BCD 750 | SD_CONF_BSET_NO_READ_HEADER 751 | SD_CONF_BSET_READ_CD_XD4 752 | SD_CONF_BSET_RST_RETRIES 753 | SD_CONF_BSET_RSV_REL_TIME 754 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 755 #endif 756 }; 757 758 static const int sd_disk_table_size = 759 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 760 761 762 763 #define SD_INTERCONNECT_PARALLEL 0 764 #define SD_INTERCONNECT_FABRIC 1 765 #define SD_INTERCONNECT_FIBRE 2 766 #define SD_INTERCONNECT_SSA 3 767 #define SD_INTERCONNECT_SATA 4 768 #define SD_IS_PARALLEL_SCSI(un) \ 769 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 770 #define SD_IS_SERIAL(un) \ 771 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 772 773 /* 774 * Definitions used by device id registration routines 775 */ 776 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 777 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 778 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 779 780 static kmutex_t sd_sense_mutex = {0}; 781 782 /* 783 * Macros for updates of the driver state 784 */ 785 #define New_state(un, s) \ 786 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 787 #define Restore_state(un) \ 788 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 789 790 static struct sd_cdbinfo sd_cdbtab[] = { 791 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 792 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 793 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 794 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 795 }; 796 797 /* 798 * Specifies the number of seconds that must have elapsed since the last 799 * cmd. has completed for a device to be declared idle to the PM framework. 800 */ 801 static int sd_pm_idletime = 1; 802 803 /* 804 * Internal function prototypes 805 */ 806 807 #if (defined(__fibre)) 808 /* 809 * These #defines are to avoid namespace collisions that occur because this 810 * code is currently used to compile two separate driver modules: sd and ssd. 811 * All function names need to be treated this way (even if declared static) 812 * in order to allow the debugger to resolve the names properly. 
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message \
	ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command \
	ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart \
	ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict \
	ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error \
	ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION \
	ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN \
	ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT \
	ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
	ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys \
	ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv \
	ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void sd_get_nv_sup(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag) \
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag) \
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block, \
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
*sd_sname(uchar_t status); 1472 static void sd_mhd_resvd_recover(void *arg); 1473 static void sd_resv_reclaim_thread(); 1474 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1475 static int sd_reserve_release(dev_t dev, int cmd); 1476 static void sd_rmv_resv_reclaim_req(dev_t dev); 1477 static void sd_mhd_reset_notify_cb(caddr_t arg); 1478 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1479 mhioc_inkeys_t *usrp, int flag); 1480 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1481 mhioc_inresvs_t *usrp, int flag); 1482 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1483 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1484 static int sd_mhdioc_release(dev_t dev); 1485 static int sd_mhdioc_register_devid(dev_t dev); 1486 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1487 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1488 1489 /* 1490 * SCSI removable prototypes 1491 */ 1492 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1493 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1494 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1495 static int sr_pause_resume(dev_t dev, int mode); 1496 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1497 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1498 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1499 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1500 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1501 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1502 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1503 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1504 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1505 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1506 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1507 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1508 static int sr_eject(dev_t dev); 1509 static void sr_ejected(register struct sd_lun *un); 1510 static int sr_check_wp(dev_t dev); 1511 static int sd_check_media(dev_t dev, enum dkio_state state); 1512 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1513 static void sd_delayed_cv_broadcast(void *arg); 1514 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1515 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1516 1517 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1518 1519 /* 1520 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
1521 */ 1522 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1523 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1524 static void sd_wm_cache_destructor(void *wm, void *un); 1525 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1526 daddr_t endb, ushort_t typ); 1527 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1528 daddr_t endb); 1529 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1530 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1531 static void sd_read_modify_write_task(void * arg); 1532 static int 1533 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1534 struct buf **bpp); 1535 1536 1537 /* 1538 * Function prototypes for failfast support. 1539 */ 1540 static void sd_failfast_flushq(struct sd_lun *un); 1541 static int sd_failfast_flushq_callback(struct buf *bp); 1542 1543 /* 1544 * Function prototypes to check for lsi devices 1545 */ 1546 static void sd_is_lsi(struct sd_lun *un); 1547 1548 /* 1549 * Function prototypes for partial DMA support 1550 */ 1551 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1552 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1553 1554 1555 /* Function prototypes for cmlb */ 1556 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1557 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1558 1559 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1560 1561 /* 1562 * Constants for failfast support: 1563 * 1564 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1565 * failfast processing being performed. 1566 * 1567 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1568 * failfast processing on all bufs with B_FAILFAST set. 1569 */ 1570 1571 #define SD_FAILFAST_INACTIVE 0 1572 #define SD_FAILFAST_ACTIVE 1 1573 1574 /* 1575 * Bitmask to control behavior of buf(9S) flushes when a transition to 1576 * the failfast state occurs. Optional bits include: 1577 * 1578 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1579 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1580 * be flushed. 1581 * 1582 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1583 * driver, in addition to the regular wait queue. This includes the xbuf 1584 * queues. When clear, only the driver's wait queue will be flushed. 1585 */ 1586 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1587 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1588 1589 /* 1590 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1591 * to flush all queues within the driver. 
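 *
 * Illustration only (not a committed interface): since the flush behavior is
 * controlled by the ordinary module global sd_failfast_flushctl (below), both
 * optional bits could in principle be selected at boot time with an
 * /etc/system entry such as
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * i.e. (SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES).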
1592 */ 1593 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1594 1595 1596 /* 1597 * SD Testing Fault Injection 1598 */ 1599 #ifdef SD_FAULT_INJECTION 1600 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1601 static void sd_faultinjection(struct scsi_pkt *pktp); 1602 static void sd_injection_log(char *buf, struct sd_lun *un); 1603 #endif 1604 1605 /* 1606 * Device driver ops vector 1607 */ 1608 static struct cb_ops sd_cb_ops = { 1609 sdopen, /* open */ 1610 sdclose, /* close */ 1611 sdstrategy, /* strategy */ 1612 nodev, /* print */ 1613 sddump, /* dump */ 1614 sdread, /* read */ 1615 sdwrite, /* write */ 1616 sdioctl, /* ioctl */ 1617 nodev, /* devmap */ 1618 nodev, /* mmap */ 1619 nodev, /* segmap */ 1620 nochpoll, /* poll */ 1621 sd_prop_op, /* cb_prop_op */ 1622 0, /* streamtab */ 1623 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1624 CB_REV, /* cb_rev */ 1625 sdaread, /* async I/O read entry point */ 1626 sdawrite /* async I/O write entry point */ 1627 }; 1628 1629 static struct dev_ops sd_ops = { 1630 DEVO_REV, /* devo_rev, */ 1631 0, /* refcnt */ 1632 sdinfo, /* info */ 1633 nulldev, /* identify */ 1634 sdprobe, /* probe */ 1635 sdattach, /* attach */ 1636 sddetach, /* detach */ 1637 nodev, /* reset */ 1638 &sd_cb_ops, /* driver operations */ 1639 NULL, /* bus operations */ 1640 sdpower /* power */ 1641 }; 1642 1643 1644 /* 1645 * This is the loadable module wrapper. 1646 */ 1647 #include <sys/modctl.h> 1648 1649 static struct modldrv modldrv = { 1650 &mod_driverops, /* Type of module. This one is a driver */ 1651 SD_MODULE_NAME, /* Module name. */ 1652 &sd_ops /* driver ops */ 1653 }; 1654 1655 1656 static struct modlinkage modlinkage = { 1657 MODREV_1, 1658 &modldrv, 1659 NULL 1660 }; 1661 1662 static cmlb_tg_ops_t sd_tgops = { 1663 TG_DK_OPS_VERSION_1, 1664 sd_tg_rdwr, 1665 sd_tg_getinfo 1666 }; 1667 1668 static struct scsi_asq_key_strings sd_additional_codes[] = { 1669 0x81, 0, "Logical Unit is Reserved", 1670 0x85, 0, "Audio Address Not Valid", 1671 0xb6, 0, "Media Load Mechanism Failed", 1672 0xB9, 0, "Audio Play Operation Aborted", 1673 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1674 0x53, 2, "Medium removal prevented", 1675 0x6f, 0, "Authentication failed during key exchange", 1676 0x6f, 1, "Key not present", 1677 0x6f, 2, "Key not established", 1678 0x6f, 3, "Read without proper authentication", 1679 0x6f, 4, "Mismatched region to this logical unit", 1680 0x6f, 5, "Region reset count error", 1681 0xffff, 0x0, NULL 1682 }; 1683 1684 1685 /* 1686 * Struct for passing printing information for sense data messages 1687 */ 1688 struct sd_sense_info { 1689 int ssi_severity; 1690 int ssi_pfa_flag; 1691 }; 1692 1693 /* 1694 * Table of function pointers for iostart-side routines. Separate "chains" 1695 * of layered function calls are formed by placing the function pointers 1696 * sequentially in the desired order. Functions are called according to an 1697 * incrementing table index ordering. The last function in each chain must 1698 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1699 * in the sd_iodone_chain[] array. 1700 * 1701 * Note: It may seem more natural to organize both the iostart and iodone 1702 * functions together, into an array of structures (or some similar 1703 * organization) with a common index, rather than two separate arrays which 1704 * must be maintained in synchronization. 
The purpose of this division is 1705 * to achieve improved performance: individual arrays allows for more 1706 * effective cache line utilization on certain platforms. 1707 */ 1708 1709 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1710 1711 1712 static sd_chain_t sd_iostart_chain[] = { 1713 1714 /* Chain for buf IO for disk drive targets (PM enabled) */ 1715 sd_mapblockaddr_iostart, /* Index: 0 */ 1716 sd_pm_iostart, /* Index: 1 */ 1717 sd_core_iostart, /* Index: 2 */ 1718 1719 /* Chain for buf IO for disk drive targets (PM disabled) */ 1720 sd_mapblockaddr_iostart, /* Index: 3 */ 1721 sd_core_iostart, /* Index: 4 */ 1722 1723 /* Chain for buf IO for removable-media targets (PM enabled) */ 1724 sd_mapblockaddr_iostart, /* Index: 5 */ 1725 sd_mapblocksize_iostart, /* Index: 6 */ 1726 sd_pm_iostart, /* Index: 7 */ 1727 sd_core_iostart, /* Index: 8 */ 1728 1729 /* Chain for buf IO for removable-media targets (PM disabled) */ 1730 sd_mapblockaddr_iostart, /* Index: 9 */ 1731 sd_mapblocksize_iostart, /* Index: 10 */ 1732 sd_core_iostart, /* Index: 11 */ 1733 1734 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1735 sd_mapblockaddr_iostart, /* Index: 12 */ 1736 sd_checksum_iostart, /* Index: 13 */ 1737 sd_pm_iostart, /* Index: 14 */ 1738 sd_core_iostart, /* Index: 15 */ 1739 1740 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1741 sd_mapblockaddr_iostart, /* Index: 16 */ 1742 sd_checksum_iostart, /* Index: 17 */ 1743 sd_core_iostart, /* Index: 18 */ 1744 1745 /* Chain for USCSI commands (all targets) */ 1746 sd_pm_iostart, /* Index: 19 */ 1747 sd_core_iostart, /* Index: 20 */ 1748 1749 /* Chain for checksumming USCSI commands (all targets) */ 1750 sd_checksum_uscsi_iostart, /* Index: 21 */ 1751 sd_pm_iostart, /* Index: 22 */ 1752 sd_core_iostart, /* Index: 23 */ 1753 1754 /* Chain for "direct" USCSI commands (all targets) */ 1755 sd_core_iostart, /* Index: 24 */ 1756 1757 /* Chain for "direct priority" USCSI commands (all targets) */ 1758 sd_core_iostart, /* Index: 25 */ 1759 }; 1760 1761 /* 1762 * Macros to locate the first function of each iostart chain in the 1763 * sd_iostart_chain[] array. These are located by the index in the array. 1764 */ 1765 #define SD_CHAIN_DISK_IOSTART 0 1766 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1767 #define SD_CHAIN_RMMEDIA_IOSTART 5 1768 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1769 #define SD_CHAIN_CHKSUM_IOSTART 12 1770 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1771 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1772 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1773 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1774 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1775 1776 1777 /* 1778 * Table of function pointers for the iodone-side routines for the driver- 1779 * internal layering mechanism. The calling sequence for iodone routines 1780 * uses a decrementing table index, so the last routine called in a chain 1781 * must be at the lowest array index location for that chain. The last 1782 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1783 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1784 * of the functions in an iodone side chain must correspond to the ordering 1785 * of the iostart routines for that chain. Note that there is no iodone 1786 * side routine that corresponds to sd_core_iostart(), so there is no 1787 * entry in the table for this. 
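 *
 * As a sketch of the resulting call order (taken from the tables above and
 * below, together with the SD_NEXT_IOSTART/SD_NEXT_IODONE macros defined
 * later in this file): a buf(9S) IO for a disk target with PM enabled is
 * started at sd_iostart_chain[0] and walks forward,
 *
 *	sd_mapblockaddr_iostart (0) -> sd_pm_iostart (1) -> sd_core_iostart (2)
 *
 * while its completion begins at sd_iodone_chain[2] and walks backward,
 *
 *	sd_pm_iodone (2) -> sd_mapblockaddr_iodone (1) -> sd_buf_iodone (0)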
1788 */
1789
1790 static sd_chain_t sd_iodone_chain[] = {
1791
1792 /* Chain for buf IO for disk drive targets (PM enabled) */
1793 sd_buf_iodone, /* Index: 0 */
1794 sd_mapblockaddr_iodone, /* Index: 1 */
1795 sd_pm_iodone, /* Index: 2 */
1796
1797 /* Chain for buf IO for disk drive targets (PM disabled) */
1798 sd_buf_iodone, /* Index: 3 */
1799 sd_mapblockaddr_iodone, /* Index: 4 */
1800
1801 /* Chain for buf IO for removable-media targets (PM enabled) */
1802 sd_buf_iodone, /* Index: 5 */
1803 sd_mapblockaddr_iodone, /* Index: 6 */
1804 sd_mapblocksize_iodone, /* Index: 7 */
1805 sd_pm_iodone, /* Index: 8 */
1806
1807 /* Chain for buf IO for removable-media targets (PM disabled) */
1808 sd_buf_iodone, /* Index: 9 */
1809 sd_mapblockaddr_iodone, /* Index: 10 */
1810 sd_mapblocksize_iodone, /* Index: 11 */
1811
1812 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1813 sd_buf_iodone, /* Index: 12 */
1814 sd_mapblockaddr_iodone, /* Index: 13 */
1815 sd_checksum_iodone, /* Index: 14 */
1816 sd_pm_iodone, /* Index: 15 */
1817
1818 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1819 sd_buf_iodone, /* Index: 16 */
1820 sd_mapblockaddr_iodone, /* Index: 17 */
1821 sd_checksum_iodone, /* Index: 18 */
1822
1823 /* Chain for USCSI commands (non-checksum targets) */
1824 sd_uscsi_iodone, /* Index: 19 */
1825 sd_pm_iodone, /* Index: 20 */
1826
1827 /* Chain for USCSI commands (checksum targets) */
1828 sd_uscsi_iodone, /* Index: 21 */
1829 sd_checksum_uscsi_iodone, /* Index: 22 */
1830 sd_pm_iodone, /* Index: 23 */
1831
1832 /* Chain for "direct" USCSI commands (all targets) */
1833 sd_uscsi_iodone, /* Index: 24 */
1834
1835 /* Chain for "direct priority" USCSI commands (all targets) */
1836 sd_uscsi_iodone, /* Index: 25 */
1837 };
1838
1839
1840 /*
1841 * Macros to locate the "first" function in the sd_iodone_chain[] array for
1842 * each iodone-side chain. These are located by the array index, but as the
1843 * iodone side functions are called in a decrementing-index order, the
1844 * highest index number in each chain must be specified (as these correspond
1845 * to the first function in the iodone chain that will be called by the core
1846 * at IO completion time).
1847 */
1848
1849 #define SD_CHAIN_DISK_IODONE 2
1850 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1851 #define SD_CHAIN_RMMEDIA_IODONE 8
1852 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1853 #define SD_CHAIN_CHKSUM_IODONE 15
1854 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1855 #define SD_CHAIN_USCSI_CMD_IODONE 20
1856 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22
1857 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1858 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1859
1860
1861
1862
1863 /*
1864 * Array to map a layering chain index to the appropriate initpkt routine.
1865 * The redundant entries are present so that the index used for accessing
1866 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1867 * with this table as well.
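 *
 * For example, an xbuf whose xb_chain_iostart index falls within one of the
 * buf(9S) chains (indexes 0-18) resolves to sd_initpkt_for_buf here and to
 * sd_destroypkt_for_buf in sd_destroypkt_map[] below, while the USCSI chains
 * (indexes 19-25) resolve to the corresponding *_for_uscsi routines.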
1868 */
1869 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1870
1871 static sd_initpkt_t sd_initpkt_map[] = {
1872
1873 /* Chain for buf IO for disk drive targets (PM enabled) */
1874 sd_initpkt_for_buf, /* Index: 0 */
1875 sd_initpkt_for_buf, /* Index: 1 */
1876 sd_initpkt_for_buf, /* Index: 2 */
1877
1878 /* Chain for buf IO for disk drive targets (PM disabled) */
1879 sd_initpkt_for_buf, /* Index: 3 */
1880 sd_initpkt_for_buf, /* Index: 4 */
1881
1882 /* Chain for buf IO for removable-media targets (PM enabled) */
1883 sd_initpkt_for_buf, /* Index: 5 */
1884 sd_initpkt_for_buf, /* Index: 6 */
1885 sd_initpkt_for_buf, /* Index: 7 */
1886 sd_initpkt_for_buf, /* Index: 8 */
1887
1888 /* Chain for buf IO for removable-media targets (PM disabled) */
1889 sd_initpkt_for_buf, /* Index: 9 */
1890 sd_initpkt_for_buf, /* Index: 10 */
1891 sd_initpkt_for_buf, /* Index: 11 */
1892
1893 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1894 sd_initpkt_for_buf, /* Index: 12 */
1895 sd_initpkt_for_buf, /* Index: 13 */
1896 sd_initpkt_for_buf, /* Index: 14 */
1897 sd_initpkt_for_buf, /* Index: 15 */
1898
1899 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1900 sd_initpkt_for_buf, /* Index: 16 */
1901 sd_initpkt_for_buf, /* Index: 17 */
1902 sd_initpkt_for_buf, /* Index: 18 */
1903
1904 /* Chain for USCSI commands (non-checksum targets) */
1905 sd_initpkt_for_uscsi, /* Index: 19 */
1906 sd_initpkt_for_uscsi, /* Index: 20 */
1907
1908 /* Chain for USCSI commands (checksum targets) */
1909 sd_initpkt_for_uscsi, /* Index: 21 */
1910 sd_initpkt_for_uscsi, /* Index: 22 */
1911 sd_initpkt_for_uscsi, /* Index: 23 */
1912
1913 /* Chain for "direct" USCSI commands (all targets) */
1914 sd_initpkt_for_uscsi, /* Index: 24 */
1915
1916 /* Chain for "direct priority" USCSI commands (all targets) */
1917 sd_initpkt_for_uscsi, /* Index: 25 */
1918
1919 };
1920
1921
1922 /*
1923 * Array to map a layering chain index to the appropriate destroypkt routine.
1924 * The redundant entries are present so that the index used for accessing
1925 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1926 * with this table as well.
1927 */
1928 typedef void (*sd_destroypkt_t)(struct buf *);
1929
1930 static sd_destroypkt_t sd_destroypkt_map[] = {
1931
1932 /* Chain for buf IO for disk drive targets (PM enabled) */
1933 sd_destroypkt_for_buf, /* Index: 0 */
1934 sd_destroypkt_for_buf, /* Index: 1 */
1935 sd_destroypkt_for_buf, /* Index: 2 */
1936
1937 /* Chain for buf IO for disk drive targets (PM disabled) */
1938 sd_destroypkt_for_buf, /* Index: 3 */
1939 sd_destroypkt_for_buf, /* Index: 4 */
1940
1941 /* Chain for buf IO for removable-media targets (PM enabled) */
1942 sd_destroypkt_for_buf, /* Index: 5 */
1943 sd_destroypkt_for_buf, /* Index: 6 */
1944 sd_destroypkt_for_buf, /* Index: 7 */
1945 sd_destroypkt_for_buf, /* Index: 8 */
1946
1947 /* Chain for buf IO for removable-media targets (PM disabled) */
1948 sd_destroypkt_for_buf, /* Index: 9 */
1949 sd_destroypkt_for_buf, /* Index: 10 */
1950 sd_destroypkt_for_buf, /* Index: 11 */
1951
1952 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1953 sd_destroypkt_for_buf, /* Index: 12 */
1954 sd_destroypkt_for_buf, /* Index: 13 */
1955 sd_destroypkt_for_buf, /* Index: 14 */
1956 sd_destroypkt_for_buf, /* Index: 15 */
1957
1958 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1959 sd_destroypkt_for_buf, /* Index: 16 */
1960 sd_destroypkt_for_buf, /* Index: 17 */
1961 sd_destroypkt_for_buf, /* Index: 18 */
1962
1963 /* Chain for USCSI commands (non-checksum targets) */
1964 sd_destroypkt_for_uscsi, /* Index: 19 */
1965 sd_destroypkt_for_uscsi, /* Index: 20 */
1966
1967 /* Chain for USCSI commands (checksum targets) */
1968 sd_destroypkt_for_uscsi, /* Index: 21 */
1969 sd_destroypkt_for_uscsi, /* Index: 22 */
1970 sd_destroypkt_for_uscsi, /* Index: 23 */
1971
1972 /* Chain for "direct" USCSI commands (all targets) */
1973 sd_destroypkt_for_uscsi, /* Index: 24 */
1974
1975 /* Chain for "direct priority" USCSI commands (all targets) */
1976 sd_destroypkt_for_uscsi, /* Index: 25 */
1977
1978 };
1979
1980
1981
1982 /*
1983 * Array to map a layering chain index to the appropriate chain "type".
1984 * The chain type indicates a specific property/usage of the chain.
1985 * The redundant entries are present so that the index used for accessing
1986 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1987 * with this table as well.
1988 */
1989
1990 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
1991 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
1992 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
1993 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
1994 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
1995 /* (for error recovery) */
1996
1997 static int sd_chain_type_map[] = {
1998
1999 /* Chain for buf IO for disk drive targets (PM enabled) */
2000 SD_CHAIN_BUFIO, /* Index: 0 */
2001 SD_CHAIN_BUFIO, /* Index: 1 */
2002 SD_CHAIN_BUFIO, /* Index: 2 */
2003
2004 /* Chain for buf IO for disk drive targets (PM disabled) */
2005 SD_CHAIN_BUFIO, /* Index: 3 */
2006 SD_CHAIN_BUFIO, /* Index: 4 */
2007
2008 /* Chain for buf IO for removable-media targets (PM enabled) */
2009 SD_CHAIN_BUFIO, /* Index: 5 */
2010 SD_CHAIN_BUFIO, /* Index: 6 */
2011 SD_CHAIN_BUFIO, /* Index: 7 */
2012 SD_CHAIN_BUFIO, /* Index: 8 */
2013
2014 /* Chain for buf IO for removable-media targets (PM disabled) */
2015 SD_CHAIN_BUFIO, /* Index: 9 */
2016 SD_CHAIN_BUFIO, /* Index: 10 */
2017 SD_CHAIN_BUFIO, /* Index: 11 */
2018
2019 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2020 SD_CHAIN_BUFIO, /* Index: 12 */
2021 SD_CHAIN_BUFIO, /* Index: 13 */
2022 SD_CHAIN_BUFIO, /* Index: 14 */
2023 SD_CHAIN_BUFIO, /* Index: 15 */
2024
2025 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2026 SD_CHAIN_BUFIO, /* Index: 16 */
2027 SD_CHAIN_BUFIO, /* Index: 17 */
2028 SD_CHAIN_BUFIO, /* Index: 18 */
2029
2030 /* Chain for USCSI commands (non-checksum targets) */
2031 SD_CHAIN_USCSI, /* Index: 19 */
2032 SD_CHAIN_USCSI, /* Index: 20 */
2033
2034 /* Chain for USCSI commands (checksum targets) */
2035 SD_CHAIN_USCSI, /* Index: 21 */
2036 SD_CHAIN_USCSI, /* Index: 22 */
2037 SD_CHAIN_USCSI, /* Index: 23 */
2038
2039 /* Chain for "direct" USCSI commands (all targets) */
2040 SD_CHAIN_DIRECT, /* Index: 24 */
2041
2042 /* Chain for "direct priority" USCSI commands (all targets) */
2043 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2044 };
2045
2046
2047 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2048 #define SD_IS_BUFIO(xp) \
2049 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2050
2051 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2052 #define SD_IS_DIRECT_PRIORITY(xp) \
2053 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2054
2055
2056
2057 /*
2058 * Struct, array, and macros to map a specific chain to the appropriate
2059 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2060 *
2061 * The sd_chain_index_map[] array is used at attach time to set the various
2062 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2063 * chain to be used with the instance. This allows different instances to use
2064 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2065 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2066 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2067 * dynamically and without the use of locking; and (2) a layer to update the
2068 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2069 * to allow for deferred processing of an IO within the same chain from a
2070 * different execution context.
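 *
 * A rough sketch of the intended usage (the actual attach and xbuf-init code
 * lives elsewhere in this file): at attach time the driver records, e.g.,
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *
 * and when an xbuf is later initialized for that instance the chain indexes
 * are pulled from the map, conceptually:
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;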
2071 */ 2072 2073 struct sd_chain_index { 2074 int sci_iostart_index; 2075 int sci_iodone_index; 2076 }; 2077 2078 static struct sd_chain_index sd_chain_index_map[] = { 2079 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2080 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2081 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2082 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2083 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2084 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2085 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2086 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2087 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2088 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2089 }; 2090 2091 2092 /* 2093 * The following are indexes into the sd_chain_index_map[] array. 2094 */ 2095 2096 /* un->un_buf_chain_type must be set to one of these */ 2097 #define SD_CHAIN_INFO_DISK 0 2098 #define SD_CHAIN_INFO_DISK_NO_PM 1 2099 #define SD_CHAIN_INFO_RMMEDIA 2 2100 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2101 #define SD_CHAIN_INFO_CHKSUM 4 2102 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2103 2104 /* un->un_uscsi_chain_type must be set to one of these */ 2105 #define SD_CHAIN_INFO_USCSI_CMD 6 2106 /* USCSI with PM disabled is the same as DIRECT */ 2107 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2108 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2109 2110 /* un->un_direct_chain_type must be set to one of these */ 2111 #define SD_CHAIN_INFO_DIRECT_CMD 8 2112 2113 /* un->un_priority_chain_type must be set to one of these */ 2114 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2115 2116 /* size for devid inquiries */ 2117 #define MAX_INQUIRY_SIZE 0xF0 2118 2119 /* 2120 * Macros used by functions to pass a given buf(9S) struct along to the 2121 * next function in the layering chain for further processing. 2122 * 2123 * In the following macros, passing more than three arguments to the called 2124 * routines causes the optimizer for the SPARC compiler to stop doing tail 2125 * call elimination which results in significant performance degradation. 2126 */ 2127 #define SD_BEGIN_IOSTART(index, un, bp) \ 2128 ((*(sd_iostart_chain[index]))(index, un, bp)) 2129 2130 #define SD_BEGIN_IODONE(index, un, bp) \ 2131 ((*(sd_iodone_chain[index]))(index, un, bp)) 2132 2133 #define SD_NEXT_IOSTART(index, un, bp) \ 2134 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2135 2136 #define SD_NEXT_IODONE(index, un, bp) \ 2137 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2138 2139 /* 2140 * Function: _init 2141 * 2142 * Description: This is the driver _init(9E) entry point. 2143 * 2144 * Return Code: Returns the value from mod_install(9F) or 2145 * ddi_soft_state_init(9F) as appropriate. 2146 * 2147 * Context: Called when driver module loaded. 
2148 */ 2149 2150 int 2151 _init(void) 2152 { 2153 int err; 2154 2155 /* establish driver name from module name */ 2156 sd_label = (char *)mod_modname(&modlinkage); 2157 2158 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2159 SD_MAXUNIT); 2160 2161 if (err != 0) { 2162 return (err); 2163 } 2164 2165 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2166 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2167 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2168 2169 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2170 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2171 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2172 2173 /* 2174 * it's ok to init here even for fibre device 2175 */ 2176 sd_scsi_probe_cache_init(); 2177 2178 sd_scsi_target_lun_init(); 2179 2180 /* 2181 * Creating taskq before mod_install ensures that all callers (threads) 2182 * that enter the module after a successful mod_install encounter 2183 * a valid taskq. 2184 */ 2185 sd_taskq_create(); 2186 2187 err = mod_install(&modlinkage); 2188 if (err != 0) { 2189 /* delete taskq if install fails */ 2190 sd_taskq_delete(); 2191 2192 mutex_destroy(&sd_detach_mutex); 2193 mutex_destroy(&sd_log_mutex); 2194 mutex_destroy(&sd_label_mutex); 2195 2196 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2197 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2198 cv_destroy(&sd_tr.srq_inprocess_cv); 2199 2200 sd_scsi_probe_cache_fini(); 2201 2202 sd_scsi_target_lun_fini(); 2203 2204 ddi_soft_state_fini(&sd_state); 2205 return (err); 2206 } 2207 2208 return (err); 2209 } 2210 2211 2212 /* 2213 * Function: _fini 2214 * 2215 * Description: This is the driver _fini(9E) entry point. 2216 * 2217 * Return Code: Returns the value from mod_remove(9F) 2218 * 2219 * Context: Called when driver module is unloaded. 2220 */ 2221 2222 int 2223 _fini(void) 2224 { 2225 int err; 2226 2227 if ((err = mod_remove(&modlinkage)) != 0) { 2228 return (err); 2229 } 2230 2231 sd_taskq_delete(); 2232 2233 mutex_destroy(&sd_detach_mutex); 2234 mutex_destroy(&sd_log_mutex); 2235 mutex_destroy(&sd_label_mutex); 2236 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2237 2238 sd_scsi_probe_cache_fini(); 2239 2240 sd_scsi_target_lun_fini(); 2241 2242 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2243 cv_destroy(&sd_tr.srq_inprocess_cv); 2244 2245 ddi_soft_state_fini(&sd_state); 2246 2247 return (err); 2248 } 2249 2250 2251 /* 2252 * Function: _info 2253 * 2254 * Description: This is the driver _info(9E) entry point. 2255 * 2256 * Arguments: modinfop - pointer to the driver modinfo structure 2257 * 2258 * Return Code: Returns the value from mod_info(9F). 2259 * 2260 * Context: Kernel thread context 2261 */ 2262 2263 int 2264 _info(struct modinfo *modinfop) 2265 { 2266 return (mod_info(&modlinkage, modinfop)); 2267 } 2268 2269 2270 /* 2271 * The following routines implement the driver message logging facility. 2272 * They provide component- and level- based debug output filtering. 2273 * Output may also be restricted to messages for a single instance by 2274 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2275 * to NULL, then messages for all instances are printed. 2276 * 2277 * These routines have been cloned from each other due to the language 2278 * constraints of macros and variable argument list processing. 2279 */ 2280 2281 2282 /* 2283 * Function: sd_log_err 2284 * 2285 * Description: This routine is called by the SD_ERROR macro for debug 2286 * logging of error conditions. 
2287 * 2288 * Arguments: comp - driver component being logged 2289 * dev - pointer to driver info structure 2290 * fmt - error string and format to be logged 2291 */ 2292 2293 static void 2294 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2295 { 2296 va_list ap; 2297 dev_info_t *dev; 2298 2299 ASSERT(un != NULL); 2300 dev = SD_DEVINFO(un); 2301 ASSERT(dev != NULL); 2302 2303 /* 2304 * Filter messages based on the global component and level masks. 2305 * Also print if un matches the value of sd_debug_un, or if 2306 * sd_debug_un is set to NULL. 2307 */ 2308 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2309 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2310 mutex_enter(&sd_log_mutex); 2311 va_start(ap, fmt); 2312 (void) vsprintf(sd_log_buf, fmt, ap); 2313 va_end(ap); 2314 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2315 mutex_exit(&sd_log_mutex); 2316 } 2317 #ifdef SD_FAULT_INJECTION 2318 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2319 if (un->sd_injection_mask & comp) { 2320 mutex_enter(&sd_log_mutex); 2321 va_start(ap, fmt); 2322 (void) vsprintf(sd_log_buf, fmt, ap); 2323 va_end(ap); 2324 sd_injection_log(sd_log_buf, un); 2325 mutex_exit(&sd_log_mutex); 2326 } 2327 #endif 2328 } 2329 2330 2331 /* 2332 * Function: sd_log_info 2333 * 2334 * Description: This routine is called by the SD_INFO macro for debug 2335 * logging of general purpose informational conditions. 2336 * 2337 * Arguments: comp - driver component being logged 2338 * dev - pointer to driver info structure 2339 * fmt - info string and format to be logged 2340 */ 2341 2342 static void 2343 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2344 { 2345 va_list ap; 2346 dev_info_t *dev; 2347 2348 ASSERT(un != NULL); 2349 dev = SD_DEVINFO(un); 2350 ASSERT(dev != NULL); 2351 2352 /* 2353 * Filter messages based on the global component and level masks. 2354 * Also print if un matches the value of sd_debug_un, or if 2355 * sd_debug_un is set to NULL. 2356 */ 2357 if ((sd_component_mask & component) && 2358 (sd_level_mask & SD_LOGMASK_INFO) && 2359 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2360 mutex_enter(&sd_log_mutex); 2361 va_start(ap, fmt); 2362 (void) vsprintf(sd_log_buf, fmt, ap); 2363 va_end(ap); 2364 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2365 mutex_exit(&sd_log_mutex); 2366 } 2367 #ifdef SD_FAULT_INJECTION 2368 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2369 if (un->sd_injection_mask & component) { 2370 mutex_enter(&sd_log_mutex); 2371 va_start(ap, fmt); 2372 (void) vsprintf(sd_log_buf, fmt, ap); 2373 va_end(ap); 2374 sd_injection_log(sd_log_buf, un); 2375 mutex_exit(&sd_log_mutex); 2376 } 2377 #endif 2378 } 2379 2380 2381 /* 2382 * Function: sd_log_trace 2383 * 2384 * Description: This routine is called by the SD_TRACE macro for debug 2385 * logging of trace conditions (i.e. function entry/exit). 2386 * 2387 * Arguments: comp - driver component being logged 2388 * dev - pointer to driver info structure 2389 * fmt - trace string and format to be logged 2390 */ 2391 2392 static void 2393 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2394 { 2395 va_list ap; 2396 dev_info_t *dev; 2397 2398 ASSERT(un != NULL); 2399 dev = SD_DEVINFO(un); 2400 ASSERT(dev != NULL); 2401 2402 /* 2403 * Filter messages based on the global component and level masks. 2404 * Also print if un matches the value of sd_debug_un, or if 2405 * sd_debug_un is set to NULL. 
2406 */ 2407 if ((sd_component_mask & component) && 2408 (sd_level_mask & SD_LOGMASK_TRACE) && 2409 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2410 mutex_enter(&sd_log_mutex); 2411 va_start(ap, fmt); 2412 (void) vsprintf(sd_log_buf, fmt, ap); 2413 va_end(ap); 2414 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2415 mutex_exit(&sd_log_mutex); 2416 } 2417 #ifdef SD_FAULT_INJECTION 2418 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2419 if (un->sd_injection_mask & component) { 2420 mutex_enter(&sd_log_mutex); 2421 va_start(ap, fmt); 2422 (void) vsprintf(sd_log_buf, fmt, ap); 2423 va_end(ap); 2424 sd_injection_log(sd_log_buf, un); 2425 mutex_exit(&sd_log_mutex); 2426 } 2427 #endif 2428 } 2429 2430 2431 /* 2432 * Function: sdprobe 2433 * 2434 * Description: This is the driver probe(9e) entry point function. 2435 * 2436 * Arguments: devi - opaque device info handle 2437 * 2438 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2439 * DDI_PROBE_FAILURE: If the probe failed. 2440 * DDI_PROBE_PARTIAL: If the instance is not present now, 2441 * but may be present in the future. 2442 */ 2443 2444 static int 2445 sdprobe(dev_info_t *devi) 2446 { 2447 struct scsi_device *devp; 2448 int rval; 2449 int instance; 2450 2451 /* 2452 * if it wasn't for pln, sdprobe could actually be nulldev 2453 * in the "__fibre" case. 2454 */ 2455 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2456 return (DDI_PROBE_DONTCARE); 2457 } 2458 2459 devp = ddi_get_driver_private(devi); 2460 2461 if (devp == NULL) { 2462 /* Ooops... nexus driver is mis-configured... */ 2463 return (DDI_PROBE_FAILURE); 2464 } 2465 2466 instance = ddi_get_instance(devi); 2467 2468 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2469 return (DDI_PROBE_PARTIAL); 2470 } 2471 2472 /* 2473 * Call the SCSA utility probe routine to see if we actually 2474 * have a target at this SCSI nexus. 2475 */ 2476 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2477 case SCSIPROBE_EXISTS: 2478 switch (devp->sd_inq->inq_dtype) { 2479 case DTYPE_DIRECT: 2480 rval = DDI_PROBE_SUCCESS; 2481 break; 2482 case DTYPE_RODIRECT: 2483 /* CDs etc. Can be removable media */ 2484 rval = DDI_PROBE_SUCCESS; 2485 break; 2486 case DTYPE_OPTICAL: 2487 /* 2488 * Rewritable optical driver HP115AA 2489 * Can also be removable media 2490 */ 2491 2492 /* 2493 * Do not attempt to bind to DTYPE_OPTICAL if 2494 * pre solaris 9 sparc sd behavior is required 2495 * 2496 * If first time through and sd_dtype_optical_bind 2497 * has not been set in /etc/system check properties 2498 */ 2499 2500 if (sd_dtype_optical_bind < 0) { 2501 sd_dtype_optical_bind = ddi_prop_get_int 2502 (DDI_DEV_T_ANY, devi, 0, 2503 "optical-device-bind", 1); 2504 } 2505 2506 if (sd_dtype_optical_bind == 0) { 2507 rval = DDI_PROBE_FAILURE; 2508 } else { 2509 rval = DDI_PROBE_SUCCESS; 2510 } 2511 break; 2512 2513 case DTYPE_NOTPRESENT: 2514 default: 2515 rval = DDI_PROBE_FAILURE; 2516 break; 2517 } 2518 break; 2519 default: 2520 rval = DDI_PROBE_PARTIAL; 2521 break; 2522 } 2523 2524 /* 2525 * This routine checks for resource allocation prior to freeing, 2526 * so it will take care of the "smart probing" case where a 2527 * scsi_probe() may or may not have been issued and will *not* 2528 * free previously-freed resources. 2529 */ 2530 scsi_unprobe(devp); 2531 return (rval); 2532 } 2533 2534 2535 /* 2536 * Function: sdinfo 2537 * 2538 * Description: This is the driver getinfo(9e) entry point function. 
2539 * Given the device number, return the devinfo pointer from 2540 * the scsi_device structure or the instance number 2541 * associated with the dev_t. 2542 * 2543 * Arguments: dip - pointer to device info structure 2544 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2545 * DDI_INFO_DEVT2INSTANCE) 2546 * arg - driver dev_t 2547 * resultp - user buffer for request response 2548 * 2549 * Return Code: DDI_SUCCESS 2550 * DDI_FAILURE 2551 */ 2552 /* ARGSUSED */ 2553 static int 2554 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2555 { 2556 struct sd_lun *un; 2557 dev_t dev; 2558 int instance; 2559 int error; 2560 2561 switch (infocmd) { 2562 case DDI_INFO_DEVT2DEVINFO: 2563 dev = (dev_t)arg; 2564 instance = SDUNIT(dev); 2565 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2566 return (DDI_FAILURE); 2567 } 2568 *result = (void *) SD_DEVINFO(un); 2569 error = DDI_SUCCESS; 2570 break; 2571 case DDI_INFO_DEVT2INSTANCE: 2572 dev = (dev_t)arg; 2573 instance = SDUNIT(dev); 2574 *result = (void *)(uintptr_t)instance; 2575 error = DDI_SUCCESS; 2576 break; 2577 default: 2578 error = DDI_FAILURE; 2579 } 2580 return (error); 2581 } 2582 2583 /* 2584 * Function: sd_prop_op 2585 * 2586 * Description: This is the driver prop_op(9e) entry point function. 2587 * Return the number of blocks for the partition in question 2588 * or forward the request to the property facilities. 2589 * 2590 * Arguments: dev - device number 2591 * dip - pointer to device info structure 2592 * prop_op - property operator 2593 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2594 * name - pointer to property name 2595 * valuep - pointer or address of the user buffer 2596 * lengthp - property length 2597 * 2598 * Return Code: DDI_PROP_SUCCESS 2599 * DDI_PROP_NOT_FOUND 2600 * DDI_PROP_UNDEFINED 2601 * DDI_PROP_NO_MEMORY 2602 * DDI_PROP_BUF_TOO_SMALL 2603 */ 2604 2605 static int 2606 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2607 char *name, caddr_t valuep, int *lengthp) 2608 { 2609 struct sd_lun *un; 2610 2611 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2612 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2613 name, valuep, lengthp)); 2614 2615 return (cmlb_prop_op(un->un_cmlbhandle, 2616 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2617 SDPART(dev), (void *)SD_PATH_DIRECT)); 2618 } 2619 2620 /* 2621 * The following functions are for smart probing: 2622 * sd_scsi_probe_cache_init() 2623 * sd_scsi_probe_cache_fini() 2624 * sd_scsi_clear_probe_cache() 2625 * sd_scsi_probe_with_cache() 2626 */ 2627 2628 /* 2629 * Function: sd_scsi_probe_cache_init 2630 * 2631 * Description: Initializes the probe response cache mutex and head pointer. 2632 * 2633 * Context: Kernel thread context 2634 */ 2635 2636 static void 2637 sd_scsi_probe_cache_init(void) 2638 { 2639 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2640 sd_scsi_probe_cache_head = NULL; 2641 } 2642 2643 2644 /* 2645 * Function: sd_scsi_probe_cache_fini 2646 * 2647 * Description: Frees all resources associated with the probe response cache. 
2648 * 2649 * Context: Kernel thread context 2650 */ 2651 2652 static void 2653 sd_scsi_probe_cache_fini(void) 2654 { 2655 struct sd_scsi_probe_cache *cp; 2656 struct sd_scsi_probe_cache *ncp; 2657 2658 /* Clean up our smart probing linked list */ 2659 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2660 ncp = cp->next; 2661 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2662 } 2663 sd_scsi_probe_cache_head = NULL; 2664 mutex_destroy(&sd_scsi_probe_cache_mutex); 2665 } 2666 2667 2668 /* 2669 * Function: sd_scsi_clear_probe_cache 2670 * 2671 * Description: This routine clears the probe response cache. This is 2672 * done when open() returns ENXIO so that when deferred 2673 * attach is attempted (possibly after a device has been 2674 * turned on) we will retry the probe. Since we don't know 2675 * which target we failed to open, we just clear the 2676 * entire cache. 2677 * 2678 * Context: Kernel thread context 2679 */ 2680 2681 static void 2682 sd_scsi_clear_probe_cache(void) 2683 { 2684 struct sd_scsi_probe_cache *cp; 2685 int i; 2686 2687 mutex_enter(&sd_scsi_probe_cache_mutex); 2688 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2689 /* 2690 * Reset all entries to SCSIPROBE_EXISTS. This will 2691 * force probing to be performed the next time 2692 * sd_scsi_probe_with_cache is called. 2693 */ 2694 for (i = 0; i < NTARGETS_WIDE; i++) { 2695 cp->cache[i] = SCSIPROBE_EXISTS; 2696 } 2697 } 2698 mutex_exit(&sd_scsi_probe_cache_mutex); 2699 } 2700 2701 2702 /* 2703 * Function: sd_scsi_probe_with_cache 2704 * 2705 * Description: This routine implements support for a scsi device probe 2706 * with cache. The driver maintains a cache of the target 2707 * responses to scsi probes. If we get no response from a 2708 * target during a probe inquiry, we remember that, and we 2709 * avoid additional calls to scsi_probe on non-zero LUNs 2710 * on the same target until the cache is cleared. By doing 2711 * so we avoid the 1/4 sec selection timeout for nonzero 2712 * LUNs. lun0 of a target is always probed. 2713 * 2714 * Arguments: devp - Pointer to a scsi_device(9S) structure 2715 * waitfunc - indicates what the allocator routines should 2716 * do when resources are not available. This value 2717 * is passed on to scsi_probe() when that routine 2718 * is called. 2719 * 2720 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2721 * otherwise the value returned by scsi_probe(9F). 
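 *
 *		For example (illustrative): if the probe of lun 0 on
 *		target 3 returns SCSIPROBE_NORESP, that result is cached,
 *		and later probes of non-zero luns on target 3 return
 *		SCSIPROBE_NORESP immediately without calling scsi_probe(9F),
 *		until the cache is cleared or lun 0 is probed again.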
2722 * 2723 * Context: Kernel thread context 2724 */ 2725 2726 static int 2727 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2728 { 2729 struct sd_scsi_probe_cache *cp; 2730 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2731 int lun, tgt; 2732 2733 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2734 SCSI_ADDR_PROP_LUN, 0); 2735 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2736 SCSI_ADDR_PROP_TARGET, -1); 2737 2738 /* Make sure caching enabled and target in range */ 2739 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2740 /* do it the old way (no cache) */ 2741 return (scsi_probe(devp, waitfn)); 2742 } 2743 2744 mutex_enter(&sd_scsi_probe_cache_mutex); 2745 2746 /* Find the cache for this scsi bus instance */ 2747 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2748 if (cp->pdip == pdip) { 2749 break; 2750 } 2751 } 2752 2753 /* If we can't find a cache for this pdip, create one */ 2754 if (cp == NULL) { 2755 int i; 2756 2757 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2758 KM_SLEEP); 2759 cp->pdip = pdip; 2760 cp->next = sd_scsi_probe_cache_head; 2761 sd_scsi_probe_cache_head = cp; 2762 for (i = 0; i < NTARGETS_WIDE; i++) { 2763 cp->cache[i] = SCSIPROBE_EXISTS; 2764 } 2765 } 2766 2767 mutex_exit(&sd_scsi_probe_cache_mutex); 2768 2769 /* Recompute the cache for this target if LUN zero */ 2770 if (lun == 0) { 2771 cp->cache[tgt] = SCSIPROBE_EXISTS; 2772 } 2773 2774 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2775 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2776 return (SCSIPROBE_NORESP); 2777 } 2778 2779 /* Do the actual probe; save & return the result */ 2780 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2781 } 2782 2783 2784 /* 2785 * Function: sd_scsi_target_lun_init 2786 * 2787 * Description: Initializes the attached lun chain mutex and head pointer. 2788 * 2789 * Context: Kernel thread context 2790 */ 2791 2792 static void 2793 sd_scsi_target_lun_init(void) 2794 { 2795 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2796 sd_scsi_target_lun_head = NULL; 2797 } 2798 2799 2800 /* 2801 * Function: sd_scsi_target_lun_fini 2802 * 2803 * Description: Frees all resources associated with the attached lun 2804 * chain 2805 * 2806 * Context: Kernel thread context 2807 */ 2808 2809 static void 2810 sd_scsi_target_lun_fini(void) 2811 { 2812 struct sd_scsi_hba_tgt_lun *cp; 2813 struct sd_scsi_hba_tgt_lun *ncp; 2814 2815 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2816 ncp = cp->next; 2817 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2818 } 2819 sd_scsi_target_lun_head = NULL; 2820 mutex_destroy(&sd_scsi_target_lun_mutex); 2821 } 2822 2823 2824 /* 2825 * Function: sd_scsi_get_target_lun_count 2826 * 2827 * Description: This routine will check in the attached lun chain to see 2828 * how many luns are attached on the required SCSI controller 2829 * and target. Currently, some capabilities like tagged queue 2830 * are supported per target based by HBA. So all luns in a 2831 * target have the same capabilities. Based on this assumption, 2832 * sd should only set these capabilities once per target. This 2833 * function is called when sd needs to decide how many luns 2834 * already attached on a target. 2835 * 2836 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2837 * controller device. 2838 * target - The target ID on the controller's SCSI bus. 
2839 * 2840 * Return Code: The number of luns attached on the required target and 2841 * controller. 2842 * -1 if target ID is not in parallel SCSI scope or the given 2843 * dip is not in the chain. 2844 * 2845 * Context: Kernel thread context 2846 */ 2847 2848 static int 2849 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2850 { 2851 struct sd_scsi_hba_tgt_lun *cp; 2852 2853 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2854 return (-1); 2855 } 2856 2857 mutex_enter(&sd_scsi_target_lun_mutex); 2858 2859 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2860 if (cp->pdip == dip) { 2861 break; 2862 } 2863 } 2864 2865 mutex_exit(&sd_scsi_target_lun_mutex); 2866 2867 if (cp == NULL) { 2868 return (-1); 2869 } 2870 2871 return (cp->nlun[target]); 2872 } 2873 2874 2875 /* 2876 * Function: sd_scsi_update_lun_on_target 2877 * 2878 * Description: This routine is used to update the attached lun chain when a 2879 * lun is attached or detached on a target. 2880 * 2881 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2882 * controller device. 2883 * target - The target ID on the controller's SCSI bus. 2884 * flag - Indicate the lun is attached or detached. 2885 * 2886 * Context: Kernel thread context 2887 */ 2888 2889 static void 2890 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2891 { 2892 struct sd_scsi_hba_tgt_lun *cp; 2893 2894 mutex_enter(&sd_scsi_target_lun_mutex); 2895 2896 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2897 if (cp->pdip == dip) { 2898 break; 2899 } 2900 } 2901 2902 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2903 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2904 KM_SLEEP); 2905 cp->pdip = dip; 2906 cp->next = sd_scsi_target_lun_head; 2907 sd_scsi_target_lun_head = cp; 2908 } 2909 2910 mutex_exit(&sd_scsi_target_lun_mutex); 2911 2912 if (cp != NULL) { 2913 if (flag == SD_SCSI_LUN_ATTACH) { 2914 cp->nlun[target] ++; 2915 } else { 2916 cp->nlun[target] --; 2917 } 2918 } 2919 } 2920 2921 2922 /* 2923 * Function: sd_spin_up_unit 2924 * 2925 * Description: Issues the following commands to spin-up the device: 2926 * START STOP UNIT, and INQUIRY. 2927 * 2928 * Arguments: un - driver soft state (unit) structure 2929 * 2930 * Return Code: 0 - success 2931 * EIO - failure 2932 * EACCES - reservation conflict 2933 * 2934 * Context: Kernel thread context 2935 */ 2936 2937 static int 2938 sd_spin_up_unit(struct sd_lun *un) 2939 { 2940 size_t resid = 0; 2941 int has_conflict = FALSE; 2942 uchar_t *bufaddr; 2943 2944 ASSERT(un != NULL); 2945 2946 /* 2947 * Send a throwaway START UNIT command. 2948 * 2949 * If we fail on this, we don't care presently what precisely 2950 * is wrong. EMC's arrays will also fail this with a check 2951 * condition (0x2/0x4/0x3) if the device is "inactive," but 2952 * we don't want to fail the attach because it may become 2953 * "active" later. 2954 */ 2955 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2956 == EACCES) 2957 has_conflict = TRUE; 2958 2959 /* 2960 * Send another INQUIRY command to the target. This is necessary for 2961 * non-removable media direct access devices because their INQUIRY data 2962 * may not be fully qualified until they are spun up (perhaps via the 2963 * START command above). Note: This seems to be needed for some 2964 * legacy devices only.) The INQUIRY command should succeed even if a 2965 * Reservation Conflict is present. 
2966 */ 2967 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2968 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2969 kmem_free(bufaddr, SUN_INQSIZE); 2970 return (EIO); 2971 } 2972 2973 /* 2974 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2975 * Note that this routine does not return a failure here even if the 2976 * INQUIRY command did not return any data. This is a legacy behavior. 2977 */ 2978 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2979 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2980 } 2981 2982 kmem_free(bufaddr, SUN_INQSIZE); 2983 2984 /* If we hit a reservation conflict above, tell the caller. */ 2985 if (has_conflict == TRUE) { 2986 return (EACCES); 2987 } 2988 2989 return (0); 2990 } 2991 2992 #ifdef _LP64 2993 /* 2994 * Function: sd_enable_descr_sense 2995 * 2996 * Description: This routine attempts to select descriptor sense format 2997 * using the Control mode page. Devices that support 64 bit 2998 * LBAs (for >2TB luns) should also implement descriptor 2999 * sense data so we will call this function whenever we see 3000 * a lun larger than 2TB. If for some reason the device 3001 * supports 64 bit LBAs but doesn't support descriptor sense 3002 * presumably the mode select will fail. Everything will 3003 * continue to work normally except that we will not get 3004 * complete sense data for commands that fail with an LBA 3005 * larger than 32 bits. 3006 * 3007 * Arguments: un - driver soft state (unit) structure 3008 * 3009 * Context: Kernel thread context only 3010 */ 3011 3012 static void 3013 sd_enable_descr_sense(struct sd_lun *un) 3014 { 3015 uchar_t *header; 3016 struct mode_control_scsi3 *ctrl_bufp; 3017 size_t buflen; 3018 size_t bd_len; 3019 3020 /* 3021 * Read MODE SENSE page 0xA, Control Mode Page 3022 */ 3023 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3024 sizeof (struct mode_control_scsi3); 3025 header = kmem_zalloc(buflen, KM_SLEEP); 3026 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3027 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3028 SD_ERROR(SD_LOG_COMMON, un, 3029 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3030 goto eds_exit; 3031 } 3032 3033 /* 3034 * Determine size of Block Descriptors in order to locate 3035 * the mode page data. ATAPI devices return 0, SCSI devices 3036 * should return MODE_BLK_DESC_LENGTH. 3037 */ 3038 bd_len = ((struct mode_header *)header)->bdesc_length; 3039 3040 /* Clear the mode data length field for MODE SELECT */ 3041 ((struct mode_header *)header)->length = 0; 3042 3043 ctrl_bufp = (struct mode_control_scsi3 *) 3044 (header + MODE_HEADER_LENGTH + bd_len); 3045 3046 /* 3047 * If the page length is smaller than the expected value, 3048 * the target device doesn't support D_SENSE. Bail out here. 3049 */ 3050 if (ctrl_bufp->mode_page.length < 3051 sizeof (struct mode_control_scsi3) - 2) { 3052 SD_ERROR(SD_LOG_COMMON, un, 3053 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3054 goto eds_exit; 3055 } 3056 3057 /* 3058 * Clear PS bit for MODE SELECT 3059 */ 3060 ctrl_bufp->mode_page.ps = 0; 3061 3062 /* 3063 * Set D_SENSE to enable descriptor sense format. 
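 * (i.e. the target should thereafter return descriptor-format sense data,
 * which can carry the full 64-bit LBA for commands that fail beyond the
 * 32-bit range, as noted in the function header above)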
3064 */ 3065 ctrl_bufp->d_sense = 1; 3066 3067 /* 3068 * Use MODE SELECT to commit the change to the D_SENSE bit 3069 */ 3070 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3071 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3072 SD_INFO(SD_LOG_COMMON, un, 3073 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3074 goto eds_exit; 3075 } 3076 3077 eds_exit: 3078 kmem_free(header, buflen); 3079 } 3080 3081 /* 3082 * Function: sd_reenable_dsense_task 3083 * 3084 * Description: Re-enable descriptor sense after device or bus reset 3085 * 3086 * Context: Executes in a taskq() thread context 3087 */ 3088 static void 3089 sd_reenable_dsense_task(void *arg) 3090 { 3091 struct sd_lun *un = arg; 3092 3093 ASSERT(un != NULL); 3094 sd_enable_descr_sense(un); 3095 } 3096 #endif /* _LP64 */ 3097 3098 /* 3099 * Function: sd_set_mmc_caps 3100 * 3101 * Description: This routine determines if the device is MMC compliant and if 3102 * the device supports CDDA via a mode sense of the CDVD 3103 * capabilities mode page. Also checks if the device is a 3104 * dvdram writable device. 3105 * 3106 * Arguments: un - driver soft state (unit) structure 3107 * 3108 * Context: Kernel thread context only 3109 */ 3110 3111 static void 3112 sd_set_mmc_caps(struct sd_lun *un) 3113 { 3114 struct mode_header_grp2 *sense_mhp; 3115 uchar_t *sense_page; 3116 caddr_t buf; 3117 int bd_len; 3118 int status; 3119 struct uscsi_cmd com; 3120 int rtn; 3121 uchar_t *out_data_rw, *out_data_hd; 3122 uchar_t *rqbuf_rw, *rqbuf_hd; 3123 3124 ASSERT(un != NULL); 3125 3126 /* 3127 * The flags which will be set in this function are - mmc compliant, 3128 * dvdram writable device, cdda support. Initialize them to FALSE 3129 * and if a capability is detected - it will be set to TRUE. 3130 */ 3131 un->un_f_mmc_cap = FALSE; 3132 un->un_f_dvdram_writable_device = FALSE; 3133 un->un_f_cfg_cdda = FALSE; 3134 3135 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3136 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3137 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3138 3139 if (status != 0) { 3140 /* command failed; just return */ 3141 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3142 return; 3143 } 3144 /* 3145 * If the mode sense request for the CDROM CAPABILITIES 3146 * page (0x2A) succeeds the device is assumed to be MMC. 3147 */ 3148 un->un_f_mmc_cap = TRUE; 3149 3150 /* Get to the page data */ 3151 sense_mhp = (struct mode_header_grp2 *)buf; 3152 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3153 sense_mhp->bdesc_length_lo; 3154 if (bd_len > MODE_BLK_DESC_LENGTH) { 3155 /* 3156 * We did not get back the expected block descriptor 3157 * length so we cannot determine if the device supports 3158 * CDDA. However, we still indicate the device is MMC 3159 * according to the successful response to the page 3160 * 0x2A mode sense request. 3161 */ 3162 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3163 "sd_set_mmc_caps: Mode Sense returned " 3164 "invalid block descriptor length\n"); 3165 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3166 return; 3167 } 3168 3169 /* See if read CDDA is supported */ 3170 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3171 bd_len); 3172 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3173 3174 /* See if writing DVD RAM is supported. */ 3175 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? 
TRUE : FALSE; 3176 if (un->un_f_dvdram_writable_device == TRUE) { 3177 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3178 return; 3179 } 3180 3181 /* 3182 * If the device presents DVD or CD capabilities in the mode 3183 * page, we can return here since a RRD will not have 3184 * these capabilities. 3185 */ 3186 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3187 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3188 return; 3189 } 3190 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3191 3192 /* 3193 * If un->un_f_dvdram_writable_device is still FALSE, 3194 * check for a Removable Rigid Disk (RRD). A RRD 3195 * device is identified by the features RANDOM_WRITABLE and 3196 * HARDWARE_DEFECT_MANAGEMENT. 3197 */ 3198 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3199 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3200 3201 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3202 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3203 RANDOM_WRITABLE, SD_PATH_STANDARD); 3204 if (rtn != 0) { 3205 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3206 kmem_free(rqbuf_rw, SENSE_LENGTH); 3207 return; 3208 } 3209 3210 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3211 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3212 3213 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3214 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3215 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3216 if (rtn == 0) { 3217 /* 3218 * We have good information, check for random writable 3219 * and hardware defect features. 3220 */ 3221 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3222 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3223 un->un_f_dvdram_writable_device = TRUE; 3224 } 3225 } 3226 3227 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3228 kmem_free(rqbuf_rw, SENSE_LENGTH); 3229 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3230 kmem_free(rqbuf_hd, SENSE_LENGTH); 3231 } 3232 3233 /* 3234 * Function: sd_check_for_writable_cd 3235 * 3236 * Description: This routine determines if the media in the device is 3237 * writable or not. It uses the get configuration command (0x46) 3238 * to determine if the media is writable 3239 * 3240 * Arguments: un - driver soft state (unit) structure 3241 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3242 * chain and the normal command waitq, or 3243 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3244 * "direct" chain and bypass the normal command 3245 * waitq. 3246 * 3247 * Context: Never called at interrupt context. 3248 */ 3249 3250 static void 3251 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3252 { 3253 struct uscsi_cmd com; 3254 uchar_t *out_data; 3255 uchar_t *rqbuf; 3256 int rtn; 3257 uchar_t *out_data_rw, *out_data_hd; 3258 uchar_t *rqbuf_rw, *rqbuf_hd; 3259 struct mode_header_grp2 *sense_mhp; 3260 uchar_t *sense_page; 3261 caddr_t buf; 3262 int bd_len; 3263 int status; 3264 3265 ASSERT(un != NULL); 3266 ASSERT(mutex_owned(SD_MUTEX(un))); 3267 3268 /* 3269 * Initialize the writable media to false, if configuration info. 3270 * tells us otherwise then only we will set it. 3271 */ 3272 un->un_f_mmc_writable_media = FALSE; 3273 mutex_exit(SD_MUTEX(un)); 3274 3275 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3276 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3277 3278 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3279 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3280 3281 mutex_enter(SD_MUTEX(un)); 3282 if (rtn == 0) { 3283 /* 3284 * We have good information, check for writable DVD. 
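 * (out_data[6] and out_data[7] appear to hold the big-endian "current
 * profile" field of the GET CONFIGURATION feature header; profile 0x0012
 * is DVD-RAM, which the test below treats as writable media)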
3285 */ 3286 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3287 un->un_f_mmc_writable_media = TRUE; 3288 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3289 kmem_free(rqbuf, SENSE_LENGTH); 3290 return; 3291 } 3292 } 3293 3294 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3295 kmem_free(rqbuf, SENSE_LENGTH); 3296 3297 /* 3298 * Determine if this is a RRD type device. 3299 */ 3300 mutex_exit(SD_MUTEX(un)); 3301 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3302 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3303 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3304 mutex_enter(SD_MUTEX(un)); 3305 if (status != 0) { 3306 /* command failed; just return */ 3307 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3308 return; 3309 } 3310 3311 /* Get to the page data */ 3312 sense_mhp = (struct mode_header_grp2 *)buf; 3313 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3314 if (bd_len > MODE_BLK_DESC_LENGTH) { 3315 /* 3316 * We did not get back the expected block descriptor length so 3317 * we cannot check the mode page. 3318 */ 3319 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3320 "sd_check_for_writable_cd: Mode Sense returned " 3321 "invalid block descriptor length\n"); 3322 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3323 return; 3324 } 3325 3326 /* 3327 * If the device presents DVD or CD capabilities in the mode 3328 * page, we can return here since a RRD device will not have 3329 * these capabilities. 3330 */ 3331 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3332 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3333 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3334 return; 3335 } 3336 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3337 3338 /* 3339 * If un->un_f_mmc_writable_media is still FALSE, 3340 * check for RRD type media. A RRD device is identified 3341 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3342 */ 3343 mutex_exit(SD_MUTEX(un)); 3344 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3345 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3346 3347 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3348 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3349 RANDOM_WRITABLE, path_flag); 3350 if (rtn != 0) { 3351 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3352 kmem_free(rqbuf_rw, SENSE_LENGTH); 3353 mutex_enter(SD_MUTEX(un)); 3354 return; 3355 } 3356 3357 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3358 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3359 3360 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3361 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3362 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3363 mutex_enter(SD_MUTEX(un)); 3364 if (rtn == 0) { 3365 /* 3366 * We have good information, check for random writable 3367 * and hardware defect features as current. 3368 */ 3369 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3370 (out_data_rw[10] & 0x1) && 3371 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3372 (out_data_hd[10] & 0x1)) { 3373 un->un_f_mmc_writable_media = TRUE; 3374 } 3375 } 3376 3377 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3378 kmem_free(rqbuf_rw, SENSE_LENGTH); 3379 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3380 kmem_free(rqbuf_hd, SENSE_LENGTH); 3381 } 3382 3383 /* 3384 * Function: sd_read_unit_properties 3385 * 3386 * Description: The following implements a property lookup mechanism. 
3387 * Properties for particular disks (keyed on vendor, model 3388 * and rev numbers) are sought in the sd.conf file via 3389 * sd_process_sdconf_file(), and if not found there, are 3390 * looked for in a list hardcoded in this driver via 3391 * sd_process_sdconf_table() Once located the properties 3392 * are used to update the driver unit structure. 3393 * 3394 * Arguments: un - driver soft state (unit) structure 3395 */ 3396 3397 static void 3398 sd_read_unit_properties(struct sd_lun *un) 3399 { 3400 /* 3401 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3402 * the "sd-config-list" property (from the sd.conf file) or if 3403 * there was not a match for the inquiry vid/pid. If this event 3404 * occurs the static driver configuration table is searched for 3405 * a match. 3406 */ 3407 ASSERT(un != NULL); 3408 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3409 sd_process_sdconf_table(un); 3410 } 3411 3412 /* check for LSI device */ 3413 sd_is_lsi(un); 3414 3415 3416 } 3417 3418 3419 /* 3420 * Function: sd_process_sdconf_file 3421 * 3422 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3423 * driver's config file (ie, sd.conf) and update the driver 3424 * soft state structure accordingly. 3425 * 3426 * Arguments: un - driver soft state (unit) structure 3427 * 3428 * Return Code: SD_SUCCESS - The properties were successfully set according 3429 * to the driver configuration file. 3430 * SD_FAILURE - The driver config list was not obtained or 3431 * there was no vid/pid match. This indicates that 3432 * the static config table should be used. 3433 * 3434 * The config file has a property, "sd-config-list". Currently we support 3435 * two kinds of formats. For both formats, the value of this property 3436 * is a list of duplets: 3437 * 3438 * sd-config-list= 3439 * <duplet>, 3440 * [,<duplet>]*; 3441 * 3442 * For the improved format, where 3443 * 3444 * <duplet>:= "<vid+pid>","<tunable-list>" 3445 * 3446 * and 3447 * 3448 * <tunable-list>:= <tunable> [, <tunable> ]*; 3449 * <tunable> = <name> : <value> 3450 * 3451 * The <vid+pid> is the string that is returned by the target device on a 3452 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3453 * to apply to all target devices with the specified <vid+pid>. 3454 * 3455 * Each <tunable> is a "<name> : <value>" pair. 3456 * 3457 * For the old format, the structure of each duplet is as follows: 3458 * 3459 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3460 * 3461 * The first entry of the duplet is the device ID string (the concatenated 3462 * vid & pid; not to be confused with a device_id). This is defined in 3463 * the same way as in the sd_disk_table. 3464 * 3465 * The second part of the duplet is a string that identifies a 3466 * data-property-name-list. The data-property-name-list is defined as 3467 * follows: 3468 * 3469 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3470 * 3471 * The syntax of <data-property-name> depends on the <version> field. 3472 * 3473 * If version = SD_CONF_VERSION_1 we have the following syntax: 3474 * 3475 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3476 * 3477 * where the prop0 value will be used to set prop0 if bit0 set in the 3478 * flags, prop1 if bit1 set, etc. 
and N = SD_CONF_MAX_ITEMS -1 3479 * 3480 */ 3481 3482 static int 3483 sd_process_sdconf_file(struct sd_lun *un) 3484 { 3485 char **config_list = NULL; 3486 uint_t nelements; 3487 char *vidptr; 3488 int vidlen; 3489 char *dnlist_ptr; 3490 char *dataname_ptr; 3491 char *dataname_lasts; 3492 int *data_list = NULL; 3493 uint_t data_list_len; 3494 int rval = SD_FAILURE; 3495 int i; 3496 3497 ASSERT(un != NULL); 3498 3499 /* Obtain the configuration list associated with the .conf file */ 3500 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3501 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3502 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3503 return (SD_FAILURE); 3504 } 3505 3506 /* 3507 * Compare vids in each duplet to the inquiry vid - if a match is 3508 * made, get the data value and update the soft state structure 3509 * accordingly. 3510 * 3511 * Each duplet should show as a pair of strings, return SD_FAILURE 3512 * otherwise. 3513 */ 3514 if (nelements & 1) { 3515 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3516 "sd-config-list should show as pairs of strings.\n"); 3517 if (config_list) 3518 ddi_prop_free(config_list); 3519 return (SD_FAILURE); 3520 } 3521 3522 for (i = 0; i < nelements; i += 2) { 3523 /* 3524 * Note: The assumption here is that each vid entry is on 3525 * a unique line from its associated duplet. 3526 */ 3527 vidptr = config_list[i]; 3528 vidlen = (int)strlen(vidptr); 3529 if ((vidlen == 0) || 3530 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3531 continue; 3532 } 3533 3534 /* 3535 * dnlist contains 1 or more blank separated 3536 * data-property-name entries 3537 */ 3538 dnlist_ptr = config_list[i + 1]; 3539 3540 if (strchr(dnlist_ptr, ':') != NULL) { 3541 /* 3542 * Decode the improved format sd-config-list. 3543 */ 3544 sd_nvpair_str_decode(un, dnlist_ptr); 3545 } else { 3546 /* 3547 * The old format sd-config-list, loop through all 3548 * data-property-name entries in the 3549 * data-property-name-list 3550 * setting the properties for each. 3551 */ 3552 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3553 &dataname_lasts); dataname_ptr != NULL; 3554 dataname_ptr = sd_strtok_r(NULL, " \t", 3555 &dataname_lasts)) { 3556 int version; 3557 3558 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3559 "sd_process_sdconf_file: disk:%s, " 3560 "data:%s\n", vidptr, dataname_ptr); 3561 3562 /* Get the data list */ 3563 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3564 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3565 &data_list_len) != DDI_PROP_SUCCESS) { 3566 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3567 "sd_process_sdconf_file: data " 3568 "property (%s) has no value\n", 3569 dataname_ptr); 3570 continue; 3571 } 3572 3573 version = data_list[0]; 3574 3575 if (version == SD_CONF_VERSION_1) { 3576 sd_tunables values; 3577 3578 /* Set the properties */ 3579 if (sd_chk_vers1_data(un, data_list[1], 3580 &data_list[2], data_list_len, 3581 dataname_ptr) == SD_SUCCESS) { 3582 sd_get_tunables_from_conf(un, 3583 data_list[1], &data_list[2], 3584 &values); 3585 sd_set_vers1_properties(un, 3586 data_list[1], &values); 3587 rval = SD_SUCCESS; 3588 } else { 3589 rval = SD_FAILURE; 3590 } 3591 } else { 3592 scsi_log(SD_DEVINFO(un), sd_label, 3593 CE_WARN, "data property %s version " 3594 "0x%x is invalid.", 3595 dataname_ptr, version); 3596 rval = SD_FAILURE; 3597 } 3598 if (data_list) 3599 ddi_prop_free(data_list); 3600 } 3601 } 3602 } 3603 3604 /* free up the memory allocated by ddi_prop_lookup_string_array(). 
*/ 3605 if (config_list) { 3606 ddi_prop_free(config_list); 3607 } 3608 3609 return (rval); 3610 } 3611 3612 /* 3613 * Function: sd_nvpair_str_decode() 3614 * 3615 * Description: Parse the improved format sd-config-list to get 3616 * each entry of tunable, which includes a name-value pair. 3617 * Then call sd_set_properties() to set the property. 3618 * 3619 * Arguments: un - driver soft state (unit) structure 3620 * nvpair_str - the tunable list 3621 */ 3622 static void 3623 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3624 { 3625 char *nv, *name, *value, *token; 3626 char *nv_lasts, *v_lasts, *x_lasts; 3627 3628 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3629 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3630 token = sd_strtok_r(nv, ":", &v_lasts); 3631 name = sd_strtok_r(token, " \t", &x_lasts); 3632 token = sd_strtok_r(NULL, ":", &v_lasts); 3633 value = sd_strtok_r(token, " \t", &x_lasts); 3634 if (name == NULL || value == NULL) { 3635 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3636 "sd_nvpair_str_decode: " 3637 "name or value is not valid!\n"); 3638 } else { 3639 sd_set_properties(un, name, value); 3640 } 3641 } 3642 } 3643 3644 /* 3645 * Function: sd_strtok_r() 3646 * 3647 * Description: This function uses strpbrk and strspn to break 3648 * string into tokens on sequentially subsequent calls. Return 3649 * NULL when no non-separator characters remain. The first 3650 * argument is NULL for subsequent calls. 3651 */ 3652 static char * 3653 sd_strtok_r(char *string, const char *sepset, char **lasts) 3654 { 3655 char *q, *r; 3656 3657 /* First or subsequent call */ 3658 if (string == NULL) 3659 string = *lasts; 3660 3661 if (string == NULL) 3662 return (NULL); 3663 3664 /* Skip leading separators */ 3665 q = string + strspn(string, sepset); 3666 3667 if (*q == '\0') 3668 return (NULL); 3669 3670 if ((r = strpbrk(q, sepset)) == NULL) 3671 *lasts = NULL; 3672 else { 3673 *r = '\0'; 3674 *lasts = r + 1; 3675 } 3676 return (q); 3677 } 3678 3679 /* 3680 * Function: sd_set_properties() 3681 * 3682 * Description: Set device properties based on the improved 3683 * format sd-config-list. 
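 *
 *		For illustration, a hypothetical sd.conf entry in the
 *		improved format such as
 *
 *		sd-config-list = "ACME    ExampleDisk",
 *		    "retries-busy:6, throttle-max:64";
 *
 *		would reach this routine twice, once as ("retries-busy", "6")
 *		and once as ("throttle-max", "64"). The vid/pid shown is made
 *		up; a real entry must match the device's INQUIRY vid/pid.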
3684 * 3685 * Arguments: un - driver soft state (unit) structure 3686 * name - supported tunable name 3687 * value - tunable value 3688 */ 3689 static void 3690 sd_set_properties(struct sd_lun *un, char *name, char *value) 3691 { 3692 char *endptr = NULL; 3693 long val = 0; 3694 3695 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3696 if (strcasecmp(value, "true") == 0) { 3697 un->un_f_suppress_cache_flush = TRUE; 3698 } else if (strcasecmp(value, "false") == 0) { 3699 un->un_f_suppress_cache_flush = FALSE; 3700 } else { 3701 goto value_invalid; 3702 } 3703 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3704 "suppress_cache_flush flag set to %d\n", 3705 un->un_f_suppress_cache_flush); 3706 return; 3707 } 3708 3709 if (strcasecmp(name, "controller-type") == 0) { 3710 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3711 un->un_ctype = val; 3712 } else { 3713 goto value_invalid; 3714 } 3715 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3716 "ctype set to %d\n", un->un_ctype); 3717 return; 3718 } 3719 3720 if (strcasecmp(name, "delay-busy") == 0) { 3721 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3722 un->un_busy_timeout = drv_usectohz(val / 1000); 3723 } else { 3724 goto value_invalid; 3725 } 3726 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3727 "busy_timeout set to %d\n", un->un_busy_timeout); 3728 return; 3729 } 3730 3731 if (strcasecmp(name, "disksort") == 0) { 3732 if (strcasecmp(value, "true") == 0) { 3733 un->un_f_disksort_disabled = FALSE; 3734 } else if (strcasecmp(value, "false") == 0) { 3735 un->un_f_disksort_disabled = TRUE; 3736 } else { 3737 goto value_invalid; 3738 } 3739 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3740 "disksort disabled flag set to %d\n", 3741 un->un_f_disksort_disabled); 3742 return; 3743 } 3744 3745 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3746 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3747 un->un_reserve_release_time = val; 3748 } else { 3749 goto value_invalid; 3750 } 3751 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3752 "reservation release timeout set to %d\n", 3753 un->un_reserve_release_time); 3754 return; 3755 } 3756 3757 if (strcasecmp(name, "reset-lun") == 0) { 3758 if (strcasecmp(value, "true") == 0) { 3759 un->un_f_lun_reset_enabled = TRUE; 3760 } else if (strcasecmp(value, "false") == 0) { 3761 un->un_f_lun_reset_enabled = FALSE; 3762 } else { 3763 goto value_invalid; 3764 } 3765 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3766 "lun reset enabled flag set to %d\n", 3767 un->un_f_lun_reset_enabled); 3768 return; 3769 } 3770 3771 if (strcasecmp(name, "retries-busy") == 0) { 3772 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3773 un->un_busy_retry_count = val; 3774 } else { 3775 goto value_invalid; 3776 } 3777 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3778 "busy retry count set to %d\n", un->un_busy_retry_count); 3779 return; 3780 } 3781 3782 if (strcasecmp(name, "retries-timeout") == 0) { 3783 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3784 un->un_retry_count = val; 3785 } else { 3786 goto value_invalid; 3787 } 3788 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3789 "timeout retry count set to %d\n", un->un_retry_count); 3790 return; 3791 } 3792 3793 if (strcasecmp(name, "retries-notready") == 0) { 3794 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3795 un->un_notready_retry_count = val; 3796 } else { 3797 goto value_invalid; 3798 } 3799 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3800 "notready retry count 
set to %d\n", 3801 un->un_notready_retry_count); 3802 return; 3803 } 3804 3805 if (strcasecmp(name, "retries-reset") == 0) { 3806 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3807 un->un_reset_retry_count = val; 3808 } else { 3809 goto value_invalid; 3810 } 3811 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3812 "reset retry count set to %d\n", 3813 un->un_reset_retry_count); 3814 return; 3815 } 3816 3817 if (strcasecmp(name, "throttle-max") == 0) { 3818 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3819 un->un_saved_throttle = un->un_throttle = val; 3820 } else { 3821 goto value_invalid; 3822 } 3823 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3824 "throttle set to %d\n", un->un_throttle); 3825 } 3826 3827 if (strcasecmp(name, "throttle-min") == 0) { 3828 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3829 un->un_min_throttle = val; 3830 } else { 3831 goto value_invalid; 3832 } 3833 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3834 "min throttle set to %d\n", un->un_min_throttle); 3835 } 3836 3837 /* 3838 * Validate the throttle values. 3839 * If any of the numbers are invalid, set everything to defaults. 3840 */ 3841 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3842 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3843 (un->un_min_throttle > un->un_throttle)) { 3844 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3845 un->un_min_throttle = sd_min_throttle; 3846 } 3847 return; 3848 3849 value_invalid: 3850 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3851 "value of prop %s is invalid\n", name); 3852 } 3853 3854 /* 3855 * Function: sd_get_tunables_from_conf() 3856 * 3857 * Description: This function reads the data list from the sd.conf file 3858 * and places the values that take numeric arguments into the 3859 * appropriate sd_tunables members. 3860 * 3861 * Because the order of the data list members varies across platforms, 3862 * this function reads them from the data list in a platform-specific 3863 * order and places each one into the correct sd_tunables member, which 3864 * is consistent across all platforms.
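 *
 *		For example, a version-1 data list whose flags word has only
 *		SD_CONF_BSET_THROTTLE and SD_CONF_BSET_MIN_THROTTLE set would
 *		fill in just sdt_throttle and sdt_min_throttle; each value is
 *		taken from the data_list[] slot whose index equals the bit
 *		position of its flag, and every other sd_tunables member is
 *		left zeroed.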
3865 */ 3866 static void 3867 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3868 sd_tunables *values) 3869 { 3870 int i; 3871 int mask; 3872 3873 bzero(values, sizeof (sd_tunables)); 3874 3875 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3876 3877 mask = 1 << i; 3878 if (mask > flags) { 3879 break; 3880 } 3881 3882 switch (mask & flags) { 3883 case 0: /* This mask bit not set in flags */ 3884 continue; 3885 case SD_CONF_BSET_THROTTLE: 3886 values->sdt_throttle = data_list[i]; 3887 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3888 "sd_get_tunables_from_conf: throttle = %d\n", 3889 values->sdt_throttle); 3890 break; 3891 case SD_CONF_BSET_CTYPE: 3892 values->sdt_ctype = data_list[i]; 3893 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3894 "sd_get_tunables_from_conf: ctype = %d\n", 3895 values->sdt_ctype); 3896 break; 3897 case SD_CONF_BSET_NRR_COUNT: 3898 values->sdt_not_rdy_retries = data_list[i]; 3899 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3900 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3901 values->sdt_not_rdy_retries); 3902 break; 3903 case SD_CONF_BSET_BSY_RETRY_COUNT: 3904 values->sdt_busy_retries = data_list[i]; 3905 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3906 "sd_get_tunables_from_conf: busy_retries = %d\n", 3907 values->sdt_busy_retries); 3908 break; 3909 case SD_CONF_BSET_RST_RETRIES: 3910 values->sdt_reset_retries = data_list[i]; 3911 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3912 "sd_get_tunables_from_conf: reset_retries = %d\n", 3913 values->sdt_reset_retries); 3914 break; 3915 case SD_CONF_BSET_RSV_REL_TIME: 3916 values->sdt_reserv_rel_time = data_list[i]; 3917 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3918 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3919 values->sdt_reserv_rel_time); 3920 break; 3921 case SD_CONF_BSET_MIN_THROTTLE: 3922 values->sdt_min_throttle = data_list[i]; 3923 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3924 "sd_get_tunables_from_conf: min_throttle = %d\n", 3925 values->sdt_min_throttle); 3926 break; 3927 case SD_CONF_BSET_DISKSORT_DISABLED: 3928 values->sdt_disk_sort_dis = data_list[i]; 3929 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3930 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3931 values->sdt_disk_sort_dis); 3932 break; 3933 case SD_CONF_BSET_LUN_RESET_ENABLED: 3934 values->sdt_lun_reset_enable = data_list[i]; 3935 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3936 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3937 "\n", values->sdt_lun_reset_enable); 3938 break; 3939 case SD_CONF_BSET_CACHE_IS_NV: 3940 values->sdt_suppress_cache_flush = data_list[i]; 3941 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3942 "sd_get_tunables_from_conf: \ 3943 suppress_cache_flush = %d" 3944 "\n", values->sdt_suppress_cache_flush); 3945 break; 3946 } 3947 } 3948 } 3949 3950 /* 3951 * Function: sd_process_sdconf_table 3952 * 3953 * Description: Search the static configuration table for a match on the 3954 * inquiry vid/pid and update the driver soft state structure 3955 * according to the table property values for the device. 
3956 * 3957 * The form of a configuration table entry is: 3958 * <vid+pid>,<flags>,<property-data> 3959 * "SEAGATE ST42400N",1,0x40000, 3960 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 3961 * 3962 * Arguments: un - driver soft state (unit) structure 3963 */ 3964 3965 static void 3966 sd_process_sdconf_table(struct sd_lun *un) 3967 { 3968 char *id = NULL; 3969 int table_index; 3970 int idlen; 3971 3972 ASSERT(un != NULL); 3973 for (table_index = 0; table_index < sd_disk_table_size; 3974 table_index++) { 3975 id = sd_disk_table[table_index].device_id; 3976 idlen = strlen(id); 3977 if (idlen == 0) { 3978 continue; 3979 } 3980 3981 /* 3982 * The static configuration table currently does not 3983 * implement version 10 properties. Additionally, 3984 * multiple data-property-name entries are not 3985 * implemented in the static configuration table. 3986 */ 3987 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3988 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3989 "sd_process_sdconf_table: disk %s\n", id); 3990 sd_set_vers1_properties(un, 3991 sd_disk_table[table_index].flags, 3992 sd_disk_table[table_index].properties); 3993 break; 3994 } 3995 } 3996 } 3997 3998 3999 /* 4000 * Function: sd_sdconf_id_match 4001 * 4002 * Description: This local function implements a case sensitive vid/pid 4003 * comparison as well as the boundary cases of wild card and 4004 * multiple blanks. 4005 * 4006 * Note: An implicit assumption made here is that the scsi 4007 * inquiry structure will always keep the vid, pid and 4008 * revision strings in consecutive sequence, so they can be 4009 * read as a single string. If this assumption is not the 4010 * case, a separate string, to be used for the check, needs 4011 * to be built with these strings concatenated. 4012 * 4013 * Arguments: un - driver soft state (unit) structure 4014 * id - table or config file vid/pid 4015 * idlen - length of the vid/pid (bytes) 4016 * 4017 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4018 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4019 */ 4020 4021 static int 4022 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4023 { 4024 struct scsi_inquiry *sd_inq; 4025 int rval = SD_SUCCESS; 4026 4027 ASSERT(un != NULL); 4028 sd_inq = un->un_sd->sd_inq; 4029 ASSERT(id != NULL); 4030 4031 /* 4032 * We use the inq_vid as a pointer to a buffer containing the 4033 * vid and pid and use the entire vid/pid length of the table 4034 * entry for the comparison. This works because the inq_pid 4035 * data member follows inq_vid in the scsi_inquiry structure. 4036 */ 4037 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4038 /* 4039 * The user id string is compared to the inquiry vid/pid 4040 * using a case insensitive comparison and ignoring 4041 * multiple spaces. 4042 */ 4043 rval = sd_blank_cmp(un, id, idlen); 4044 if (rval != SD_SUCCESS) { 4045 /* 4046 * User id strings that start and end with a "*" 4047 * are a special case. These do not have a 4048 * specific vendor, and the product string can 4049 * appear anywhere in the 16 byte PID portion of 4050 * the inquiry data. This is a simple strstr() 4051 * type search for the user id in the inquiry data. 
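 *
 *		For example, a hypothetical table id of "*EXAMPLE*" has its
 *		two asterisks stripped and the remaining seven characters
 *		are slid across the 16-byte inq_pid field, so any product id
 *		that contains "EXAMPLE" at any offset matches, regardless of
 *		the vendor id.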
4052 */ 4053 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4054 char *pidptr = &id[1]; 4055 int i; 4056 int j; 4057 int pidstrlen = idlen - 2; 4058 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4059 pidstrlen; 4060 4061 if (j < 0) { 4062 return (SD_FAILURE); 4063 } 4064 for (i = 0; i < j; i++) { 4065 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4066 pidptr, pidstrlen) == 0) { 4067 rval = SD_SUCCESS; 4068 break; 4069 } 4070 } 4071 } 4072 } 4073 } 4074 return (rval); 4075 } 4076 4077 4078 /* 4079 * Function: sd_blank_cmp 4080 * 4081 * Description: If the id string starts and ends with a space, treat 4082 * multiple consecutive spaces as equivalent to a single 4083 * space. For example, this causes a sd_disk_table entry 4084 * of " NEC CDROM " to match a device's id string of 4085 * "NEC CDROM". 4086 * 4087 * Note: The success exit condition for this routine is if 4088 * the pointer to the table entry is '\0' and the cnt of 4089 * the inquiry length is zero. This will happen if the inquiry 4090 * string returned by the device is padded with spaces to be 4091 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4092 * SCSI spec states that the inquiry string is to be padded with 4093 * spaces. 4094 * 4095 * Arguments: un - driver soft state (unit) structure 4096 * id - table or config file vid/pid 4097 * idlen - length of the vid/pid (bytes) 4098 * 4099 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4100 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4101 */ 4102 4103 static int 4104 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4105 { 4106 char *p1; 4107 char *p2; 4108 int cnt; 4109 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4110 sizeof (SD_INQUIRY(un)->inq_pid); 4111 4112 ASSERT(un != NULL); 4113 p2 = un->un_sd->sd_inq->inq_vid; 4114 ASSERT(id != NULL); 4115 p1 = id; 4116 4117 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4118 /* 4119 * Note: string p1 is terminated by a NUL but string p2 4120 * isn't. The end of p2 is determined by cnt. 4121 */ 4122 for (;;) { 4123 /* skip over any extra blanks in both strings */ 4124 while ((*p1 != '\0') && (*p1 == ' ')) { 4125 p1++; 4126 } 4127 while ((cnt != 0) && (*p2 == ' ')) { 4128 p2++; 4129 cnt--; 4130 } 4131 4132 /* compare the two strings */ 4133 if ((cnt == 0) || 4134 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4135 break; 4136 } 4137 while ((cnt > 0) && 4138 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4139 p1++; 4140 p2++; 4141 cnt--; 4142 } 4143 } 4144 } 4145 4146 /* return SD_SUCCESS if both strings match */ 4147 return (((*p1 == '\0') && (cnt == 0)) ? 
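/*
 * Worked illustration, using the example id from the description above:
 * for the table id " NEC CDROM " the routine walks the 24-byte inquiry
 * buffer
 *
 *	inq_vid = "NEC     "		(8 bytes, space padded)
 *	inq_pid = "CDROM           "	(16 bytes, space padded)
 *
 * collapsing each run of blanks on both sides to a single comparison
 * point. The walk ends with *p1 == '\0' and cnt == 0 at the same time,
 * which is exactly the condition tested in the surrounding return.
 */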
SD_SUCCESS : SD_FAILURE); 4148 } 4149 4150 4151 /* 4152 * Function: sd_chk_vers1_data 4153 * 4154 * Description: Verify the version 1 device properties provided by the 4155 * user via the configuration file 4156 * 4157 * Arguments: un - driver soft state (unit) structure 4158 * flags - integer mask indicating properties to be set 4159 * prop_list - integer list of property values 4160 * list_len - number of the elements 4161 * 4162 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4163 * SD_FAILURE - Indicates the user provided data is invalid 4164 */ 4165 4166 static int 4167 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4168 int list_len, char *dataname_ptr) 4169 { 4170 int i; 4171 int mask = 1; 4172 int index = 0; 4173 4174 ASSERT(un != NULL); 4175 4176 /* Check for a NULL property name and list */ 4177 if (dataname_ptr == NULL) { 4178 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4179 "sd_chk_vers1_data: NULL data property name."); 4180 return (SD_FAILURE); 4181 } 4182 if (prop_list == NULL) { 4183 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4184 "sd_chk_vers1_data: %s NULL data property list.", 4185 dataname_ptr); 4186 return (SD_FAILURE); 4187 } 4188 4189 /* Display a warning if undefined bits are set in the flags */ 4190 if (flags & ~SD_CONF_BIT_MASK) { 4191 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4192 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4193 "Properties not set.", 4194 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4195 return (SD_FAILURE); 4196 } 4197 4198 /* 4199 * Verify the length of the list by identifying the highest bit set 4200 * in the flags and validating that the property list has a length 4201 * up to the index of this bit. 4202 */ 4203 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4204 if (flags & mask) { 4205 index++; 4206 } 4207 mask = 1 << i; 4208 } 4209 if (list_len < (index + 2)) { 4210 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4211 "sd_chk_vers1_data: " 4212 "Data property list %s size is incorrect. " 4213 "Properties not set.", dataname_ptr); 4214 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4215 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4216 return (SD_FAILURE); 4217 } 4218 return (SD_SUCCESS); 4219 } 4220 4221 4222 /* 4223 * Function: sd_set_vers1_properties 4224 * 4225 * Description: Set version 1 device properties based on a property list 4226 * retrieved from the driver configuration file or static 4227 * configuration table. Version 1 properties have the format: 4228 * 4229 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4230 * 4231 * where the prop0 value will be used to set prop0 if bit0 4232 * is set in the flags 4233 * 4234 * Arguments: un - driver soft state (unit) structure 4235 * flags - integer mask indicating properties to be set 4236 * prop_list - integer list of property values 4237 */ 4238 4239 static void 4240 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4241 { 4242 ASSERT(un != NULL); 4243 4244 /* 4245 * Set the flag to indicate cache is to be disabled. An attempt 4246 * to disable the cache via sd_cache_control() will be made 4247 * later during attach once the basic initialization is complete. 
4248 */ 4249 if (flags & SD_CONF_BSET_NOCACHE) { 4250 un->un_f_opt_disable_cache = TRUE; 4251 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4252 "sd_set_vers1_properties: caching disabled flag set\n"); 4253 } 4254 4255 /* CD-specific configuration parameters */ 4256 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4257 un->un_f_cfg_playmsf_bcd = TRUE; 4258 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4259 "sd_set_vers1_properties: playmsf_bcd set\n"); 4260 } 4261 if (flags & SD_CONF_BSET_READSUB_BCD) { 4262 un->un_f_cfg_readsub_bcd = TRUE; 4263 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4264 "sd_set_vers1_properties: readsub_bcd set\n"); 4265 } 4266 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4267 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4268 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4269 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4270 } 4271 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4272 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4273 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4274 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4275 } 4276 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4277 un->un_f_cfg_no_read_header = TRUE; 4278 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4279 "sd_set_vers1_properties: no_read_header set\n"); 4280 } 4281 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4282 un->un_f_cfg_read_cd_xd4 = TRUE; 4283 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4284 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4285 } 4286 4287 /* Support for devices which do not have valid/unique serial numbers */ 4288 if (flags & SD_CONF_BSET_FAB_DEVID) { 4289 un->un_f_opt_fab_devid = TRUE; 4290 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4291 "sd_set_vers1_properties: fab_devid bit set\n"); 4292 } 4293 4294 /* Support for user throttle configuration */ 4295 if (flags & SD_CONF_BSET_THROTTLE) { 4296 ASSERT(prop_list != NULL); 4297 un->un_saved_throttle = un->un_throttle = 4298 prop_list->sdt_throttle; 4299 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4300 "sd_set_vers1_properties: throttle set to %d\n", 4301 prop_list->sdt_throttle); 4302 } 4303 4304 /* Set the per disk retry count according to the conf file or table. 
*/ 4305 if (flags & SD_CONF_BSET_NRR_COUNT) { 4306 ASSERT(prop_list != NULL); 4307 if (prop_list->sdt_not_rdy_retries) { 4308 un->un_notready_retry_count = 4309 prop_list->sdt_not_rdy_retries; 4310 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4311 "sd_set_vers1_properties: not ready retry count" 4312 " set to %d\n", un->un_notready_retry_count); 4313 } 4314 } 4315 4316 /* The controller type is reported for generic disk driver ioctls */ 4317 if (flags & SD_CONF_BSET_CTYPE) { 4318 ASSERT(prop_list != NULL); 4319 switch (prop_list->sdt_ctype) { 4320 case CTYPE_CDROM: 4321 un->un_ctype = prop_list->sdt_ctype; 4322 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4323 "sd_set_vers1_properties: ctype set to " 4324 "CTYPE_CDROM\n"); 4325 break; 4326 case CTYPE_CCS: 4327 un->un_ctype = prop_list->sdt_ctype; 4328 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4329 "sd_set_vers1_properties: ctype set to " 4330 "CTYPE_CCS\n"); 4331 break; 4332 case CTYPE_ROD: /* RW optical */ 4333 un->un_ctype = prop_list->sdt_ctype; 4334 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4335 "sd_set_vers1_properties: ctype set to " 4336 "CTYPE_ROD\n"); 4337 break; 4338 default: 4339 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4340 "sd_set_vers1_properties: Could not set " 4341 "invalid ctype value (%d)", 4342 prop_list->sdt_ctype); 4343 } 4344 } 4345 4346 /* Purple failover timeout */ 4347 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4348 ASSERT(prop_list != NULL); 4349 un->un_busy_retry_count = 4350 prop_list->sdt_busy_retries; 4351 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4352 "sd_set_vers1_properties: " 4353 "busy retry count set to %d\n", 4354 un->un_busy_retry_count); 4355 } 4356 4357 /* Purple reset retry count */ 4358 if (flags & SD_CONF_BSET_RST_RETRIES) { 4359 ASSERT(prop_list != NULL); 4360 un->un_reset_retry_count = 4361 prop_list->sdt_reset_retries; 4362 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4363 "sd_set_vers1_properties: " 4364 "reset retry count set to %d\n", 4365 un->un_reset_retry_count); 4366 } 4367 4368 /* Purple reservation release timeout */ 4369 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4370 ASSERT(prop_list != NULL); 4371 un->un_reserve_release_time = 4372 prop_list->sdt_reserv_rel_time; 4373 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4374 "sd_set_vers1_properties: " 4375 "reservation release timeout set to %d\n", 4376 un->un_reserve_release_time); 4377 } 4378 4379 /* 4380 * Driver flag telling the driver to verify that no commands are pending 4381 * for a device before issuing a Test Unit Ready. This is a workaround 4382 * for a firmware bug in some Seagate eliteI drives. 4383 */ 4384 if (flags & SD_CONF_BSET_TUR_CHECK) { 4385 un->un_f_cfg_tur_check = TRUE; 4386 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4387 "sd_set_vers1_properties: tur queue check set\n"); 4388 } 4389 4390 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4391 un->un_min_throttle = prop_list->sdt_min_throttle; 4392 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4393 "sd_set_vers1_properties: min throttle set to %d\n", 4394 un->un_min_throttle); 4395 } 4396 4397 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4398 un->un_f_disksort_disabled = 4399 (prop_list->sdt_disk_sort_dis != 0) ? 4400 TRUE : FALSE; 4401 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4402 "sd_set_vers1_properties: disksort disabled " 4403 "flag set to %d\n", 4404 prop_list->sdt_disk_sort_dis); 4405 } 4406 4407 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4408 un->un_f_lun_reset_enabled = 4409 (prop_list->sdt_lun_reset_enable != 0) ? 
4410 TRUE : FALSE; 4411 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4412 "sd_set_vers1_properties: lun reset enabled " 4413 "flag set to %d\n", 4414 prop_list->sdt_lun_reset_enable); 4415 } 4416 4417 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4418 un->un_f_suppress_cache_flush = 4419 (prop_list->sdt_suppress_cache_flush != 0) ? 4420 TRUE : FALSE; 4421 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4422 "sd_set_vers1_properties: suppress_cache_flush " 4423 "flag set to %d\n", 4424 prop_list->sdt_suppress_cache_flush); 4425 } 4426 4427 /* 4428 * Validate the throttle values. 4429 * If any of the numbers are invalid, set everything to defaults. 4430 */ 4431 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4432 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4433 (un->un_min_throttle > un->un_throttle)) { 4434 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4435 un->un_min_throttle = sd_min_throttle; 4436 } 4437 } 4438 4439 /* 4440 * Function: sd_is_lsi() 4441 * 4442 * Description: Check for lsi devices, step through the static device 4443 * table to match vid/pid. 4444 * 4445 * Args: un - ptr to sd_lun 4446 * 4447 * Notes: When creating new LSI property, need to add the new LSI property 4448 * to this function. 4449 */ 4450 static void 4451 sd_is_lsi(struct sd_lun *un) 4452 { 4453 char *id = NULL; 4454 int table_index; 4455 int idlen; 4456 void *prop; 4457 4458 ASSERT(un != NULL); 4459 for (table_index = 0; table_index < sd_disk_table_size; 4460 table_index++) { 4461 id = sd_disk_table[table_index].device_id; 4462 idlen = strlen(id); 4463 if (idlen == 0) { 4464 continue; 4465 } 4466 4467 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4468 prop = sd_disk_table[table_index].properties; 4469 if (prop == &lsi_properties || 4470 prop == &lsi_oem_properties || 4471 prop == &lsi_properties_scsi || 4472 prop == &symbios_properties) { 4473 un->un_f_cfg_is_lsi = TRUE; 4474 } 4475 break; 4476 } 4477 } 4478 } 4479 4480 /* 4481 * Function: sd_get_physical_geometry 4482 * 4483 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4484 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4485 * target, and use this information to initialize the physical 4486 * geometry cache specified by pgeom_p. 4487 * 4488 * MODE SENSE is an optional command, so failure in this case 4489 * does not necessarily denote an error. We want to use the 4490 * MODE SENSE commands to derive the physical geometry of the 4491 * device, but if either command fails, the logical geometry is 4492 * used as the fallback for disk label geometry in cmlb. 4493 * 4494 * This requires that un->un_blockcount and un->un_tgt_blocksize 4495 * have already been initialized for the current target and 4496 * that the current values be passed as args so that we don't 4497 * end up ever trying to use -1 as a valid value. This could 4498 * happen if either value is reset while we're not holding 4499 * the mutex. 4500 * 4501 * Arguments: un - driver soft state (unit) structure 4502 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4503 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4504 * to use the USCSI "direct" chain and bypass the normal 4505 * command waitq. 4506 * 4507 * Context: Kernel thread only (can sleep). 
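 *
 *		Illustrative caller sketch (the argument snapshot is an
 *		assumption for the example, not lifted from this file):
 *
 *			cmlb_geom_t	pgeom;
 *
 *			if (sd_get_physical_geometry(un, &pgeom,
 *			    un->un_blockcount, un->un_tgt_blocksize,
 *			    SD_PATH_DIRECT) != 0)
 *				... fall back to the logical geometry ...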
4508 */ 4509 4510 static int 4511 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4512 diskaddr_t capacity, int lbasize, int path_flag) 4513 { 4514 struct mode_format *page3p; 4515 struct mode_geometry *page4p; 4516 struct mode_header *headerp; 4517 int sector_size; 4518 int nsect; 4519 int nhead; 4520 int ncyl; 4521 int intrlv; 4522 int spc; 4523 diskaddr_t modesense_capacity; 4524 int rpm; 4525 int bd_len; 4526 int mode_header_length; 4527 uchar_t *p3bufp; 4528 uchar_t *p4bufp; 4529 int cdbsize; 4530 int ret = EIO; 4531 4532 ASSERT(un != NULL); 4533 4534 if (lbasize == 0) { 4535 if (ISCD(un)) { 4536 lbasize = 2048; 4537 } else { 4538 lbasize = un->un_sys_blocksize; 4539 } 4540 } 4541 pgeom_p->g_secsize = (unsigned short)lbasize; 4542 4543 /* 4544 * If the unit is a cd/dvd drive MODE SENSE page three 4545 * and MODE SENSE page four are reserved (see SBC spec 4546 * and MMC spec). To prevent soft errors just return 4547 * using the default LBA size. 4548 */ 4549 if (ISCD(un)) 4550 return (ret); 4551 4552 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4553 4554 /* 4555 * Retrieve MODE SENSE page 3 - Format Device Page 4556 */ 4557 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4558 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4559 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4560 != 0) { 4561 SD_ERROR(SD_LOG_COMMON, un, 4562 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4563 goto page3_exit; 4564 } 4565 4566 /* 4567 * Determine size of Block Descriptors in order to locate the mode 4568 * page data. ATAPI devices return 0, SCSI devices should return 4569 * MODE_BLK_DESC_LENGTH. 4570 */ 4571 headerp = (struct mode_header *)p3bufp; 4572 if (un->un_f_cfg_is_atapi == TRUE) { 4573 struct mode_header_grp2 *mhp = 4574 (struct mode_header_grp2 *)headerp; 4575 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4576 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4577 } else { 4578 mode_header_length = MODE_HEADER_LENGTH; 4579 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4580 } 4581 4582 if (bd_len > MODE_BLK_DESC_LENGTH) { 4583 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4584 "received unexpected bd_len of %d, page3\n", bd_len); 4585 goto page3_exit; 4586 } 4587 4588 page3p = (struct mode_format *) 4589 ((caddr_t)headerp + mode_header_length + bd_len); 4590 4591 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4592 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4593 "mode sense pg3 code mismatch %d\n", 4594 page3p->mode_page.code); 4595 goto page3_exit; 4596 } 4597 4598 /* 4599 * Use this physical geometry data only if BOTH MODE SENSE commands 4600 * complete successfully; otherwise, revert to the logical geometry. 4601 * So, we need to save everything in temporary variables. 
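 *
 *		The multi-byte fields in these mode pages are big-endian,
 *		which is why BE_16() is used below; e.g. a data_bytes_sect of
 *		{ 0x02, 0x00 } decodes to 512 and a sect_track of
 *		{ 0x00, 0x3f } to 63 sectors per track (values shown only as
 *		an illustration).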
4602 */ 4603 sector_size = BE_16(page3p->data_bytes_sect); 4604 4605 /* 4606 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4607 */ 4608 if (sector_size == 0) { 4609 sector_size = un->un_sys_blocksize; 4610 } else { 4611 sector_size &= ~(un->un_sys_blocksize - 1); 4612 } 4613 4614 nsect = BE_16(page3p->sect_track); 4615 intrlv = BE_16(page3p->interleave); 4616 4617 SD_INFO(SD_LOG_COMMON, un, 4618 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4619 SD_INFO(SD_LOG_COMMON, un, 4620 " mode page: %d; nsect: %d; sector size: %d;\n", 4621 page3p->mode_page.code, nsect, sector_size); 4622 SD_INFO(SD_LOG_COMMON, un, 4623 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4624 BE_16(page3p->track_skew), 4625 BE_16(page3p->cylinder_skew)); 4626 4627 4628 /* 4629 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4630 */ 4631 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4632 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4633 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4634 != 0) { 4635 SD_ERROR(SD_LOG_COMMON, un, 4636 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4637 goto page4_exit; 4638 } 4639 4640 /* 4641 * Determine size of Block Descriptors in order to locate the mode 4642 * page data. ATAPI devices return 0, SCSI devices should return 4643 * MODE_BLK_DESC_LENGTH. 4644 */ 4645 headerp = (struct mode_header *)p4bufp; 4646 if (un->un_f_cfg_is_atapi == TRUE) { 4647 struct mode_header_grp2 *mhp = 4648 (struct mode_header_grp2 *)headerp; 4649 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4650 } else { 4651 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4652 } 4653 4654 if (bd_len > MODE_BLK_DESC_LENGTH) { 4655 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4656 "received unexpected bd_len of %d, page4\n", bd_len); 4657 goto page4_exit; 4658 } 4659 4660 page4p = (struct mode_geometry *) 4661 ((caddr_t)headerp + mode_header_length + bd_len); 4662 4663 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4664 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4665 "mode sense pg4 code mismatch %d\n", 4666 page4p->mode_page.code); 4667 goto page4_exit; 4668 } 4669 4670 /* 4671 * Stash the data now, after we know that both commands completed. 4672 */ 4673 4674 4675 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4676 spc = nhead * nsect; 4677 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4678 rpm = BE_16(page4p->rpm); 4679 4680 modesense_capacity = spc * ncyl; 4681 4682 SD_INFO(SD_LOG_COMMON, un, 4683 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4684 SD_INFO(SD_LOG_COMMON, un, 4685 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4686 SD_INFO(SD_LOG_COMMON, un, 4687 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4688 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4689 (void *)pgeom_p, capacity); 4690 4691 /* 4692 * Compensate if the drive's geometry is not rectangular, i.e., 4693 * the product of C * H * S returned by MODE SENSE >= that returned 4694 * by read capacity. This is an idiosyncrasy of the original x86 4695 * disk subsystem. 
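 *
 *		Worked example with made-up numbers: nhead = 16 and nsect = 63
 *		give spc = 1008; with ncyl = 1000, MODE SENSE implies
 *		1,008,000 blocks. If READ CAPACITY reported 1,000,000 blocks,
 *		the 8,000 surplus blocks become alternate cylinders:
 *		g_acyl = (8000 + 1007) / 1008 = 8 and g_ncyl = 1000 - 8 = 992,
 *		so g_ncyl * spc (999,936) no longer exceeds the reported
 *		capacity.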
4696 */ 4697 if (modesense_capacity >= capacity) { 4698 SD_INFO(SD_LOG_COMMON, un, 4699 "sd_get_physical_geometry: adjusting acyl; " 4700 "old: %d; new: %d\n", pgeom_p->g_acyl, 4701 (modesense_capacity - capacity + spc - 1) / spc); 4702 if (sector_size != 0) { 4703 /* 1243403: NEC D38x7 drives don't support sec size */ 4704 pgeom_p->g_secsize = (unsigned short)sector_size; 4705 } 4706 pgeom_p->g_nsect = (unsigned short)nsect; 4707 pgeom_p->g_nhead = (unsigned short)nhead; 4708 pgeom_p->g_capacity = capacity; 4709 pgeom_p->g_acyl = 4710 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4711 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4712 } 4713 4714 pgeom_p->g_rpm = (unsigned short)rpm; 4715 pgeom_p->g_intrlv = (unsigned short)intrlv; 4716 ret = 0; 4717 4718 SD_INFO(SD_LOG_COMMON, un, 4719 "sd_get_physical_geometry: mode sense geometry:\n"); 4720 SD_INFO(SD_LOG_COMMON, un, 4721 " nsect: %d; sector size: %d; interlv: %d\n", 4722 nsect, sector_size, intrlv); 4723 SD_INFO(SD_LOG_COMMON, un, 4724 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4725 nhead, ncyl, rpm, modesense_capacity); 4726 SD_INFO(SD_LOG_COMMON, un, 4727 "sd_get_physical_geometry: (cached)\n"); 4728 SD_INFO(SD_LOG_COMMON, un, 4729 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4730 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4731 pgeom_p->g_nhead, pgeom_p->g_nsect); 4732 SD_INFO(SD_LOG_COMMON, un, 4733 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4734 pgeom_p->g_secsize, pgeom_p->g_capacity, 4735 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4736 4737 page4_exit: 4738 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4739 page3_exit: 4740 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4741 4742 return (ret); 4743 } 4744 4745 /* 4746 * Function: sd_get_virtual_geometry 4747 * 4748 * Description: Ask the controller to tell us about the target device. 4749 * 4750 * Arguments: un - pointer to softstate 4751 * capacity - disk capacity in #blocks 4752 * lbasize - disk block size in bytes 4753 * 4754 * Context: Kernel thread only 4755 */ 4756 4757 static int 4758 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4759 diskaddr_t capacity, int lbasize) 4760 { 4761 uint_t geombuf; 4762 int spc; 4763 4764 ASSERT(un != NULL); 4765 4766 /* Set sector size, and total number of sectors */ 4767 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4768 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4769 4770 /* Let the HBA tell us its geometry */ 4771 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4772 4773 /* A value of -1 indicates an undefined "geometry" property */ 4774 if (geombuf == (-1)) { 4775 return (EINVAL); 4776 } 4777 4778 /* Initialize the logical geometry cache. */ 4779 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4780 lgeom_p->g_nsect = geombuf & 0xffff; 4781 lgeom_p->g_secsize = un->un_sys_blocksize; 4782 4783 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4784 4785 /* 4786 * Note: The driver originally converted the capacity value from 4787 * target blocks to system blocks. However, the capacity value passed 4788 * to this routine is already in terms of system blocks (this scaling 4789 * is done when the READ CAPACITY command is issued and processed). 4790 * This 'error' may have gone undetected because the usage of g_ncyl 4791 * (which is based upon g_capacity) is very limited within the driver 4792 */ 4793 lgeom_p->g_capacity = capacity; 4794 4795 /* 4796 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4797 * hba may return zero values if the device has been removed. 4798 */ 4799 if (spc == 0) { 4800 lgeom_p->g_ncyl = 0; 4801 } else { 4802 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4803 } 4804 lgeom_p->g_acyl = 0; 4805 4806 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4807 return (0); 4808 4809 } 4810 /* 4811 * Function: sd_update_block_info 4812 * 4813 * Description: Calculate a byte count to sector count bitshift value 4814 * from sector size. 4815 * 4816 * Arguments: un: unit struct. 4817 * lbasize: new target sector size 4818 * capacity: new target capacity, ie. block count 4819 * 4820 * Context: Kernel thread context 4821 */ 4822 4823 static void 4824 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4825 { 4826 if (lbasize != 0) { 4827 un->un_tgt_blocksize = lbasize; 4828 un->un_f_tgt_blocksize_is_valid = TRUE; 4829 } 4830 4831 if (capacity != 0) { 4832 un->un_blockcount = capacity; 4833 un->un_f_blockcount_is_valid = TRUE; 4834 } 4835 } 4836 4837 4838 /* 4839 * Function: sd_register_devid 4840 * 4841 * Description: This routine will obtain the device id information from the 4842 * target, obtain the serial number, and register the device 4843 * id with the ddi framework. 4844 * 4845 * Arguments: devi - the system's dev_info_t for the device. 4846 * un - driver soft state (unit) structure 4847 * reservation_flag - indicates if a reservation conflict 4848 * occurred during attach 4849 * 4850 * Context: Kernel Thread 4851 */ 4852 static void 4853 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4854 { 4855 int rval = 0; 4856 uchar_t *inq80 = NULL; 4857 size_t inq80_len = MAX_INQUIRY_SIZE; 4858 size_t inq80_resid = 0; 4859 uchar_t *inq83 = NULL; 4860 size_t inq83_len = MAX_INQUIRY_SIZE; 4861 size_t inq83_resid = 0; 4862 int dlen, len; 4863 char *sn; 4864 4865 ASSERT(un != NULL); 4866 ASSERT(mutex_owned(SD_MUTEX(un))); 4867 ASSERT((SD_DEVINFO(un)) == devi); 4868 4869 /* 4870 * If transport has already registered a devid for this target 4871 * then that takes precedence over the driver's determination 4872 * of the devid. 4873 */ 4874 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 4875 ASSERT(un->un_devid); 4876 return; /* use devid registered by the transport */ 4877 } 4878 4879 /* 4880 * This is the case of antiquated Sun disk drives that have the 4881 * FAB_DEVID property set in the disk_table. These drives 4882 * manage the devid's by storing them in last 2 available sectors 4883 * on the drive and have them fabricated by the ddi layer by calling 4884 * ddi_devid_init and passing the DEVID_FAB flag. 4885 */ 4886 if (un->un_f_opt_fab_devid == TRUE) { 4887 /* 4888 * Depending on EINVAL isn't reliable, since a reserved disk 4889 * may result in invalid geometry, so check to make sure a 4890 * reservation conflict did not occur during attach. 4891 */ 4892 if ((sd_get_devid(un) == EINVAL) && 4893 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4894 /* 4895 * The devid is invalid AND there is no reservation 4896 * conflict. Fabricate a new devid. 
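 *
 *			(sd_create_devid(), defined further below, fabricates
 *			the id with ddi_devid_init(..., DEVID_FAB, ...) and
 *			persists it with sd_write_deviceid(); if that write
 *			fails it frees the id and leaves un->un_devid NULL,
 *			so the registration step below is simply skipped.)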
4897 */ 4898 (void) sd_create_devid(un); 4899 } 4900 4901 /* Register the devid if it exists */ 4902 if (un->un_devid != NULL) { 4903 (void) ddi_devid_register(SD_DEVINFO(un), 4904 un->un_devid); 4905 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4906 "sd_register_devid: Devid Fabricated\n"); 4907 } 4908 return; 4909 } 4910 4911 /* 4912 * We check the availability of the World Wide Name (0x83) and Unit 4913 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4914 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4915 * 0x83 is available, that is the best choice. Our next choice is 4916 * 0x80. If neither are available, we munge the devid from the device 4917 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4918 * to fabricate a devid for non-Sun qualified disks. 4919 */ 4920 if (sd_check_vpd_page_support(un) == 0) { 4921 /* collect page 80 data if available */ 4922 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4923 4924 mutex_exit(SD_MUTEX(un)); 4925 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4926 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4927 0x01, 0x80, &inq80_resid); 4928 4929 if (rval != 0) { 4930 kmem_free(inq80, inq80_len); 4931 inq80 = NULL; 4932 inq80_len = 0; 4933 } else if (ddi_prop_exists( 4934 DDI_DEV_T_NONE, SD_DEVINFO(un), 4935 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 4936 INQUIRY_SERIAL_NO) == 0) { 4937 /* 4938 * If we don't already have a serial number 4939 * property, do quick verify of data returned 4940 * and define property. 4941 */ 4942 dlen = inq80_len - inq80_resid; 4943 len = (size_t)inq80[3]; 4944 if ((dlen >= 4) && ((len + 4) <= dlen)) { 4945 /* 4946 * Ensure sn termination, skip leading 4947 * blanks, and create property 4948 * 'inquiry-serial-no'. 4949 */ 4950 sn = (char *)&inq80[4]; 4951 sn[len] = 0; 4952 while (*sn && (*sn == ' ')) 4953 sn++; 4954 if (*sn) { 4955 (void) ddi_prop_update_string( 4956 DDI_DEV_T_NONE, 4957 SD_DEVINFO(un), 4958 INQUIRY_SERIAL_NO, sn); 4959 } 4960 } 4961 } 4962 mutex_enter(SD_MUTEX(un)); 4963 } 4964 4965 /* collect page 83 data if available */ 4966 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4967 mutex_exit(SD_MUTEX(un)); 4968 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4969 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4970 0x01, 0x83, &inq83_resid); 4971 4972 if (rval != 0) { 4973 kmem_free(inq83, inq83_len); 4974 inq83 = NULL; 4975 inq83_len = 0; 4976 } 4977 mutex_enter(SD_MUTEX(un)); 4978 } 4979 } 4980 4981 /* encode best devid possible based on data available */ 4982 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4983 (char *)ddi_driver_name(SD_DEVINFO(un)), 4984 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4985 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4986 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4987 4988 /* devid successfully encoded, register devid */ 4989 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4990 4991 } else { 4992 /* 4993 * Unable to encode a devid based on data available. 4994 * This is not a Sun qualified disk. Older Sun disk 4995 * drives that have the SD_FAB_DEVID property 4996 * set in the disk_table and non Sun qualified 4997 * disks are treated in the same manner. These 4998 * drives manage the devid's by storing them in 4999 * last 2 available sectors on the drive and 5000 * have them fabricated by the ddi layer by 5001 * calling ddi_devid_init and passing the 5002 * DEVID_FAB flag. 5003 * Create a fabricate devid only if there's no 5004 * fabricate devid existed. 
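 *		In other words, sd_get_devid() is tried first; only when no
 *		valid devid can be read back from the reserved sector is a
 *		new one fabricated, so an id fabricated and written during an
 *		earlier attach is reused rather than replaced.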
5005 */ 5006 if (sd_get_devid(un) == EINVAL) { 5007 (void) sd_create_devid(un); 5008 } 5009 un->un_f_opt_fab_devid = TRUE; 5010 5011 /* Register the devid if it exists */ 5012 if (un->un_devid != NULL) { 5013 (void) ddi_devid_register(SD_DEVINFO(un), 5014 un->un_devid); 5015 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5016 "sd_register_devid: devid fabricated using " 5017 "ddi framework\n"); 5018 } 5019 } 5020 5021 /* clean up resources */ 5022 if (inq80 != NULL) { 5023 kmem_free(inq80, inq80_len); 5024 } 5025 if (inq83 != NULL) { 5026 kmem_free(inq83, inq83_len); 5027 } 5028 } 5029 5030 5031 5032 /* 5033 * Function: sd_get_devid 5034 * 5035 * Description: This routine will return 0 if a valid device id has been 5036 * obtained from the target and stored in the soft state. If a 5037 * valid device id has not been previously read and stored, a 5038 * read attempt will be made. 5039 * 5040 * Arguments: un - driver soft state (unit) structure 5041 * 5042 * Return Code: 0 if we successfully get the device id 5043 * 5044 * Context: Kernel Thread 5045 */ 5046 5047 static int 5048 sd_get_devid(struct sd_lun *un) 5049 { 5050 struct dk_devid *dkdevid; 5051 ddi_devid_t tmpid; 5052 uint_t *ip; 5053 size_t sz; 5054 diskaddr_t blk; 5055 int status; 5056 int chksum; 5057 int i; 5058 size_t buffer_size; 5059 5060 ASSERT(un != NULL); 5061 ASSERT(mutex_owned(SD_MUTEX(un))); 5062 5063 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5064 un); 5065 5066 if (un->un_devid != NULL) { 5067 return (0); 5068 } 5069 5070 mutex_exit(SD_MUTEX(un)); 5071 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5072 (void *)SD_PATH_DIRECT) != 0) { 5073 mutex_enter(SD_MUTEX(un)); 5074 return (EINVAL); 5075 } 5076 5077 /* 5078 * Read and verify device id, stored in the reserved cylinders at the 5079 * end of the disk. Backup label is on the odd sectors of the last 5080 * track of the last cylinder. Device id will be on track of the next 5081 * to last cylinder. 
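 *
 *	The block read back is self-checking: every 32-bit word except the
 *	last is XOR-ed together and the result must equal the checksum value
 *	retrieved with DKD_GETCHKSUM(). A restatement of the verification
 *	performed below:
 *
 *		uint_t	sum = 0;
 *		uint_t	*ip = (uint_t *)dkdevid;
 *		int	n = (un->un_sys_blocksize - sizeof (int)) /
 *		    sizeof (int);
 *
 *		for (i = 0; i < n; i++)
 *			sum ^= ip[i];
 *		valid = (DKD_GETCHKSUM(dkdevid) == sum);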
5082 */ 5083 mutex_enter(SD_MUTEX(un)); 5084 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5085 mutex_exit(SD_MUTEX(un)); 5086 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5087 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 5088 SD_PATH_DIRECT); 5089 if (status != 0) { 5090 goto error; 5091 } 5092 5093 /* Validate the revision */ 5094 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5095 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5096 status = EINVAL; 5097 goto error; 5098 } 5099 5100 /* Calculate the checksum */ 5101 chksum = 0; 5102 ip = (uint_t *)dkdevid; 5103 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5104 i++) { 5105 chksum ^= ip[i]; 5106 } 5107 5108 /* Compare the checksums */ 5109 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5110 status = EINVAL; 5111 goto error; 5112 } 5113 5114 /* Validate the device id */ 5115 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5116 status = EINVAL; 5117 goto error; 5118 } 5119 5120 /* 5121 * Store the device id in the driver soft state 5122 */ 5123 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5124 tmpid = kmem_alloc(sz, KM_SLEEP); 5125 5126 mutex_enter(SD_MUTEX(un)); 5127 5128 un->un_devid = tmpid; 5129 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5130 5131 kmem_free(dkdevid, buffer_size); 5132 5133 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5134 5135 return (status); 5136 error: 5137 mutex_enter(SD_MUTEX(un)); 5138 kmem_free(dkdevid, buffer_size); 5139 return (status); 5140 } 5141 5142 5143 /* 5144 * Function: sd_create_devid 5145 * 5146 * Description: This routine will fabricate the device id and write it 5147 * to the disk. 5148 * 5149 * Arguments: un - driver soft state (unit) structure 5150 * 5151 * Return Code: value of the fabricated device id 5152 * 5153 * Context: Kernel Thread 5154 */ 5155 5156 static ddi_devid_t 5157 sd_create_devid(struct sd_lun *un) 5158 { 5159 ASSERT(un != NULL); 5160 5161 /* Fabricate the devid */ 5162 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5163 == DDI_FAILURE) { 5164 return (NULL); 5165 } 5166 5167 /* Write the devid to disk */ 5168 if (sd_write_deviceid(un) != 0) { 5169 ddi_devid_free(un->un_devid); 5170 un->un_devid = NULL; 5171 } 5172 5173 return (un->un_devid); 5174 } 5175 5176 5177 /* 5178 * Function: sd_write_deviceid 5179 * 5180 * Description: This routine will write the device id to the disk 5181 * reserved sector. 
5182 * 5183 * Arguments: un - driver soft state (unit) structure 5184 * 5185 * Return Code: EINVAL 5186 * value returned by sd_send_scsi_cmd 5187 * 5188 * Context: Kernel Thread 5189 */ 5190 5191 static int 5192 sd_write_deviceid(struct sd_lun *un) 5193 { 5194 struct dk_devid *dkdevid; 5195 diskaddr_t blk; 5196 uint_t *ip, chksum; 5197 int status; 5198 int i; 5199 5200 ASSERT(mutex_owned(SD_MUTEX(un))); 5201 5202 mutex_exit(SD_MUTEX(un)); 5203 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5204 (void *)SD_PATH_DIRECT) != 0) { 5205 mutex_enter(SD_MUTEX(un)); 5206 return (-1); 5207 } 5208 5209 5210 /* Allocate the buffer */ 5211 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5212 5213 /* Fill in the revision */ 5214 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5215 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5216 5217 /* Copy in the device id */ 5218 mutex_enter(SD_MUTEX(un)); 5219 bcopy(un->un_devid, &dkdevid->dkd_devid, 5220 ddi_devid_sizeof(un->un_devid)); 5221 mutex_exit(SD_MUTEX(un)); 5222 5223 /* Calculate the checksum */ 5224 chksum = 0; 5225 ip = (uint_t *)dkdevid; 5226 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5227 i++) { 5228 chksum ^= ip[i]; 5229 } 5230 5231 /* Fill-in checksum */ 5232 DKD_FORMCHKSUM(chksum, dkdevid); 5233 5234 /* Write the reserved sector */ 5235 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 5236 SD_PATH_DIRECT); 5237 5238 kmem_free(dkdevid, un->un_sys_blocksize); 5239 5240 mutex_enter(SD_MUTEX(un)); 5241 return (status); 5242 } 5243 5244 5245 /* 5246 * Function: sd_check_vpd_page_support 5247 * 5248 * Description: This routine sends an inquiry command with the EVPD bit set and 5249 * a page code of 0x00 to the device. It is used to determine which 5250 * vital product pages are available to find the devid. We are 5251 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5252 * device does not support that command. 5253 * 5254 * Arguments: un - driver soft state (unit) structure 5255 * 5256 * Return Code: 0 - success 5257 * 1 - check condition 5258 * 5259 * Context: This routine can sleep. 5260 */ 5261 5262 static int 5263 sd_check_vpd_page_support(struct sd_lun *un) 5264 { 5265 uchar_t *page_list = NULL; 5266 uchar_t page_length = 0xff; /* Use max possible length */ 5267 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5268 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5269 int rval = 0; 5270 int counter; 5271 5272 ASSERT(un != NULL); 5273 ASSERT(mutex_owned(SD_MUTEX(un))); 5274 5275 mutex_exit(SD_MUTEX(un)); 5276 5277 /* 5278 * We'll set the page length to the maximum to save figuring it out 5279 * with an additional call. 5280 */ 5281 page_list = kmem_zalloc(page_length, KM_SLEEP); 5282 5283 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5284 page_code, NULL); 5285 5286 mutex_enter(SD_MUTEX(un)); 5287 5288 /* 5289 * Now we must validate that the device accepted the command, as some 5290 * drives do not support it. If the drive does support it, we will 5291 * return 0, and the supported pages will be in un_vpd_page_mask. If 5292 * not, we return -1. 5293 */ 5294 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5295 /* Loop to find one of the 2 pages we need */ 5296 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5297 5298 /* 5299 * Pages are returned in ascending order, and 0x83 is what we 5300 * are hoping for. 
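 *
 * [Editorial aside with an illustrative sketch; not part of the original
 * source.]  A page-0x00 (Supported VPD Pages) response follows the
 * standard layout: the header repeats the page code (the VPD_MODE_PAGE
 * check above verifies that 0x00 came back), byte 3 (VPD_PAGE_LENGTH)
 * gives the number of page-code bytes that follow, and the supported
 * page codes themselves start at byte 4 in ascending order.  Stripped of
 * the driver specifics, the scan below is essentially the following
 * (hypothetical helper name; the real loop also records pages 0x00,
 * 0x81, 0x82 and 0x86):
 *
 *	static uint_t
 *	sd_scan_vpd0(uchar_t *page_list)
 *	{
 *		uchar_t	*codes = &page_list[4];
 *		int	ncodes = page_list[3];
 *		uint_t	mask = 0;
 *		int	i;
 *
 *		for (i = 0; i < ncodes; i++) {
 *			if (codes[i] == 0x80)
 *				mask |= SD_VPD_UNIT_SERIAL_PG;
 *			else if (codes[i] == 0x83)
 *				mask |= SD_VPD_DEVID_WWN_PG;
 *		}
 *		return (mask);
 *	}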
5301 */ 5302 while ((page_list[counter] <= 0x86) && 5303 (counter <= (page_list[VPD_PAGE_LENGTH] + 5304 VPD_HEAD_OFFSET))) { 5305 /* 5306 * Add 3 because page_list[3] is the number of 5307 * pages minus 3 5308 */ 5309 5310 switch (page_list[counter]) { 5311 case 0x00: 5312 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5313 break; 5314 case 0x80: 5315 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5316 break; 5317 case 0x81: 5318 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5319 break; 5320 case 0x82: 5321 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5322 break; 5323 case 0x83: 5324 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5325 break; 5326 case 0x86: 5327 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5328 break; 5329 } 5330 counter++; 5331 } 5332 5333 } else { 5334 rval = -1; 5335 5336 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5337 "sd_check_vpd_page_support: This drive does not implement " 5338 "VPD pages.\n"); 5339 } 5340 5341 kmem_free(page_list, page_length); 5342 5343 return (rval); 5344 } 5345 5346 5347 /* 5348 * Function: sd_setup_pm 5349 * 5350 * Description: Initialize Power Management on the device 5351 * 5352 * Context: Kernel Thread 5353 */ 5354 5355 static void 5356 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5357 { 5358 uint_t log_page_size; 5359 uchar_t *log_page_data; 5360 int rval; 5361 5362 /* 5363 * Since we are called from attach, holding a mutex for 5364 * un is unnecessary. Because some of the routines called 5365 * from here require SD_MUTEX to not be held, assert this 5366 * right up front. 5367 */ 5368 ASSERT(!mutex_owned(SD_MUTEX(un))); 5369 /* 5370 * Since the sd device does not have the 'reg' property, 5371 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5372 * The following code is to tell cpr that this device 5373 * DOES need to be suspended and resumed. 5374 */ 5375 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5376 "pm-hardware-state", "needs-suspend-resume"); 5377 5378 /* 5379 * This complies with the new power management framework 5380 * for certain desktop machines. Create the pm_components 5381 * property as a string array property. 5382 */ 5383 if (un->un_f_pm_supported) { 5384 /* 5385 * not all devices have a motor, try it first. 5386 * some devices may return ILLEGAL REQUEST, some 5387 * will hang 5388 * The following START_STOP_UNIT is used to check if target 5389 * device has a motor. 5390 */ 5391 un->un_f_start_stop_supported = TRUE; 5392 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5393 SD_PATH_DIRECT) != 0) { 5394 un->un_f_start_stop_supported = FALSE; 5395 } 5396 5397 /* 5398 * create pm properties anyways otherwise the parent can't 5399 * go to sleep 5400 */ 5401 (void) sd_create_pm_components(devi, un); 5402 un->un_f_pm_is_enabled = TRUE; 5403 return; 5404 } 5405 5406 if (!un->un_f_log_sense_supported) { 5407 un->un_power_level = SD_SPINDLE_ON; 5408 un->un_f_pm_is_enabled = FALSE; 5409 return; 5410 } 5411 5412 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5413 5414 #ifdef SDDEBUG 5415 if (sd_force_pm_supported) { 5416 /* Force a successful result */ 5417 rval = 1; 5418 } 5419 #endif 5420 5421 /* 5422 * If the start-stop cycle counter log page is not supported 5423 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5424 * then we should not create the pm_components property. 5425 */ 5426 if (rval == -1) { 5427 /* 5428 * Error. 5429 * Reading log sense failed, most likely this is 5430 * an older drive that does not support log sense. 5431 * If this fails auto-pm is not supported. 
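 *
 * [Editorial aside with an illustrative sketch; not part of the original
 * source.]  sd_log_page_supported() itself is outside this part of the
 * listing.  A common way to implement such a probe is to LOG SENSE page
 * 0x00 (Supported Log Pages), whose payload is a big-endian length at
 * bytes 2-3 followed by the list of supported page codes; a hypothetical
 * stand-alone version might look like:
 *
 *	// Returns 1 if 'page' is listed, 0 if not, -1 on command failure.
 *	static int
 *	log_page_listed(struct sd_lun *un, uchar_t page)
 *	{
 *		uchar_t	buf[256];
 *		int	i, nbytes;
 *
 *		if (sd_send_scsi_LOG_SENSE(un, buf, sizeof (buf), 0,
 *		    0x01, 0, SD_PATH_DIRECT) != 0)
 *			return (-1);
 *		nbytes = (buf[2] << 8) | buf[3];
 *		if (nbytes > (int)sizeof (buf) - 4)
 *			nbytes = (int)sizeof (buf) - 4;
 *		for (i = 0; i < nbytes; i++) {
 *			if (buf[4 + i] == page)
 *				return (1);
 *		}
 *		return (0);
 *	}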
5432 */ 5433 un->un_power_level = SD_SPINDLE_ON; 5434 un->un_f_pm_is_enabled = FALSE; 5435 5436 } else if (rval == 0) { 5437 /* 5438 * Page not found. 5439 * The start stop cycle counter is implemented as page 5440 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5441 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5442 */ 5443 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5444 /* 5445 * Page found, use this one. 5446 */ 5447 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5448 un->un_f_pm_is_enabled = TRUE; 5449 } else { 5450 /* 5451 * Error or page not found. 5452 * auto-pm is not supported for this device. 5453 */ 5454 un->un_power_level = SD_SPINDLE_ON; 5455 un->un_f_pm_is_enabled = FALSE; 5456 } 5457 } else { 5458 /* 5459 * Page found, use it. 5460 */ 5461 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5462 un->un_f_pm_is_enabled = TRUE; 5463 } 5464 5465 5466 if (un->un_f_pm_is_enabled == TRUE) { 5467 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5468 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5469 5470 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5471 log_page_size, un->un_start_stop_cycle_page, 5472 0x01, 0, SD_PATH_DIRECT); 5473 #ifdef SDDEBUG 5474 if (sd_force_pm_supported) { 5475 /* Force a successful result */ 5476 rval = 0; 5477 } 5478 #endif 5479 5480 /* 5481 * If the Log sense for Page( Start/stop cycle counter page) 5482 * succeeds, then power management is supported and we can 5483 * enable auto-pm. 5484 */ 5485 if (rval == 0) { 5486 (void) sd_create_pm_components(devi, un); 5487 } else { 5488 un->un_power_level = SD_SPINDLE_ON; 5489 un->un_f_pm_is_enabled = FALSE; 5490 } 5491 5492 kmem_free(log_page_data, log_page_size); 5493 } 5494 } 5495 5496 5497 /* 5498 * Function: sd_create_pm_components 5499 * 5500 * Description: Initialize PM property. 5501 * 5502 * Context: Kernel thread context 5503 */ 5504 5505 static void 5506 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5507 { 5508 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5509 5510 ASSERT(!mutex_owned(SD_MUTEX(un))); 5511 5512 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5513 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5514 /* 5515 * When components are initially created they are idle, 5516 * power up any non-removables. 5517 * Note: the return value of pm_raise_power can't be used 5518 * for determining if PM should be enabled for this device. 5519 * Even if you check the return values and remove this 5520 * property created above, the PM framework will not honor the 5521 * change after the first call to pm_raise_power. Hence, 5522 * removal of that property does not help if pm_raise_power 5523 * fails. In the case of removable media, the start/stop 5524 * will fail if the media is not present. 5525 */ 5526 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5527 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5528 mutex_enter(SD_MUTEX(un)); 5529 un->un_power_level = SD_SPINDLE_ON; 5530 mutex_enter(&un->un_pm_mutex); 5531 /* Set to on and not busy. */ 5532 un->un_pm_count = 0; 5533 } else { 5534 mutex_enter(SD_MUTEX(un)); 5535 un->un_power_level = SD_SPINDLE_OFF; 5536 mutex_enter(&un->un_pm_mutex); 5537 /* Set to off. 
*/ 5538 un->un_pm_count = -1; 5539 } 5540 mutex_exit(&un->un_pm_mutex); 5541 mutex_exit(SD_MUTEX(un)); 5542 } else { 5543 un->un_power_level = SD_SPINDLE_ON; 5544 un->un_f_pm_is_enabled = FALSE; 5545 } 5546 } 5547 5548 5549 /* 5550 * Function: sd_ddi_suspend 5551 * 5552 * Description: Performs system power-down operations. This includes 5553 * setting the drive state to indicate its suspended so 5554 * that no new commands will be accepted. Also, wait for 5555 * all commands that are in transport or queued to a timer 5556 * for retry to complete. All timeout threads are cancelled. 5557 * 5558 * Return Code: DDI_FAILURE or DDI_SUCCESS 5559 * 5560 * Context: Kernel thread context 5561 */ 5562 5563 static int 5564 sd_ddi_suspend(dev_info_t *devi) 5565 { 5566 struct sd_lun *un; 5567 clock_t wait_cmds_complete; 5568 5569 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5570 if (un == NULL) { 5571 return (DDI_FAILURE); 5572 } 5573 5574 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5575 5576 mutex_enter(SD_MUTEX(un)); 5577 5578 /* Return success if the device is already suspended. */ 5579 if (un->un_state == SD_STATE_SUSPENDED) { 5580 mutex_exit(SD_MUTEX(un)); 5581 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5582 "device already suspended, exiting\n"); 5583 return (DDI_SUCCESS); 5584 } 5585 5586 /* Return failure if the device is being used by HA */ 5587 if (un->un_resvd_status & 5588 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5589 mutex_exit(SD_MUTEX(un)); 5590 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5591 "device in use by HA, exiting\n"); 5592 return (DDI_FAILURE); 5593 } 5594 5595 /* 5596 * Return failure if the device is in a resource wait 5597 * or power changing state. 5598 */ 5599 if ((un->un_state == SD_STATE_RWAIT) || 5600 (un->un_state == SD_STATE_PM_CHANGING)) { 5601 mutex_exit(SD_MUTEX(un)); 5602 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5603 "device in resource wait state, exiting\n"); 5604 return (DDI_FAILURE); 5605 } 5606 5607 5608 un->un_save_state = un->un_last_state; 5609 New_state(un, SD_STATE_SUSPENDED); 5610 5611 /* 5612 * Wait for all commands that are in transport or queued to a timer 5613 * for retry to complete. 5614 * 5615 * While waiting, no new commands will be accepted or sent because of 5616 * the new state we set above. 5617 * 5618 * Wait till current operation has completed. If we are in the resource 5619 * wait state (with an intr outstanding) then we need to wait till the 5620 * intr completes and starts the next cmd. We want to wait for 5621 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5622 */ 5623 wait_cmds_complete = ddi_get_lbolt() + 5624 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5625 5626 while (un->un_ncmds_in_transport != 0) { 5627 /* 5628 * Fail if commands do not finish in the specified time. 5629 */ 5630 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5631 wait_cmds_complete) == -1) { 5632 /* 5633 * Undo the state changes made above. Everything 5634 * must go back to it's original value. 5635 */ 5636 Restore_state(un); 5637 un->un_last_state = un->un_save_state; 5638 /* Wake up any threads that might be waiting. 
*/ 5639 cv_broadcast(&un->un_suspend_cv); 5640 mutex_exit(SD_MUTEX(un)); 5641 SD_ERROR(SD_LOG_IO_PM, un, 5642 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5643 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5644 return (DDI_FAILURE); 5645 } 5646 } 5647 5648 /* 5649 * Cancel SCSI watch thread and timeouts, if any are active 5650 */ 5651 5652 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5653 opaque_t temp_token = un->un_swr_token; 5654 mutex_exit(SD_MUTEX(un)); 5655 scsi_watch_suspend(temp_token); 5656 mutex_enter(SD_MUTEX(un)); 5657 } 5658 5659 if (un->un_reset_throttle_timeid != NULL) { 5660 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5661 un->un_reset_throttle_timeid = NULL; 5662 mutex_exit(SD_MUTEX(un)); 5663 (void) untimeout(temp_id); 5664 mutex_enter(SD_MUTEX(un)); 5665 } 5666 5667 if (un->un_dcvb_timeid != NULL) { 5668 timeout_id_t temp_id = un->un_dcvb_timeid; 5669 un->un_dcvb_timeid = NULL; 5670 mutex_exit(SD_MUTEX(un)); 5671 (void) untimeout(temp_id); 5672 mutex_enter(SD_MUTEX(un)); 5673 } 5674 5675 mutex_enter(&un->un_pm_mutex); 5676 if (un->un_pm_timeid != NULL) { 5677 timeout_id_t temp_id = un->un_pm_timeid; 5678 un->un_pm_timeid = NULL; 5679 mutex_exit(&un->un_pm_mutex); 5680 mutex_exit(SD_MUTEX(un)); 5681 (void) untimeout(temp_id); 5682 mutex_enter(SD_MUTEX(un)); 5683 } else { 5684 mutex_exit(&un->un_pm_mutex); 5685 } 5686 5687 if (un->un_retry_timeid != NULL) { 5688 timeout_id_t temp_id = un->un_retry_timeid; 5689 un->un_retry_timeid = NULL; 5690 mutex_exit(SD_MUTEX(un)); 5691 (void) untimeout(temp_id); 5692 mutex_enter(SD_MUTEX(un)); 5693 5694 if (un->un_retry_bp != NULL) { 5695 un->un_retry_bp->av_forw = un->un_waitq_headp; 5696 un->un_waitq_headp = un->un_retry_bp; 5697 if (un->un_waitq_tailp == NULL) { 5698 un->un_waitq_tailp = un->un_retry_bp; 5699 } 5700 un->un_retry_bp = NULL; 5701 un->un_retry_statp = NULL; 5702 } 5703 } 5704 5705 if (un->un_direct_priority_timeid != NULL) { 5706 timeout_id_t temp_id = un->un_direct_priority_timeid; 5707 un->un_direct_priority_timeid = NULL; 5708 mutex_exit(SD_MUTEX(un)); 5709 (void) untimeout(temp_id); 5710 mutex_enter(SD_MUTEX(un)); 5711 } 5712 5713 if (un->un_f_is_fibre == TRUE) { 5714 /* 5715 * Remove callbacks for insert and remove events 5716 */ 5717 if (un->un_insert_event != NULL) { 5718 mutex_exit(SD_MUTEX(un)); 5719 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5720 mutex_enter(SD_MUTEX(un)); 5721 un->un_insert_event = NULL; 5722 } 5723 5724 if (un->un_remove_event != NULL) { 5725 mutex_exit(SD_MUTEX(un)); 5726 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5727 mutex_enter(SD_MUTEX(un)); 5728 un->un_remove_event = NULL; 5729 } 5730 } 5731 5732 mutex_exit(SD_MUTEX(un)); 5733 5734 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5735 5736 return (DDI_SUCCESS); 5737 } 5738 5739 5740 /* 5741 * Function: sd_ddi_pm_suspend 5742 * 5743 * Description: Set the drive state to low power. 5744 * Someone else is required to actually change the drive 5745 * power level. 5746 * 5747 * Arguments: un - driver soft state (unit) structure 5748 * 5749 * Return Code: DDI_FAILURE or DDI_SUCCESS 5750 * 5751 * Context: Kernel thread context 5752 */ 5753 5754 static int 5755 sd_ddi_pm_suspend(struct sd_lun *un) 5756 { 5757 ASSERT(un != NULL); 5758 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5759 5760 ASSERT(!mutex_owned(SD_MUTEX(un))); 5761 mutex_enter(SD_MUTEX(un)); 5762 5763 /* 5764 * Exit if power management is not enabled for this device, or if 5765 * the device is being used by HA. 
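 *
 * [Editorial aside with an illustrative sketch; not part of the original
 * source.]  The timeout-cancellation sequences in sd_ddi_suspend() above
 * all follow the same idiom: capture the id and clear the soft-state
 * field while SD_MUTEX is held, then drop the mutex around untimeout(9F),
 * because untimeout() may wait for an already-running handler to finish
 * and that handler may itself need SD_MUTEX.  The skeleton (with a
 * hypothetical field name) is:
 *
 *	timeout_id_t tid = un->un_xxx_timeid;	// hypothetical field
 *
 *	un->un_xxx_timeid = NULL;	// claim it while holding SD_MUTEX
 *	mutex_exit(SD_MUTEX(un));
 *	(void) untimeout(tid);		// may block on a running handler
 *	mutex_enter(SD_MUTEX(un));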
5766 */ 5767 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5768 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5769 mutex_exit(SD_MUTEX(un)); 5770 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5771 return (DDI_SUCCESS); 5772 } 5773 5774 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5775 un->un_ncmds_in_driver); 5776 5777 /* 5778 * See if the device is not busy, ie.: 5779 * - we have no commands in the driver for this device 5780 * - not waiting for resources 5781 */ 5782 if ((un->un_ncmds_in_driver == 0) && 5783 (un->un_state != SD_STATE_RWAIT)) { 5784 /* 5785 * The device is not busy, so it is OK to go to low power state. 5786 * Indicate low power, but rely on someone else to actually 5787 * change it. 5788 */ 5789 mutex_enter(&un->un_pm_mutex); 5790 un->un_pm_count = -1; 5791 mutex_exit(&un->un_pm_mutex); 5792 un->un_power_level = SD_SPINDLE_OFF; 5793 } 5794 5795 mutex_exit(SD_MUTEX(un)); 5796 5797 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5798 5799 return (DDI_SUCCESS); 5800 } 5801 5802 5803 /* 5804 * Function: sd_ddi_resume 5805 * 5806 * Description: Performs system power-up operations.. 5807 * 5808 * Return Code: DDI_SUCCESS 5809 * DDI_FAILURE 5810 * 5811 * Context: Kernel thread context 5812 */ 5813 5814 static int 5815 sd_ddi_resume(dev_info_t *devi) 5816 { 5817 struct sd_lun *un; 5818 5819 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5820 if (un == NULL) { 5821 return (DDI_FAILURE); 5822 } 5823 5824 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 5825 5826 mutex_enter(SD_MUTEX(un)); 5827 Restore_state(un); 5828 5829 /* 5830 * Restore the state which was saved to give the 5831 * the right state in un_last_state 5832 */ 5833 un->un_last_state = un->un_save_state; 5834 /* 5835 * Note: throttle comes back at full. 5836 * Also note: this MUST be done before calling pm_raise_power 5837 * otherwise the system can get hung in biowait. The scenario where 5838 * this'll happen is under cpr suspend. Writing of the system 5839 * state goes through sddump, which writes 0 to un_throttle. If 5840 * writing the system state then fails, example if the partition is 5841 * too small, then cpr attempts a resume. If throttle isn't restored 5842 * from the saved value until after calling pm_raise_power then 5843 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 5844 * in biowait. 5845 */ 5846 un->un_throttle = un->un_saved_throttle; 5847 5848 /* 5849 * The chance of failure is very rare as the only command done in power 5850 * entry point is START command when you transition from 0->1 or 5851 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 5852 * which suspend was done. Ignore the return value as the resume should 5853 * not be failed. In the case of removable media the media need not be 5854 * inserted and hence there is a chance that raise power will fail with 5855 * media not present. 5856 */ 5857 if (un->un_f_attach_spinup) { 5858 mutex_exit(SD_MUTEX(un)); 5859 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5860 mutex_enter(SD_MUTEX(un)); 5861 } 5862 5863 /* 5864 * Don't broadcast to the suspend cv and therefore possibly 5865 * start I/O until after power has been restored. 
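 *
 * [Editorial aside with an illustrative sketch; not part of the original
 * source.]  The New_state()/Restore_state() macros used in the
 * suspend/resume paths above are defined in sddef.h and are not shown in
 * this part of the listing; conceptually they behave roughly like the
 * hypothetical helpers below.  That is why sd_ddi_resume() re-patches
 * un_last_state from un_save_state right after Restore_state(): the
 * restore by itself leaves un_last_state holding SD_STATE_SUSPENDED.
 *
 *	static void
 *	new_state(struct sd_lun *un, uchar_t s)
 *	{
 *		un->un_last_state = un->un_state;
 *		un->un_state = s;
 *	}
 *
 *	static void
 *	restore_state(struct sd_lun *un)
 *	{
 *		new_state(un, un->un_last_state);
 *	}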
5866 */ 5867 cv_broadcast(&un->un_suspend_cv); 5868 cv_broadcast(&un->un_state_cv); 5869 5870 /* restart thread */ 5871 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5872 scsi_watch_resume(un->un_swr_token); 5873 } 5874 5875 #if (defined(__fibre)) 5876 if (un->un_f_is_fibre == TRUE) { 5877 /* 5878 * Add callbacks for insert and remove events 5879 */ 5880 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5881 sd_init_event_callbacks(un); 5882 } 5883 } 5884 #endif 5885 5886 /* 5887 * Transport any pending commands to the target. 5888 * 5889 * If this is a low-activity device commands in queue will have to wait 5890 * until new commands come in, which may take awhile. Also, we 5891 * specifically don't check un_ncmds_in_transport because we know that 5892 * there really are no commands in progress after the unit was 5893 * suspended and we could have reached the throttle level, been 5894 * suspended, and have no new commands coming in for awhile. Highly 5895 * unlikely, but so is the low-activity disk scenario. 5896 */ 5897 ddi_xbuf_dispatch(un->un_xbuf_attr); 5898 5899 sd_start_cmds(un, NULL); 5900 mutex_exit(SD_MUTEX(un)); 5901 5902 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5903 5904 return (DDI_SUCCESS); 5905 } 5906 5907 5908 /* 5909 * Function: sd_ddi_pm_resume 5910 * 5911 * Description: Set the drive state to powered on. 5912 * Someone else is required to actually change the drive 5913 * power level. 5914 * 5915 * Arguments: un - driver soft state (unit) structure 5916 * 5917 * Return Code: DDI_SUCCESS 5918 * 5919 * Context: Kernel thread context 5920 */ 5921 5922 static int 5923 sd_ddi_pm_resume(struct sd_lun *un) 5924 { 5925 ASSERT(un != NULL); 5926 5927 ASSERT(!mutex_owned(SD_MUTEX(un))); 5928 mutex_enter(SD_MUTEX(un)); 5929 un->un_power_level = SD_SPINDLE_ON; 5930 5931 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5932 mutex_enter(&un->un_pm_mutex); 5933 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5934 un->un_pm_count++; 5935 ASSERT(un->un_pm_count == 0); 5936 /* 5937 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5938 * un_suspend_cv is for a system resume, not a power management 5939 * device resume. (4297749) 5940 * cv_broadcast(&un->un_suspend_cv); 5941 */ 5942 } 5943 mutex_exit(&un->un_pm_mutex); 5944 mutex_exit(SD_MUTEX(un)); 5945 5946 return (DDI_SUCCESS); 5947 } 5948 5949 5950 /* 5951 * Function: sd_pm_idletimeout_handler 5952 * 5953 * Description: A timer routine that's active only while a device is busy. 5954 * The purpose is to extend slightly the pm framework's busy 5955 * view of the device to prevent busy/idle thrashing for 5956 * back-to-back commands. Do this by comparing the current time 5957 * to the time at which the last command completed and when the 5958 * difference is greater than sd_pm_idletime, call 5959 * pm_idle_component. In addition to indicating idle to the pm 5960 * framework, update the chain type to again use the internal pm 5961 * layers of the driver. 
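 *
 * [Editorial aside with an illustrative sketch; not part of the original
 * source.]  Reduced to its essentials, the handler below is a
 * self-re-arming poll: if the device has been quiet for longer than
 * sd_pm_idletime it declares the component idle and lets the timer die,
 * otherwise it re-schedules itself (the real code also switches the I/O
 * chain types back to the PM-aware chains and guards against a detaching
 * instance first):
 *
 *	static void
 *	idle_poll(void *arg)			// hypothetical name
 *	{
 *		struct sd_lun *un = arg;
 *
 *		mutex_enter(SD_MUTEX(un));
 *		mutex_enter(&un->un_pm_mutex);
 *		if ((ddi_get_time() - un->un_pm_idle_time) > sd_pm_idletime &&
 *		    un->un_ncmds_in_driver == 0 && un->un_pm_count == 0) {
 *			(void) pm_idle_component(SD_DEVINFO(un), 0);
 *			un->un_pm_idle_timeid = NULL;
 *		} else {
 *			un->un_pm_idle_timeid = timeout(idle_poll, un,
 *			    drv_usectohz(300000));	// retry in ~300 ms
 *		}
 *		mutex_exit(&un->un_pm_mutex);
 *		mutex_exit(SD_MUTEX(un));
 *	}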
5962 * 5963 * Arguments: arg - driver soft state (unit) structure 5964 * 5965 * Context: Executes in a timeout(9F) thread context 5966 */ 5967 5968 static void 5969 sd_pm_idletimeout_handler(void *arg) 5970 { 5971 struct sd_lun *un = arg; 5972 5973 time_t now; 5974 5975 mutex_enter(&sd_detach_mutex); 5976 if (un->un_detach_count != 0) { 5977 /* Abort if the instance is detaching */ 5978 mutex_exit(&sd_detach_mutex); 5979 return; 5980 } 5981 mutex_exit(&sd_detach_mutex); 5982 5983 now = ddi_get_time(); 5984 /* 5985 * Grab both mutexes, in the proper order, since we're accessing 5986 * both PM and softstate variables. 5987 */ 5988 mutex_enter(SD_MUTEX(un)); 5989 mutex_enter(&un->un_pm_mutex); 5990 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5991 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5992 /* 5993 * Update the chain types. 5994 * This takes affect on the next new command received. 5995 */ 5996 if (un->un_f_non_devbsize_supported) { 5997 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5998 } else { 5999 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6000 } 6001 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6002 6003 SD_TRACE(SD_LOG_IO_PM, un, 6004 "sd_pm_idletimeout_handler: idling device\n"); 6005 (void) pm_idle_component(SD_DEVINFO(un), 0); 6006 un->un_pm_idle_timeid = NULL; 6007 } else { 6008 un->un_pm_idle_timeid = 6009 timeout(sd_pm_idletimeout_handler, un, 6010 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6011 } 6012 mutex_exit(&un->un_pm_mutex); 6013 mutex_exit(SD_MUTEX(un)); 6014 } 6015 6016 6017 /* 6018 * Function: sd_pm_timeout_handler 6019 * 6020 * Description: Callback to tell framework we are idle. 6021 * 6022 * Context: timeout(9f) thread context. 6023 */ 6024 6025 static void 6026 sd_pm_timeout_handler(void *arg) 6027 { 6028 struct sd_lun *un = arg; 6029 6030 (void) pm_idle_component(SD_DEVINFO(un), 0); 6031 mutex_enter(&un->un_pm_mutex); 6032 un->un_pm_timeid = NULL; 6033 mutex_exit(&un->un_pm_mutex); 6034 } 6035 6036 6037 /* 6038 * Function: sdpower 6039 * 6040 * Description: PM entry point. 6041 * 6042 * Return Code: DDI_SUCCESS 6043 * DDI_FAILURE 6044 * 6045 * Context: Kernel thread context 6046 */ 6047 6048 static int 6049 sdpower(dev_info_t *devi, int component, int level) 6050 { 6051 struct sd_lun *un; 6052 int instance; 6053 int rval = DDI_SUCCESS; 6054 uint_t i, log_page_size, maxcycles, ncycles; 6055 uchar_t *log_page_data; 6056 int log_sense_page; 6057 int medium_present; 6058 time_t intvlp; 6059 dev_t dev; 6060 struct pm_trans_data sd_pm_tran_data; 6061 uchar_t save_state; 6062 int sval; 6063 uchar_t state_before_pm; 6064 int got_semaphore_here; 6065 6066 instance = ddi_get_instance(devi); 6067 6068 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6069 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6070 component != 0) { 6071 return (DDI_FAILURE); 6072 } 6073 6074 dev = sd_make_device(SD_DEVINFO(un)); 6075 6076 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6077 6078 /* 6079 * Must synchronize power down with close. 6080 * Attempt to decrement/acquire the open/close semaphore, 6081 * but do NOT wait on it. If it's not greater than zero, 6082 * ie. it can't be decremented without waiting, then 6083 * someone else, either open or close, already has it 6084 * and the try returns 0. Use that knowledge here to determine 6085 * if it's OK to change the device power level. 6086 * Also, only increment it on exit if it was decremented, ie. gotten, 6087 * here. 
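 *
 * [Editorial aside with an illustrative sketch; not part of the original
 * source.]  sema_tryp(9F) returns non-zero when it succeeds in taking
 * the semaphore and 0 when it would have had to block, so the paragraph
 * above boils down to this skeleton (hypothetical function name):
 *
 *	static int
 *	try_power_change(struct sd_lun *un)
 *	{
 *		int	got = sema_tryp(&un->un_semoclose);
 *		int	rv = DDI_FAILURE;
 *
 *		if (got != 0) {
 *			// no open(9E)/close(9E) in flight; safe to proceed
 *			rv = DDI_SUCCESS;
 *			sema_v(&un->un_semoclose);   // release only if taken
 *		}
 *		return (rv);
 *	}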
6088 */ 6089 got_semaphore_here = sema_tryp(&un->un_semoclose); 6090 6091 mutex_enter(SD_MUTEX(un)); 6092 6093 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6094 un->un_ncmds_in_driver); 6095 6096 /* 6097 * If un_ncmds_in_driver is non-zero it indicates commands are 6098 * already being processed in the driver, or if the semaphore was 6099 * not gotten here it indicates an open or close is being processed. 6100 * At the same time somebody is requesting to go low power which 6101 * can't happen, therefore we need to return failure. 6102 */ 6103 if ((level == SD_SPINDLE_OFF) && 6104 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6105 mutex_exit(SD_MUTEX(un)); 6106 6107 if (got_semaphore_here != 0) { 6108 sema_v(&un->un_semoclose); 6109 } 6110 SD_TRACE(SD_LOG_IO_PM, un, 6111 "sdpower: exit, device has queued cmds.\n"); 6112 return (DDI_FAILURE); 6113 } 6114 6115 /* 6116 * If the device is OFFLINE the disk is effectively dead; any commands 6117 * we would have to send to change its power state would fail anyway, 6118 * so simply return failure here. 6119 * 6120 * Power changes to a device that's OFFLINE or SUSPENDED 6121 * are not allowed. 6122 */ 6123 if ((un->un_state == SD_STATE_OFFLINE) || 6124 (un->un_state == SD_STATE_SUSPENDED)) { 6125 mutex_exit(SD_MUTEX(un)); 6126 6127 if (got_semaphore_here != 0) { 6128 sema_v(&un->un_semoclose); 6129 } 6130 SD_TRACE(SD_LOG_IO_PM, un, 6131 "sdpower: exit, device is off-line.\n"); 6132 return (DDI_FAILURE); 6133 } 6134 6135 /* 6136 * Change the device's state to indicate its power level 6137 * is being changed. Do this to prevent a power off in the 6138 * middle of commands, which is especially bad on devices 6139 * that are really powered off instead of just spun down. 6140 */ 6141 state_before_pm = un->un_state; 6142 un->un_state = SD_STATE_PM_CHANGING; 6143 6144 mutex_exit(SD_MUTEX(un)); 6145 6146 /* 6147 * If the "pm-capable" property is set to TRUE by the HBA driver, 6148 * bypass the following checking; otherwise, check the log 6149 * sense information for this device. 6150 */ 6151 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 6152 /* 6153 * Get the log sense information to determine whether the 6154 * power-cycle counts have gone beyond the threshold. 6155 */ 6156 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6157 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6158 6159 mutex_enter(SD_MUTEX(un)); 6160 log_sense_page = un->un_start_stop_cycle_page; 6161 mutex_exit(SD_MUTEX(un)); 6162 6163 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 6164 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6165 #ifdef SDDEBUG 6166 if (sd_force_pm_supported) { 6167 /* Force a successful result */ 6168 rval = 0; 6169 } 6170 #endif 6171 if (rval != 0) { 6172 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6173 "Log Sense Failed\n"); 6174 kmem_free(log_page_data, log_page_size); 6175 /* Cannot support power management on those drives */ 6176 6177 if (got_semaphore_here != 0) { 6178 sema_v(&un->un_semoclose); 6179 } 6180 /* 6181 * On exit put the state back to its original value 6182 * and broadcast to anyone waiting for the power 6183 * change completion.
6184 */ 6185 mutex_enter(SD_MUTEX(un)); 6186 un->un_state = state_before_pm; 6187 cv_broadcast(&un->un_suspend_cv); 6188 mutex_exit(SD_MUTEX(un)); 6189 SD_TRACE(SD_LOG_IO_PM, un, 6190 "sdpower: exit, Log Sense Failed.\n"); 6191 return (DDI_FAILURE); 6192 } 6193 6194 /* 6195 * From the page data - Convert the essential information to 6196 * pm_trans_data 6197 */ 6198 maxcycles = 6199 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6200 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6201 6202 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6203 6204 ncycles = 6205 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6206 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6207 6208 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6209 6210 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6211 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6212 log_page_data[8+i]; 6213 } 6214 6215 kmem_free(log_page_data, log_page_size); 6216 6217 /* 6218 * Call pm_trans_check routine to get the Ok from 6219 * the global policy 6220 */ 6221 6222 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6223 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6224 6225 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6226 #ifdef SDDEBUG 6227 if (sd_force_pm_supported) { 6228 /* Force a successful result */ 6229 rval = 1; 6230 } 6231 #endif 6232 switch (rval) { 6233 case 0: 6234 /* 6235 * Not Ok to Power cycle or error in parameters passed 6236 * Would have given the advised time to consider power 6237 * cycle. Based on the new intvlp parameter we are 6238 * supposed to pretend we are busy so that pm framework 6239 * will never call our power entry point. Because of 6240 * that install a timeout handler and wait for the 6241 * recommended time to elapse so that power management 6242 * can be effective again. 6243 * 6244 * To effect this behavior, call pm_busy_component to 6245 * indicate to the framework this device is busy. 6246 * By not adjusting un_pm_count the rest of PM in 6247 * the driver will function normally, and independent 6248 * of this but because the framework is told the device 6249 * is busy it won't attempt powering down until it gets 6250 * a matching idle. The timeout handler sends this. 6251 * Note: sd_pm_entry can't be called here to do this 6252 * because sdpower may have been called as a result 6253 * of a call to pm_raise_power from within sd_pm_entry. 6254 * 6255 * If a timeout handler is already active then 6256 * don't install another. 6257 */ 6258 mutex_enter(&un->un_pm_mutex); 6259 if (un->un_pm_timeid == NULL) { 6260 un->un_pm_timeid = 6261 timeout(sd_pm_timeout_handler, 6262 un, intvlp * drv_usectohz(1000000)); 6263 mutex_exit(&un->un_pm_mutex); 6264 (void) pm_busy_component(SD_DEVINFO(un), 0); 6265 } else { 6266 mutex_exit(&un->un_pm_mutex); 6267 } 6268 if (got_semaphore_here != 0) { 6269 sema_v(&un->un_semoclose); 6270 } 6271 /* 6272 * On exit put the state back to it's original value 6273 * and broadcast to anyone waiting for the power 6274 * change completion. 6275 */ 6276 mutex_enter(SD_MUTEX(un)); 6277 un->un_state = state_before_pm; 6278 cv_broadcast(&un->un_suspend_cv); 6279 mutex_exit(SD_MUTEX(un)); 6280 6281 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6282 "trans check Failed, not ok to power cycle.\n"); 6283 return (DDI_FAILURE); 6284 6285 case -1: 6286 if (got_semaphore_here != 0) { 6287 sema_v(&un->un_semoclose); 6288 } 6289 /* 6290 * On exit put the state back to it's original value 6291 * and broadcast to anyone waiting for the power 6292 * change completion. 
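 *
 * [Editorial aside with an illustrative sketch; not part of the original
 * source.]  The lifemax/ncycles values decoded above are 4-byte
 * big-endian counters inside the start/stop cycle counter log page; the
 * open-coded shifts are equivalent to a small helper such as
 * (hypothetical name):
 *
 *	static uint_t
 *	be32_at(uchar_t *p)
 *	{
 *		return (((uint_t)p[0] << 24) | ((uint_t)p[1] << 16) |
 *		    ((uint_t)p[2] << 8) | (uint_t)p[3]);
 *	}
 *
 *	maxcycles = be32_at(&log_page_data[0x1c]);
 *	ncycles   = be32_at(&log_page_data[0x24]);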
6293 */ 6294 mutex_enter(SD_MUTEX(un)); 6295 un->un_state = state_before_pm; 6296 cv_broadcast(&un->un_suspend_cv); 6297 mutex_exit(SD_MUTEX(un)); 6298 SD_TRACE(SD_LOG_IO_PM, un, 6299 "sdpower: exit, trans check command Failed.\n"); 6300 return (DDI_FAILURE); 6301 } 6302 } 6303 6304 if (level == SD_SPINDLE_OFF) { 6305 /* 6306 * Save the last state... if the STOP FAILS we need it 6307 * for restoring 6308 */ 6309 mutex_enter(SD_MUTEX(un)); 6310 save_state = un->un_last_state; 6311 /* 6312 * There must not be any cmds. getting processed 6313 * in the driver when we get here. Power to the 6314 * device is potentially going off. 6315 */ 6316 ASSERT(un->un_ncmds_in_driver == 0); 6317 mutex_exit(SD_MUTEX(un)); 6318 6319 /* 6320 * For now suspend the device completely before spindle is 6321 * turned off 6322 */ 6323 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6324 if (got_semaphore_here != 0) { 6325 sema_v(&un->un_semoclose); 6326 } 6327 /* 6328 * On exit put the state back to it's original value 6329 * and broadcast to anyone waiting for the power 6330 * change completion. 6331 */ 6332 mutex_enter(SD_MUTEX(un)); 6333 un->un_state = state_before_pm; 6334 cv_broadcast(&un->un_suspend_cv); 6335 mutex_exit(SD_MUTEX(un)); 6336 SD_TRACE(SD_LOG_IO_PM, un, 6337 "sdpower: exit, PM suspend Failed.\n"); 6338 return (DDI_FAILURE); 6339 } 6340 } 6341 6342 /* 6343 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6344 * close, or strategy. Dump no long uses this routine, it uses it's 6345 * own code so it can be done in polled mode. 6346 */ 6347 6348 medium_present = TRUE; 6349 6350 /* 6351 * When powering up, issue a TUR in case the device is at unit 6352 * attention. Don't do retries. Bypass the PM layer, otherwise 6353 * a deadlock on un_pm_busy_cv will occur. 6354 */ 6355 if (level == SD_SPINDLE_ON) { 6356 (void) sd_send_scsi_TEST_UNIT_READY(un, 6357 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6358 } 6359 6360 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6361 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6362 6363 sval = sd_send_scsi_START_STOP_UNIT(un, 6364 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6365 SD_PATH_DIRECT); 6366 /* Command failed, check for media present. */ 6367 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6368 medium_present = FALSE; 6369 } 6370 6371 /* 6372 * The conditions of interest here are: 6373 * if a spindle off with media present fails, 6374 * then restore the state and return an error. 6375 * else if a spindle on fails, 6376 * then return an error (there's no state to restore). 6377 * In all other cases we setup for the new state 6378 * and return success. 6379 */ 6380 switch (level) { 6381 case SD_SPINDLE_OFF: 6382 if ((medium_present == TRUE) && (sval != 0)) { 6383 /* The stop command from above failed */ 6384 rval = DDI_FAILURE; 6385 /* 6386 * The stop command failed, and we have media 6387 * present. Put the level back by calling the 6388 * sd_pm_resume() and set the state back to 6389 * it's previous value. 6390 */ 6391 (void) sd_ddi_pm_resume(un); 6392 mutex_enter(SD_MUTEX(un)); 6393 un->un_last_state = save_state; 6394 mutex_exit(SD_MUTEX(un)); 6395 break; 6396 } 6397 /* 6398 * The stop command from above succeeded. 6399 */ 6400 if (un->un_f_monitor_media_state) { 6401 /* 6402 * Terminate watch thread in case of removable media 6403 * devices going into low power state. 
This is as per 6404 * the requirements of pm framework, otherwise commands 6405 * will be generated for the device (through watch 6406 * thread), even when the device is in low power state. 6407 */ 6408 mutex_enter(SD_MUTEX(un)); 6409 un->un_f_watcht_stopped = FALSE; 6410 if (un->un_swr_token != NULL) { 6411 opaque_t temp_token = un->un_swr_token; 6412 un->un_f_watcht_stopped = TRUE; 6413 un->un_swr_token = NULL; 6414 mutex_exit(SD_MUTEX(un)); 6415 (void) scsi_watch_request_terminate(temp_token, 6416 SCSI_WATCH_TERMINATE_ALL_WAIT); 6417 } else { 6418 mutex_exit(SD_MUTEX(un)); 6419 } 6420 } 6421 break; 6422 6423 default: /* The level requested is spindle on... */ 6424 /* 6425 * Legacy behavior: return success on a failed spinup 6426 * if there is no media in the drive. 6427 * Do this by looking at medium_present here. 6428 */ 6429 if ((sval != 0) && medium_present) { 6430 /* The start command from above failed */ 6431 rval = DDI_FAILURE; 6432 break; 6433 } 6434 /* 6435 * The start command from above succeeded 6436 * Resume the devices now that we have 6437 * started the disks 6438 */ 6439 (void) sd_ddi_pm_resume(un); 6440 6441 /* 6442 * Resume the watch thread since it was suspended 6443 * when the device went into low power mode. 6444 */ 6445 if (un->un_f_monitor_media_state) { 6446 mutex_enter(SD_MUTEX(un)); 6447 if (un->un_f_watcht_stopped == TRUE) { 6448 opaque_t temp_token; 6449 6450 un->un_f_watcht_stopped = FALSE; 6451 mutex_exit(SD_MUTEX(un)); 6452 temp_token = scsi_watch_request_submit( 6453 SD_SCSI_DEVP(un), 6454 sd_check_media_time, 6455 SENSE_LENGTH, sd_media_watch_cb, 6456 (caddr_t)dev); 6457 mutex_enter(SD_MUTEX(un)); 6458 un->un_swr_token = temp_token; 6459 } 6460 mutex_exit(SD_MUTEX(un)); 6461 } 6462 } 6463 if (got_semaphore_here != 0) { 6464 sema_v(&un->un_semoclose); 6465 } 6466 /* 6467 * On exit put the state back to it's original value 6468 * and broadcast to anyone waiting for the power 6469 * change completion. 6470 */ 6471 mutex_enter(SD_MUTEX(un)); 6472 un->un_state = state_before_pm; 6473 cv_broadcast(&un->un_suspend_cv); 6474 mutex_exit(SD_MUTEX(un)); 6475 6476 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6477 6478 return (rval); 6479 } 6480 6481 6482 6483 /* 6484 * Function: sdattach 6485 * 6486 * Description: Driver's attach(9e) entry point function. 6487 * 6488 * Arguments: devi - opaque device info handle 6489 * cmd - attach type 6490 * 6491 * Return Code: DDI_SUCCESS 6492 * DDI_FAILURE 6493 * 6494 * Context: Kernel thread context 6495 */ 6496 6497 static int 6498 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6499 { 6500 switch (cmd) { 6501 case DDI_ATTACH: 6502 return (sd_unit_attach(devi)); 6503 case DDI_RESUME: 6504 return (sd_ddi_resume(devi)); 6505 default: 6506 break; 6507 } 6508 return (DDI_FAILURE); 6509 } 6510 6511 6512 /* 6513 * Function: sddetach 6514 * 6515 * Description: Driver's detach(9E) entry point function. 
6516 * 6517 * Arguments: devi - opaque device info handle 6518 * cmd - detach type 6519 * 6520 * Return Code: DDI_SUCCESS 6521 * DDI_FAILURE 6522 * 6523 * Context: Kernel thread context 6524 */ 6525 6526 static int 6527 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6528 { 6529 switch (cmd) { 6530 case DDI_DETACH: 6531 return (sd_unit_detach(devi)); 6532 case DDI_SUSPEND: 6533 return (sd_ddi_suspend(devi)); 6534 default: 6535 break; 6536 } 6537 return (DDI_FAILURE); 6538 } 6539 6540 6541 /* 6542 * Function: sd_sync_with_callback 6543 * 6544 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6545 * state while the callback routine is active. 6546 * 6547 * Arguments: un: softstate structure for the instance 6548 * 6549 * Context: Kernel thread context 6550 */ 6551 6552 static void 6553 sd_sync_with_callback(struct sd_lun *un) 6554 { 6555 ASSERT(un != NULL); 6556 6557 mutex_enter(SD_MUTEX(un)); 6558 6559 ASSERT(un->un_in_callback >= 0); 6560 6561 while (un->un_in_callback > 0) { 6562 mutex_exit(SD_MUTEX(un)); 6563 delay(2); 6564 mutex_enter(SD_MUTEX(un)); 6565 } 6566 6567 mutex_exit(SD_MUTEX(un)); 6568 } 6569 6570 /* 6571 * Function: sd_unit_attach 6572 * 6573 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6574 * the soft state structure for the device and performs 6575 * all necessary structure and device initializations. 6576 * 6577 * Arguments: devi: the system's dev_info_t for the device. 6578 * 6579 * Return Code: DDI_SUCCESS if attach is successful. 6580 * DDI_FAILURE if any part of the attach fails. 6581 * 6582 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6583 * Kernel thread context only. Can sleep. 6584 */ 6585 6586 static int 6587 sd_unit_attach(dev_info_t *devi) 6588 { 6589 struct scsi_device *devp; 6590 struct sd_lun *un; 6591 char *variantp; 6592 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6593 int instance; 6594 int rval; 6595 int wc_enabled; 6596 int tgt; 6597 uint64_t capacity; 6598 uint_t lbasize = 0; 6599 dev_info_t *pdip = ddi_get_parent(devi); 6600 int offbyone = 0; 6601 int geom_label_valid = 0; 6602 #if defined(__sparc) 6603 int max_xfer_size; 6604 #endif 6605 6606 /* 6607 * Retrieve the target driver's private data area. This was set 6608 * up by the HBA. 6609 */ 6610 devp = ddi_get_driver_private(devi); 6611 6612 /* 6613 * Retrieve the target ID of the device. 6614 */ 6615 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6616 SCSI_ADDR_PROP_TARGET, -1); 6617 6618 /* 6619 * Since we have no idea what state things were left in by the last 6620 * user of the device, set up some 'default' settings, ie. turn 'em 6621 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6622 * Do this before the scsi_probe, which sends an inquiry. 6623 * This is a fix for bug (4430280). 6624 * Of special importance is wide-xfer. The drive could have been left 6625 * in wide transfer mode by the last driver to communicate with it, 6626 * this includes us. If that's the case, and if the following is not 6627 * setup properly or we don't re-negotiate with the drive prior to 6628 * transferring data to/from the drive, it causes bus parity errors, 6629 * data overruns, and unexpected interrupts. This first occurred when 6630 * the fix for bug (4378686) was made. 
6631 */ 6632 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6633 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6634 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6635 6636 /* 6637 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6638 * on a target. Setting it per lun instance actually sets the 6639 * capability of this target, which affects those luns already 6640 * attached on the same target. So during attach, we can only disable 6641 * this capability only when no other lun has been attached on this 6642 * target. By doing this, we assume a target has the same tagged-qing 6643 * capability for every lun. The condition can be removed when HBA 6644 * is changed to support per lun based tagged-qing capability. 6645 */ 6646 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6647 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6648 } 6649 6650 /* 6651 * Use scsi_probe() to issue an INQUIRY command to the device. 6652 * This call will allocate and fill in the scsi_inquiry structure 6653 * and point the sd_inq member of the scsi_device structure to it. 6654 * If the attach succeeds, then this memory will not be de-allocated 6655 * (via scsi_unprobe()) until the instance is detached. 6656 */ 6657 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6658 goto probe_failed; 6659 } 6660 6661 /* 6662 * Check the device type as specified in the inquiry data and 6663 * claim it if it is of a type that we support. 6664 */ 6665 switch (devp->sd_inq->inq_dtype) { 6666 case DTYPE_DIRECT: 6667 break; 6668 case DTYPE_RODIRECT: 6669 break; 6670 case DTYPE_OPTICAL: 6671 break; 6672 case DTYPE_NOTPRESENT: 6673 default: 6674 /* Unsupported device type; fail the attach. */ 6675 goto probe_failed; 6676 } 6677 6678 /* 6679 * Allocate the soft state structure for this unit. 6680 * 6681 * We rely upon this memory being set to all zeroes by 6682 * ddi_soft_state_zalloc(). We assume that any member of the 6683 * soft state structure that is not explicitly initialized by 6684 * this routine will have a value of zero. 6685 */ 6686 instance = ddi_get_instance(devp->sd_dev); 6687 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6688 goto probe_failed; 6689 } 6690 6691 /* 6692 * Retrieve a pointer to the newly-allocated soft state. 6693 * 6694 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6695 * was successful, unless something has gone horribly wrong and the 6696 * ddi's soft state internals are corrupt (in which case it is 6697 * probably better to halt here than just fail the attach....) 6698 */ 6699 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6700 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6701 instance); 6702 /*NOTREACHED*/ 6703 } 6704 6705 /* 6706 * Link the back ptr of the driver soft state to the scsi_device 6707 * struct for this lun. 6708 * Save a pointer to the softstate in the driver-private area of 6709 * the scsi_device struct. 6710 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6711 * we first set un->un_sd below. 6712 */ 6713 un->un_sd = devp; 6714 devp->sd_private = (opaque_t)un; 6715 6716 /* 6717 * The following must be after devp is stored in the soft state struct. 6718 */ 6719 #ifdef SDDEBUG 6720 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6721 "%s_unit_attach: un:0x%p instance:%d\n", 6722 ddi_driver_name(devi), un, instance); 6723 #endif 6724 6725 /* 6726 * Set up the device type and node type (for the minor nodes). 
6727 * By default we assume that the device can at least support the 6728 * Common Command Set. Call it a CD-ROM if it reports itself 6729 * as a RODIRECT device. 6730 */ 6731 switch (devp->sd_inq->inq_dtype) { 6732 case DTYPE_RODIRECT: 6733 un->un_node_type = DDI_NT_CD_CHAN; 6734 un->un_ctype = CTYPE_CDROM; 6735 break; 6736 case DTYPE_OPTICAL: 6737 un->un_node_type = DDI_NT_BLOCK_CHAN; 6738 un->un_ctype = CTYPE_ROD; 6739 break; 6740 default: 6741 un->un_node_type = DDI_NT_BLOCK_CHAN; 6742 un->un_ctype = CTYPE_CCS; 6743 break; 6744 } 6745 6746 /* 6747 * Try to read the interconnect type from the HBA. 6748 * 6749 * Note: This driver is currently compiled as two binaries, a parallel 6750 * scsi version (sd) and a fibre channel version (ssd). All functional 6751 * differences are determined at compile time. In the future a single 6752 * binary will be provided and the interconnect type will be used to 6753 * differentiate between fibre and parallel scsi behaviors. At that time 6754 * it will be necessary for all fibre channel HBAs to support this 6755 * property. 6756 * 6757 * set un_f_is_fiber to TRUE ( default fiber ) 6758 */ 6759 un->un_f_is_fibre = TRUE; 6760 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6761 case INTERCONNECT_SSA: 6762 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6763 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6764 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6765 break; 6766 case INTERCONNECT_PARALLEL: 6767 un->un_f_is_fibre = FALSE; 6768 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6769 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6770 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 6771 break; 6772 case INTERCONNECT_SATA: 6773 un->un_f_is_fibre = FALSE; 6774 un->un_interconnect_type = SD_INTERCONNECT_SATA; 6775 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6776 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 6777 break; 6778 case INTERCONNECT_FIBRE: 6779 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 6780 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6781 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 6782 break; 6783 case INTERCONNECT_FABRIC: 6784 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 6785 un->un_node_type = DDI_NT_BLOCK_FABRIC; 6786 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6787 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 6788 break; 6789 default: 6790 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 6791 /* 6792 * The HBA does not support the "interconnect-type" property 6793 * (or did not provide a recognized type). 6794 * 6795 * Note: This will be obsoleted when a single fibre channel 6796 * and parallel scsi driver is delivered. In the meantime the 6797 * interconnect type will be set to the platform default.If that 6798 * type is not parallel SCSI, it means that we should be 6799 * assuming "ssd" semantics. However, here this also means that 6800 * the FC HBA is not supporting the "interconnect-type" property 6801 * like we expect it to, so log this occurrence. 6802 */ 6803 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 6804 if (!SD_IS_PARALLEL_SCSI(un)) { 6805 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6806 "sd_unit_attach: un:0x%p Assuming " 6807 "INTERCONNECT_FIBRE\n", un); 6808 } else { 6809 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6810 "sd_unit_attach: un:0x%p Assuming " 6811 "INTERCONNECT_PARALLEL\n", un); 6812 un->un_f_is_fibre = FALSE; 6813 } 6814 #else 6815 /* 6816 * Note: This source will be implemented when a single fibre 6817 * channel and parallel scsi driver is delivered. 
The default 6818 * will be to assume that if a device does not support the 6819 * "interconnect-type" property it is a parallel SCSI HBA and 6820 * we will set the interconnect type for parallel scsi. 6821 */ 6822 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6823 un->un_f_is_fibre = FALSE; 6824 #endif 6825 break; 6826 } 6827 6828 if (un->un_f_is_fibre == TRUE) { 6829 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 6830 SCSI_VERSION_3) { 6831 switch (un->un_interconnect_type) { 6832 case SD_INTERCONNECT_FIBRE: 6833 case SD_INTERCONNECT_SSA: 6834 un->un_node_type = DDI_NT_BLOCK_WWN; 6835 break; 6836 default: 6837 break; 6838 } 6839 } 6840 } 6841 6842 /* 6843 * Initialize the Request Sense command for the target 6844 */ 6845 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 6846 goto alloc_rqs_failed; 6847 } 6848 6849 /* 6850 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 6851 * with separate binary for sd and ssd. 6852 * 6853 * x86 has 1 binary, un_retry_count is set base on connection type. 6854 * The hardcoded values will go away when Sparc uses 1 binary 6855 * for sd and ssd. This hardcoded values need to match 6856 * SD_RETRY_COUNT in sddef.h 6857 * The value used is base on interconnect type. 6858 * fibre = 3, parallel = 5 6859 */ 6860 #if defined(__i386) || defined(__amd64) 6861 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 6862 #else 6863 un->un_retry_count = SD_RETRY_COUNT; 6864 #endif 6865 6866 /* 6867 * Set the per disk retry count to the default number of retries 6868 * for disks and CDROMs. This value can be overridden by the 6869 * disk property list or an entry in sd.conf. 6870 */ 6871 un->un_notready_retry_count = 6872 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 6873 : DISK_NOT_READY_RETRY_COUNT(un); 6874 6875 /* 6876 * Set the busy retry count to the default value of un_retry_count. 6877 * This can be overridden by entries in sd.conf or the device 6878 * config table. 6879 */ 6880 un->un_busy_retry_count = un->un_retry_count; 6881 6882 /* 6883 * Init the reset threshold for retries. This number determines 6884 * how many retries must be performed before a reset can be issued 6885 * (for certain error conditions). This can be overridden by entries 6886 * in sd.conf or the device config table. 6887 */ 6888 un->un_reset_retry_count = (un->un_retry_count / 2); 6889 6890 /* 6891 * Set the victim_retry_count to the default un_retry_count 6892 */ 6893 un->un_victim_retry_count = (2 * un->un_retry_count); 6894 6895 /* 6896 * Set the reservation release timeout to the default value of 6897 * 5 seconds. This can be overridden by entries in ssd.conf or the 6898 * device config table. 6899 */ 6900 un->un_reserve_release_time = 5; 6901 6902 /* 6903 * Set up the default maximum transfer size. Note that this may 6904 * get updated later in the attach, when setting up default wide 6905 * operations for disks. 6906 */ 6907 #if defined(__i386) || defined(__amd64) 6908 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 6909 un->un_partial_dma_supported = 1; 6910 #else 6911 un->un_max_xfer_size = (uint_t)maxphys; 6912 #endif 6913 6914 /* 6915 * Get "allow bus device reset" property (defaults to "enabled" if 6916 * the property was not defined). This is to disable bus resets for 6917 * certain kinds of error recovery. Note: In the future when a run-time 6918 * fibre check is available the soft state flag should default to 6919 * enabled. 
6920 */ 6921 if (un->un_f_is_fibre == TRUE) { 6922 un->un_f_allow_bus_device_reset = TRUE; 6923 } else { 6924 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6925 "allow-bus-device-reset", 1) != 0) { 6926 un->un_f_allow_bus_device_reset = TRUE; 6927 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6928 "sd_unit_attach: un:0x%p Bus device reset " 6929 "enabled\n", un); 6930 } else { 6931 un->un_f_allow_bus_device_reset = FALSE; 6932 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6933 "sd_unit_attach: un:0x%p Bus device reset " 6934 "disabled\n", un); 6935 } 6936 } 6937 6938 /* 6939 * Check if this is an ATAPI device. ATAPI devices use Group 1 6940 * Read/Write commands and Group 2 Mode Sense/Select commands. 6941 * 6942 * Note: The "obsolete" way of doing this is to check for the "atapi" 6943 * property. The new "variant" property with a value of "atapi" has been 6944 * introduced so that future 'variants' of standard SCSI behavior (like 6945 * atapi) could be specified by the underlying HBA drivers by supplying 6946 * a new value for the "variant" property, instead of having to define a 6947 * new property. 6948 */ 6949 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6950 un->un_f_cfg_is_atapi = TRUE; 6951 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6952 "sd_unit_attach: un:0x%p Atapi device\n", un); 6953 } 6954 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6955 &variantp) == DDI_PROP_SUCCESS) { 6956 if (strcmp(variantp, "atapi") == 0) { 6957 un->un_f_cfg_is_atapi = TRUE; 6958 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6959 "sd_unit_attach: un:0x%p Atapi device\n", un); 6960 } 6961 ddi_prop_free(variantp); 6962 } 6963 6964 un->un_cmd_timeout = SD_IO_TIME; 6965 6966 un->un_busy_timeout = SD_BSY_TIMEOUT; 6967 6968 /* Info on current states, statuses, etc. (Updated frequently) */ 6969 un->un_state = SD_STATE_NORMAL; 6970 un->un_last_state = SD_STATE_NORMAL; 6971 6972 /* Control & status info for command throttling */ 6973 un->un_throttle = sd_max_throttle; 6974 un->un_saved_throttle = sd_max_throttle; 6975 un->un_min_throttle = sd_min_throttle; 6976 6977 if (un->un_f_is_fibre == TRUE) { 6978 un->un_f_use_adaptive_throttle = TRUE; 6979 } else { 6980 un->un_f_use_adaptive_throttle = FALSE; 6981 } 6982 6983 /* Removable media support. */ 6984 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6985 un->un_mediastate = DKIO_NONE; 6986 un->un_specified_mediastate = DKIO_NONE; 6987 6988 /* CVs for suspend/resume (PM or DR) */ 6989 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6990 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6991 6992 /* Power management support. */ 6993 un->un_power_level = SD_SPINDLE_UNINIT; 6994 6995 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6996 un->un_f_wcc_inprog = 0; 6997 6998 /* 6999 * The open/close semaphore is used to serialize threads executing 7000 * in the driver's open & close entry point routines for a given 7001 * instance. 7002 */ 7003 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7004 7005 /* 7006 * The conf file entry and softstate variable is a forceful override, 7007 * meaning a non-zero value must be entered to change the default. 7008 */ 7009 un->un_f_disksort_disabled = FALSE; 7010 7011 /* 7012 * Retrieve the properties from the static driver table or the driver 7013 * configuration file (.conf) for this unit and update the soft state 7014 * for the device as needed for the indicated properties. 
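 *
 * [Editorial aside with an illustrative sketch; not part of the original
 * source.]  The "variant" property discussed above is expected to be
 * supplied by the HBA (nexus) driver on the child device node rather
 * than by sd itself; a nexus driver could mark an ATAPI child roughly
 * like this (illustrative only; child_dip is a hypothetical name):
 *
 *	(void) ndi_prop_update_string(DDI_DEV_T_NONE, child_dip,
 *	    "variant", "atapi");
 *
 * sd then only needs to look the string up with ddi_prop_lookup_string(),
 * as done above, and compare it against "atapi".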
7015 * Note: the property configuration needs to occur here as some of the 7016 * following routines may have dependencies on soft state flags set 7017 * as part of the driver property configuration. 7018 */ 7019 sd_read_unit_properties(un); 7020 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7021 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7022 7023 /* 7024 * Only if a device has "hotpluggable" property, it is 7025 * treated as hotpluggable device. Otherwise, it is 7026 * regarded as non-hotpluggable one. 7027 */ 7028 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7029 -1) != -1) { 7030 un->un_f_is_hotpluggable = TRUE; 7031 } 7032 7033 /* 7034 * set unit's attributes(flags) according to "hotpluggable" and 7035 * RMB bit in INQUIRY data. 7036 */ 7037 sd_set_unit_attributes(un, devi); 7038 7039 /* 7040 * By default, we mark the capacity, lbasize, and geometry 7041 * as invalid. Only if we successfully read a valid capacity 7042 * will we update the un_blockcount and un_tgt_blocksize with the 7043 * valid values (the geometry will be validated later). 7044 */ 7045 un->un_f_blockcount_is_valid = FALSE; 7046 un->un_f_tgt_blocksize_is_valid = FALSE; 7047 7048 /* 7049 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7050 * otherwise. 7051 */ 7052 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7053 un->un_blockcount = 0; 7054 7055 /* 7056 * Set up the per-instance info needed to determine the correct 7057 * CDBs and other info for issuing commands to the target. 7058 */ 7059 sd_init_cdb_limits(un); 7060 7061 /* 7062 * Set up the IO chains to use, based upon the target type. 7063 */ 7064 if (un->un_f_non_devbsize_supported) { 7065 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7066 } else { 7067 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7068 } 7069 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7070 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7071 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7072 7073 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7074 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7075 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7076 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7077 7078 7079 if (ISCD(un)) { 7080 un->un_additional_codes = sd_additional_codes; 7081 } else { 7082 un->un_additional_codes = NULL; 7083 } 7084 7085 /* 7086 * Create the kstats here so they can be available for attach-time 7087 * routines that send commands to the unit (either polled or via 7088 * sd_send_scsi_cmd). 7089 * 7090 * Note: This is a critical sequence that needs to be maintained: 7091 * 1) Instantiate the kstats here, before any routines using the 7092 * iopath (i.e. sd_send_scsi_cmd). 7093 * 2) Instantiate and initialize the partition stats 7094 * (sd_set_pstats). 7095 * 3) Initialize the error stats (sd_set_errstats), following 7096 * sd_validate_geometry(),sd_register_devid(), 7097 * and sd_cache_control(). 
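 *
 * (Illustrative: the I/O kstat created below, under the sd_label
 * module with class "disk", is the per-unit data that iostat(1M)
 * reports; with a NULL name the framework typically derives a name
 * such as "sd0" from the module and instance number.)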
7098 */ 7099 7100 un->un_stats = kstat_create(sd_label, instance, 7101 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7102 if (un->un_stats != NULL) { 7103 un->un_stats->ks_lock = SD_MUTEX(un); 7104 kstat_install(un->un_stats); 7105 } 7106 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7107 "sd_unit_attach: un:0x%p un_stats created\n", un); 7108 7109 sd_create_errstats(un, instance); 7110 if (un->un_errstats == NULL) { 7111 goto create_errstats_failed; 7112 } 7113 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7114 "sd_unit_attach: un:0x%p errstats created\n", un); 7115 7116 /* 7117 * The following if/else code was relocated here from below as part 7118 * of the fix for bug (4430280). However with the default setup added 7119 * on entry to this routine, it's no longer absolutely necessary for 7120 * this to be before the call to sd_spin_up_unit. 7121 */ 7122 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7123 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7124 (devp->sd_inq->inq_ansi == 5)) && 7125 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7126 7127 /* 7128 * If tagged queueing is supported by the target 7129 * and by the host adapter then we will enable it 7130 */ 7131 un->un_tagflags = 0; 7132 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7133 (un->un_f_arq_enabled == TRUE)) { 7134 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7135 1, 1) == 1) { 7136 un->un_tagflags = FLAG_STAG; 7137 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7138 "sd_unit_attach: un:0x%p tag queueing " 7139 "enabled\n", un); 7140 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7141 "untagged-qing", 0) == 1) { 7142 un->un_f_opt_queueing = TRUE; 7143 un->un_saved_throttle = un->un_throttle = 7144 min(un->un_throttle, 3); 7145 } else { 7146 un->un_f_opt_queueing = FALSE; 7147 un->un_saved_throttle = un->un_throttle = 1; 7148 } 7149 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7150 == 1) && (un->un_f_arq_enabled == TRUE)) { 7151 /* The Host Adapter supports internal queueing. */ 7152 un->un_f_opt_queueing = TRUE; 7153 un->un_saved_throttle = un->un_throttle = 7154 min(un->un_throttle, 3); 7155 } else { 7156 un->un_f_opt_queueing = FALSE; 7157 un->un_saved_throttle = un->un_throttle = 1; 7158 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7159 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7160 } 7161 7162 /* 7163 * Enable large transfers for SATA/SAS drives 7164 */ 7165 if (SD_IS_SERIAL(un)) { 7166 un->un_max_xfer_size = 7167 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7168 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7169 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7170 "sd_unit_attach: un:0x%p max transfer " 7171 "size=0x%x\n", un, un->un_max_xfer_size); 7172 7173 } 7174 7175 /* Setup or tear down default wide operations for disks */ 7176 7177 /* 7178 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7179 * and "ssd_max_xfer_size" to exist simultaneously on the same 7180 * system and be set to different values. In the future this 7181 * code may need to be updated when the ssd module is 7182 * obsoleted and removed from the system. 
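 *
 * Illustrative sd.conf override (hypothetical value, in bytes):
 *
 *     sd_max_xfer_size=1048576;
 *
 * When the property is absent, ddi_getprop() falls back to the
 * SD_MAX_XFER_SIZE default passed in with each lookup.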
(4299588) 7183 */ 7184 if (SD_IS_PARALLEL_SCSI(un) && 7185 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7186 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7187 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7188 1, 1) == 1) { 7189 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7190 "sd_unit_attach: un:0x%p Wide Transfer " 7191 "enabled\n", un); 7192 } 7193 7194 /* 7195 * If tagged queuing has also been enabled, then 7196 * enable large xfers 7197 */ 7198 if (un->un_saved_throttle == sd_max_throttle) { 7199 un->un_max_xfer_size = 7200 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7201 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7202 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7203 "sd_unit_attach: un:0x%p max transfer " 7204 "size=0x%x\n", un, un->un_max_xfer_size); 7205 } 7206 } else { 7207 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7208 0, 1) == 1) { 7209 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7210 "sd_unit_attach: un:0x%p " 7211 "Wide Transfer disabled\n", un); 7212 } 7213 } 7214 } else { 7215 un->un_tagflags = FLAG_STAG; 7216 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7217 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7218 } 7219 7220 /* 7221 * If this target supports LUN reset, try to enable it. 7222 */ 7223 if (un->un_f_lun_reset_enabled) { 7224 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7225 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7226 "un:0x%p lun_reset capability set\n", un); 7227 } else { 7228 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7229 "un:0x%p lun-reset capability not set\n", un); 7230 } 7231 } 7232 7233 /* 7234 * Adjust the maximum transfer size. This is to fix 7235 * the problem of partial DMA support on SPARC. Some 7236 * HBA driver, like aac, has very small dma_attr_maxxfer 7237 * size, which requires partial DMA support on SPARC. 7238 * In the future the SPARC pci nexus driver may solve 7239 * the problem instead of this fix. 7240 */ 7241 #if defined(__sparc) 7242 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7243 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7244 un->un_max_xfer_size = max_xfer_size; 7245 un->un_partial_dma_supported = 1; 7246 } 7247 #endif 7248 7249 /* 7250 * Set PKT_DMA_PARTIAL flag. 7251 */ 7252 if (un->un_partial_dma_supported == 1) { 7253 un->un_pkt_flags = PKT_DMA_PARTIAL; 7254 } else { 7255 un->un_pkt_flags = 0; 7256 } 7257 7258 /* 7259 * At this point in the attach, we have enough info in the 7260 * soft state to be able to issue commands to the target. 7261 * 7262 * All command paths used below MUST issue their commands as 7263 * SD_PATH_DIRECT. This is important as intermediate layers 7264 * are not all initialized yet (such as PM). 7265 */ 7266 7267 /* 7268 * Send a TEST UNIT READY command to the device. This should clear 7269 * any outstanding UNIT ATTENTION that may be present. 7270 * 7271 * Note: Don't check for success, just track if there is a reservation, 7272 * this is a throw away command to clear any unit attentions. 7273 * 7274 * Note: This MUST be the first command issued to the target during 7275 * attach to ensure power on UNIT ATTENTIONS are cleared. 7276 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7277 * with attempts at spinning up a device with no media. 
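 *
 * EACCES from this path is the driver's convention for a SCSI
 * reservation conflict; that is the only outcome recorded here (via
 * reservation_flag), and any other failure is deliberately ignored.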
7278 */ 7279 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7280 reservation_flag = SD_TARGET_IS_RESERVED; 7281 } 7282 7283 /* 7284 * If the device is NOT a removable media device, attempt to spin 7285 * it up (using the START_STOP_UNIT command) and read its capacity 7286 * (using the READ CAPACITY command). Note, however, that either 7287 * of these could fail and in some cases we would continue with 7288 * the attach despite the failure (see below). 7289 */ 7290 if (un->un_f_descr_format_supported) { 7291 switch (sd_spin_up_unit(un)) { 7292 case 0: 7293 /* 7294 * Spin-up was successful; now try to read the 7295 * capacity. If successful then save the results 7296 * and mark the capacity & lbasize as valid. 7297 */ 7298 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7299 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7300 7301 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7302 &lbasize, SD_PATH_DIRECT)) { 7303 case 0: { 7304 if (capacity > DK_MAX_BLOCKS) { 7305 #ifdef _LP64 7306 if (capacity + 1 > 7307 SD_GROUP1_MAX_ADDRESS) { 7308 /* 7309 * Enable descriptor format 7310 * sense data so that we can 7311 * get 64 bit sense data 7312 * fields. 7313 */ 7314 sd_enable_descr_sense(un); 7315 } 7316 #else 7317 /* 32-bit kernels can't handle this */ 7318 scsi_log(SD_DEVINFO(un), 7319 sd_label, CE_WARN, 7320 "disk has %llu blocks, which " 7321 "is too large for a 32-bit " 7322 "kernel", capacity); 7323 7324 #if defined(__i386) || defined(__amd64) 7325 /* 7326 * 1TB disk was treated as (1T - 512)B 7327 * in the past, so that it might have 7328 * valid VTOC and solaris partitions, 7329 * we have to allow it to continue to 7330 * work. 7331 */ 7332 if (capacity -1 > DK_MAX_BLOCKS) 7333 #endif 7334 goto spinup_failed; 7335 #endif 7336 } 7337 7338 /* 7339 * Here it's not necessary to check the case: 7340 * the capacity of the device is bigger than 7341 * what the max hba cdb can support. Because 7342 * sd_send_scsi_READ_CAPACITY will retrieve 7343 * the capacity by sending USCSI command, which 7344 * is constrained by the max hba cdb. Actually, 7345 * sd_send_scsi_READ_CAPACITY will return 7346 * EINVAL when using bigger cdb than required 7347 * cdb length. Will handle this case in 7348 * "case EINVAL". 7349 */ 7350 7351 /* 7352 * The following relies on 7353 * sd_send_scsi_READ_CAPACITY never 7354 * returning 0 for capacity and/or lbasize. 7355 */ 7356 sd_update_block_info(un, lbasize, capacity); 7357 7358 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7359 "sd_unit_attach: un:0x%p capacity = %ld " 7360 "blocks; lbasize= %ld.\n", un, 7361 un->un_blockcount, un->un_tgt_blocksize); 7362 7363 break; 7364 } 7365 case EINVAL: 7366 /* 7367 * In the case where the max-cdb-length property 7368 * is smaller than the required CDB length for 7369 * a SCSI device, a target driver can fail to 7370 * attach to that device. 7371 */ 7372 scsi_log(SD_DEVINFO(un), 7373 sd_label, CE_WARN, 7374 "disk capacity is too large " 7375 "for current cdb length"); 7376 goto spinup_failed; 7377 case EACCES: 7378 /* 7379 * Should never get here if the spin-up 7380 * succeeded, but code it in anyway. 7381 * From here, just continue with the attach... 7382 */ 7383 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7384 "sd_unit_attach: un:0x%p " 7385 "sd_send_scsi_READ_CAPACITY " 7386 "returned reservation conflict\n", un); 7387 reservation_flag = SD_TARGET_IS_RESERVED; 7388 break; 7389 default: 7390 /* 7391 * Likewise, should never get here if the 7392 * spin-up succeeded. Just continue with 7393 * the attach... 
7394 */ 7395 break; 7396 } 7397 break; 7398 case EACCES: 7399 /* 7400 * Device is reserved by another host. In this case 7401 * we could not spin it up or read the capacity, but 7402 * we continue with the attach anyway. 7403 */ 7404 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7405 "sd_unit_attach: un:0x%p spin-up reservation " 7406 "conflict.\n", un); 7407 reservation_flag = SD_TARGET_IS_RESERVED; 7408 break; 7409 default: 7410 /* Fail the attach if the spin-up failed. */ 7411 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7412 "sd_unit_attach: un:0x%p spin-up failed.", un); 7413 goto spinup_failed; 7414 } 7415 } 7416 7417 /* 7418 * Check to see if this is a MMC drive 7419 */ 7420 if (ISCD(un)) { 7421 sd_set_mmc_caps(un); 7422 } 7423 7424 7425 /* 7426 * Add a zero-length attribute to tell the world we support 7427 * kernel ioctls (for layered drivers) 7428 */ 7429 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7430 DDI_KERNEL_IOCTL, NULL, 0); 7431 7432 /* 7433 * Add a boolean property to tell the world we support 7434 * the B_FAILFAST flag (for layered drivers) 7435 */ 7436 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7437 "ddi-failfast-supported", NULL, 0); 7438 7439 /* 7440 * Initialize power management 7441 */ 7442 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7443 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7444 sd_setup_pm(un, devi); 7445 if (un->un_f_pm_is_enabled == FALSE) { 7446 /* 7447 * For performance, point to a jump table that does 7448 * not include pm. 7449 * The direct and priority chains don't change with PM. 7450 * 7451 * Note: this is currently done based on individual device 7452 * capabilities. When an interface for determining system 7453 * power enabled state becomes available, or when additional 7454 * layers are added to the command chain, these values will 7455 * have to be re-evaluated for correctness. 7456 */ 7457 if (un->un_f_non_devbsize_supported) { 7458 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7459 } else { 7460 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7461 } 7462 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7463 } 7464 7465 /* 7466 * This property is set to 0 by HA software to avoid retries 7467 * on a reserved disk. (The preferred property name is 7468 * "retry-on-reservation-conflict") (1189689) 7469 * 7470 * Note: The use of a global here can have unintended consequences. A 7471 * per instance variable is preferable to match the capabilities of 7472 * different underlying hba's (4402600) 7473 */ 7474 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7475 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7476 sd_retry_on_reservation_conflict); 7477 if (sd_retry_on_reservation_conflict != 0) { 7478 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7479 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7480 sd_retry_on_reservation_conflict); 7481 } 7482 7483 /* Set up options for QFULL handling. */ 7484 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7485 "qfull-retries", -1)) != -1) { 7486 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7487 rval, 1); 7488 } 7489 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7490 "qfull-retry-interval", -1)) != -1) { 7491 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7492 rval, 1); 7493 } 7494 7495 /* 7496 * This just prints a message that announces the existence of the 7497 * device. 
The message is always printed in the system logfile, but 7498 * only appears on the console if the system is booted with the 7499 * -v (verbose) argument. 7500 */ 7501 ddi_report_dev(devi); 7502 7503 un->un_mediastate = DKIO_NONE; 7504 7505 cmlb_alloc_handle(&un->un_cmlbhandle); 7506 7507 #if defined(__i386) || defined(__amd64) 7508 /* 7509 * On x86, compensate for off-by-1 legacy error 7510 */ 7511 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7512 (lbasize == un->un_sys_blocksize)) 7513 offbyone = CMLB_OFF_BY_ONE; 7514 #endif 7515 7516 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7517 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7518 un->un_node_type, offbyone, un->un_cmlbhandle, 7519 (void *)SD_PATH_DIRECT) != 0) { 7520 goto cmlb_attach_failed; 7521 } 7522 7523 7524 /* 7525 * Read and validate the device's geometry (ie, disk label) 7526 * A new unformatted drive will not have a valid geometry, but 7527 * the driver needs to successfully attach to this device so 7528 * the drive can be formatted via ioctls. 7529 */ 7530 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7531 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7532 7533 mutex_enter(SD_MUTEX(un)); 7534 7535 /* 7536 * Read and initialize the devid for the unit. 7537 */ 7538 if (un->un_f_devid_supported) { 7539 sd_register_devid(un, devi, reservation_flag); 7540 } 7541 mutex_exit(SD_MUTEX(un)); 7542 7543 #if (defined(__fibre)) 7544 /* 7545 * Register callbacks for fibre only. You can't do this solely 7546 * on the basis of the devid_type because this is hba specific. 7547 * We need to query our hba capabilities to find out whether to 7548 * register or not. 7549 */ 7550 if (un->un_f_is_fibre) { 7551 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7552 sd_init_event_callbacks(un); 7553 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7554 "sd_unit_attach: un:0x%p event callbacks inserted", 7555 un); 7556 } 7557 } 7558 #endif 7559 7560 if (un->un_f_opt_disable_cache == TRUE) { 7561 /* 7562 * Disable both read cache and write cache. This is 7563 * the historic behavior of the keywords in the config file. 7564 */ 7565 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7566 0) { 7567 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7568 "sd_unit_attach: un:0x%p Could not disable " 7569 "caching", un); 7570 goto devid_failed; 7571 } 7572 } 7573 7574 /* 7575 * Check the value of the WCE bit now and 7576 * set un_f_write_cache_enabled accordingly. 7577 */ 7578 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7579 mutex_enter(SD_MUTEX(un)); 7580 un->un_f_write_cache_enabled = (wc_enabled != 0); 7581 mutex_exit(SD_MUTEX(un)); 7582 7583 /* 7584 * Check the value of the NV_SUP bit and set 7585 * un_f_suppress_cache_flush accordingly. 7586 */ 7587 sd_get_nv_sup(un); 7588 7589 /* 7590 * Find out what type of reservation this disk supports. 7591 */ 7592 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7593 case 0: 7594 /* 7595 * SCSI-3 reservations are supported. 7596 */ 7597 un->un_reservation_type = SD_SCSI3_RESERVATION; 7598 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7599 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7600 break; 7601 case ENOTSUP: 7602 /* 7603 * The PERSISTENT RESERVE IN command would not be recognized by 7604 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
7605 */ 7606 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7607 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7608 un->un_reservation_type = SD_SCSI2_RESERVATION; 7609 break; 7610 default: 7611 /* 7612 * default to SCSI-3 reservations 7613 */ 7614 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7615 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7616 un->un_reservation_type = SD_SCSI3_RESERVATION; 7617 break; 7618 } 7619 7620 /* 7621 * Set the pstat and error stat values here, so data obtained during the 7622 * previous attach-time routines is available. 7623 * 7624 * Note: This is a critical sequence that needs to be maintained: 7625 * 1) Instantiate the kstats before any routines using the iopath 7626 * (i.e. sd_send_scsi_cmd). 7627 * 2) Initialize the error stats (sd_set_errstats) and partition 7628 * stats (sd_set_pstats)here, following 7629 * cmlb_validate_geometry(), sd_register_devid(), and 7630 * sd_cache_control(). 7631 */ 7632 7633 if (un->un_f_pkstats_enabled && geom_label_valid) { 7634 sd_set_pstats(un); 7635 SD_TRACE(SD_LOG_IO_PARTITION, un, 7636 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7637 } 7638 7639 sd_set_errstats(un); 7640 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7641 "sd_unit_attach: un:0x%p errstats set\n", un); 7642 7643 7644 /* 7645 * After successfully attaching an instance, we record the information 7646 * of how many luns have been attached on the relative target and 7647 * controller for parallel SCSI. This information is used when sd tries 7648 * to set the tagged queuing capability in HBA. 7649 */ 7650 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7651 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7652 } 7653 7654 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7655 "sd_unit_attach: un:0x%p exit success\n", un); 7656 7657 return (DDI_SUCCESS); 7658 7659 /* 7660 * An error occurred during the attach; clean up & return failure. 7661 */ 7662 7663 devid_failed: 7664 7665 setup_pm_failed: 7666 ddi_remove_minor_node(devi, NULL); 7667 7668 cmlb_attach_failed: 7669 /* 7670 * Cleanup from the scsi_ifsetcap() calls (437868) 7671 */ 7672 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7673 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7674 7675 /* 7676 * Refer to the comments of setting tagged-qing in the beginning of 7677 * sd_unit_attach. We can only disable tagged queuing when there is 7678 * no lun attached on the target. 7679 */ 7680 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7681 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7682 } 7683 7684 if (un->un_f_is_fibre == FALSE) { 7685 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7686 } 7687 7688 spinup_failed: 7689 7690 mutex_enter(SD_MUTEX(un)); 7691 7692 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7693 if (un->un_direct_priority_timeid != NULL) { 7694 timeout_id_t temp_id = un->un_direct_priority_timeid; 7695 un->un_direct_priority_timeid = NULL; 7696 mutex_exit(SD_MUTEX(un)); 7697 (void) untimeout(temp_id); 7698 mutex_enter(SD_MUTEX(un)); 7699 } 7700 7701 /* Cancel any pending start/stop timeouts */ 7702 if (un->un_startstop_timeid != NULL) { 7703 timeout_id_t temp_id = un->un_startstop_timeid; 7704 un->un_startstop_timeid = NULL; 7705 mutex_exit(SD_MUTEX(un)); 7706 (void) untimeout(temp_id); 7707 mutex_enter(SD_MUTEX(un)); 7708 } 7709 7710 /* Cancel any pending reset-throttle timeouts */ 7711 if (un->un_reset_throttle_timeid != NULL) { 7712 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7713 un->un_reset_throttle_timeid = NULL; 7714 mutex_exit(SD_MUTEX(un)); 7715 (void) untimeout(temp_id); 7716 mutex_enter(SD_MUTEX(un)); 7717 } 7718 7719 /* Cancel any pending retry timeouts */ 7720 if (un->un_retry_timeid != NULL) { 7721 timeout_id_t temp_id = un->un_retry_timeid; 7722 un->un_retry_timeid = NULL; 7723 mutex_exit(SD_MUTEX(un)); 7724 (void) untimeout(temp_id); 7725 mutex_enter(SD_MUTEX(un)); 7726 } 7727 7728 /* Cancel any pending delayed cv broadcast timeouts */ 7729 if (un->un_dcvb_timeid != NULL) { 7730 timeout_id_t temp_id = un->un_dcvb_timeid; 7731 un->un_dcvb_timeid = NULL; 7732 mutex_exit(SD_MUTEX(un)); 7733 (void) untimeout(temp_id); 7734 mutex_enter(SD_MUTEX(un)); 7735 } 7736 7737 mutex_exit(SD_MUTEX(un)); 7738 7739 /* There should not be any in-progress I/O so ASSERT this check */ 7740 ASSERT(un->un_ncmds_in_transport == 0); 7741 ASSERT(un->un_ncmds_in_driver == 0); 7742 7743 /* Do not free the softstate if the callback routine is active */ 7744 sd_sync_with_callback(un); 7745 7746 /* 7747 * Partition stats apparently are not used with removables. These would 7748 * not have been created during attach, so no need to clean them up... 7749 */ 7750 if (un->un_errstats != NULL) { 7751 kstat_delete(un->un_errstats); 7752 un->un_errstats = NULL; 7753 } 7754 7755 create_errstats_failed: 7756 7757 if (un->un_stats != NULL) { 7758 kstat_delete(un->un_stats); 7759 un->un_stats = NULL; 7760 } 7761 7762 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7763 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7764 7765 ddi_prop_remove_all(devi); 7766 sema_destroy(&un->un_semoclose); 7767 cv_destroy(&un->un_state_cv); 7768 7769 getrbuf_failed: 7770 7771 sd_free_rqs(un); 7772 7773 alloc_rqs_failed: 7774 7775 devp->sd_private = NULL; 7776 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7777 7778 get_softstate_failed: 7779 /* 7780 * Note: the man pages are unclear as to whether or not doing a 7781 * ddi_soft_state_free(sd_state, instance) is the right way to 7782 * clean up after the ddi_soft_state_zalloc() if the subsequent 7783 * ddi_get_soft_state() fails. The implication seems to be 7784 * that the get_soft_state cannot fail if the zalloc succeeds. 7785 */ 7786 ddi_soft_state_free(sd_state, instance); 7787 7788 probe_failed: 7789 scsi_unprobe(devp); 7790 7791 return (DDI_FAILURE); 7792 } 7793 7794 7795 /* 7796 * Function: sd_unit_detach 7797 * 7798 * Description: Performs DDI_DETACH processing for sddetach(). 
7799 * 7800 * Return Code: DDI_SUCCESS 7801 * DDI_FAILURE 7802 * 7803 * Context: Kernel thread context 7804 */ 7805 7806 static int 7807 sd_unit_detach(dev_info_t *devi) 7808 { 7809 struct scsi_device *devp; 7810 struct sd_lun *un; 7811 int i; 7812 int tgt; 7813 dev_t dev; 7814 dev_info_t *pdip = ddi_get_parent(devi); 7815 int instance = ddi_get_instance(devi); 7816 7817 mutex_enter(&sd_detach_mutex); 7818 7819 /* 7820 * Fail the detach for any of the following: 7821 * - Unable to get the sd_lun struct for the instance 7822 * - A layered driver has an outstanding open on the instance 7823 * - Another thread is already detaching this instance 7824 * - Another thread is currently performing an open 7825 */ 7826 devp = ddi_get_driver_private(devi); 7827 if ((devp == NULL) || 7828 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7829 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7830 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7831 mutex_exit(&sd_detach_mutex); 7832 return (DDI_FAILURE); 7833 } 7834 7835 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7836 7837 /* 7838 * Mark this instance as currently in a detach, to inhibit any 7839 * opens from a layered driver. 7840 */ 7841 un->un_detach_count++; 7842 mutex_exit(&sd_detach_mutex); 7843 7844 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7845 SCSI_ADDR_PROP_TARGET, -1); 7846 7847 dev = sd_make_device(SD_DEVINFO(un)); 7848 7849 #ifndef lint 7850 _NOTE(COMPETING_THREADS_NOW); 7851 #endif 7852 7853 mutex_enter(SD_MUTEX(un)); 7854 7855 /* 7856 * Fail the detach if there are any outstanding layered 7857 * opens on this device. 7858 */ 7859 for (i = 0; i < NDKMAP; i++) { 7860 if (un->un_ocmap.lyropen[i] != 0) { 7861 goto err_notclosed; 7862 } 7863 } 7864 7865 /* 7866 * Verify there are NO outstanding commands issued to this device. 7867 * ie, un_ncmds_in_transport == 0. 7868 * It's possible to have outstanding commands through the physio 7869 * code path, even though everything's closed. 7870 */ 7871 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7872 (un->un_direct_priority_timeid != NULL) || 7873 (un->un_state == SD_STATE_RWAIT)) { 7874 mutex_exit(SD_MUTEX(un)); 7875 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7876 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7877 goto err_stillbusy; 7878 } 7879 7880 /* 7881 * If we have the device reserved, release the reservation. 7882 */ 7883 if ((un->un_resvd_status & SD_RESERVE) && 7884 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7885 mutex_exit(SD_MUTEX(un)); 7886 /* 7887 * Note: sd_reserve_release sends a command to the device 7888 * via the sd_ioctlcmd() path, and can sleep. 7889 */ 7890 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7891 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7892 "sd_dr_detach: Cannot release reservation \n"); 7893 } 7894 } else { 7895 mutex_exit(SD_MUTEX(un)); 7896 } 7897 7898 /* 7899 * Untimeout any reserve recover, throttle reset, restart unit 7900 * and delayed broadcast timeout threads. Protect the timeout pointer 7901 * from getting nulled by their callback functions. 
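 *
 * Each cancellation below follows the same sketch (un_xxx_timeid is
 * a placeholder for the individual timeout ids):
 *
 *     timeout_id_t temp_id = un->un_xxx_timeid;
 *     un->un_xxx_timeid = NULL;
 *     mutex_exit(SD_MUTEX(un));
 *     (void) untimeout(temp_id);
 *     mutex_enter(SD_MUTEX(un));
 *
 * The id is copied and the field cleared while SD_MUTEX is held, and
 * the mutex is dropped around untimeout(), so that a callback firing
 * in the meantime can neither deadlock against us nor leave us calling
 * untimeout() with a stale or NULL value.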
7902 */ 7903 mutex_enter(SD_MUTEX(un)); 7904 if (un->un_resvd_timeid != NULL) { 7905 timeout_id_t temp_id = un->un_resvd_timeid; 7906 un->un_resvd_timeid = NULL; 7907 mutex_exit(SD_MUTEX(un)); 7908 (void) untimeout(temp_id); 7909 mutex_enter(SD_MUTEX(un)); 7910 } 7911 7912 if (un->un_reset_throttle_timeid != NULL) { 7913 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7914 un->un_reset_throttle_timeid = NULL; 7915 mutex_exit(SD_MUTEX(un)); 7916 (void) untimeout(temp_id); 7917 mutex_enter(SD_MUTEX(un)); 7918 } 7919 7920 if (un->un_startstop_timeid != NULL) { 7921 timeout_id_t temp_id = un->un_startstop_timeid; 7922 un->un_startstop_timeid = NULL; 7923 mutex_exit(SD_MUTEX(un)); 7924 (void) untimeout(temp_id); 7925 mutex_enter(SD_MUTEX(un)); 7926 } 7927 7928 if (un->un_dcvb_timeid != NULL) { 7929 timeout_id_t temp_id = un->un_dcvb_timeid; 7930 un->un_dcvb_timeid = NULL; 7931 mutex_exit(SD_MUTEX(un)); 7932 (void) untimeout(temp_id); 7933 } else { 7934 mutex_exit(SD_MUTEX(un)); 7935 } 7936 7937 /* Remove any pending reservation reclaim requests for this device */ 7938 sd_rmv_resv_reclaim_req(dev); 7939 7940 mutex_enter(SD_MUTEX(un)); 7941 7942 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7943 if (un->un_direct_priority_timeid != NULL) { 7944 timeout_id_t temp_id = un->un_direct_priority_timeid; 7945 un->un_direct_priority_timeid = NULL; 7946 mutex_exit(SD_MUTEX(un)); 7947 (void) untimeout(temp_id); 7948 mutex_enter(SD_MUTEX(un)); 7949 } 7950 7951 /* Cancel any active multi-host disk watch thread requests */ 7952 if (un->un_mhd_token != NULL) { 7953 mutex_exit(SD_MUTEX(un)); 7954 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7955 if (scsi_watch_request_terminate(un->un_mhd_token, 7956 SCSI_WATCH_TERMINATE_NOWAIT)) { 7957 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7958 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7959 /* 7960 * Note: We are returning here after having removed 7961 * some driver timeouts above. This is consistent with 7962 * the legacy implementation but perhaps the watch 7963 * terminate call should be made with the wait flag set. 7964 */ 7965 goto err_stillbusy; 7966 } 7967 mutex_enter(SD_MUTEX(un)); 7968 un->un_mhd_token = NULL; 7969 } 7970 7971 if (un->un_swr_token != NULL) { 7972 mutex_exit(SD_MUTEX(un)); 7973 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7974 if (scsi_watch_request_terminate(un->un_swr_token, 7975 SCSI_WATCH_TERMINATE_NOWAIT)) { 7976 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7977 "sd_dr_detach: Cannot cancel swr watch request\n"); 7978 /* 7979 * Note: We are returning here after having removed 7980 * some driver timeouts above. This is consistent with 7981 * the legacy implementation but perhaps the watch 7982 * terminate call should be made with the wait flag set. 7983 */ 7984 goto err_stillbusy; 7985 } 7986 mutex_enter(SD_MUTEX(un)); 7987 un->un_swr_token = NULL; 7988 } 7989 7990 mutex_exit(SD_MUTEX(un)); 7991 7992 /* 7993 * Clear any scsi_reset_notifies. We clear the reset notifies 7994 * if we have not registered one. 7995 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7996 */ 7997 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7998 sd_mhd_reset_notify_cb, (caddr_t)un); 7999 8000 /* 8001 * protect the timeout pointers from getting nulled by 8002 * their callback functions during the cancellation process. 8003 * In such a scenario untimeout can be invoked with a null value. 
8004 */ 8005 _NOTE(NO_COMPETING_THREADS_NOW); 8006 8007 mutex_enter(&un->un_pm_mutex); 8008 if (un->un_pm_idle_timeid != NULL) { 8009 timeout_id_t temp_id = un->un_pm_idle_timeid; 8010 un->un_pm_idle_timeid = NULL; 8011 mutex_exit(&un->un_pm_mutex); 8012 8013 /* 8014 * Timeout is active; cancel it. 8015 * Note that it'll never be active on a device 8016 * that does not support PM therefore we don't 8017 * have to check before calling pm_idle_component. 8018 */ 8019 (void) untimeout(temp_id); 8020 (void) pm_idle_component(SD_DEVINFO(un), 0); 8021 mutex_enter(&un->un_pm_mutex); 8022 } 8023 8024 /* 8025 * Check whether there is already a timeout scheduled for power 8026 * management. If yes then don't lower the power here, that's. 8027 * the timeout handler's job. 8028 */ 8029 if (un->un_pm_timeid != NULL) { 8030 timeout_id_t temp_id = un->un_pm_timeid; 8031 un->un_pm_timeid = NULL; 8032 mutex_exit(&un->un_pm_mutex); 8033 /* 8034 * Timeout is active; cancel it. 8035 * Note that it'll never be active on a device 8036 * that does not support PM therefore we don't 8037 * have to check before calling pm_idle_component. 8038 */ 8039 (void) untimeout(temp_id); 8040 (void) pm_idle_component(SD_DEVINFO(un), 0); 8041 8042 } else { 8043 mutex_exit(&un->un_pm_mutex); 8044 if ((un->un_f_pm_is_enabled == TRUE) && 8045 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8046 DDI_SUCCESS)) { 8047 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8048 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8049 /* 8050 * Fix for bug: 4297749, item # 13 8051 * The above test now includes a check to see if PM is 8052 * supported by this device before call 8053 * pm_lower_power(). 8054 * Note, the following is not dead code. The call to 8055 * pm_lower_power above will generate a call back into 8056 * our sdpower routine which might result in a timeout 8057 * handler getting activated. Therefore the following 8058 * code is valid and necessary. 8059 */ 8060 mutex_enter(&un->un_pm_mutex); 8061 if (un->un_pm_timeid != NULL) { 8062 timeout_id_t temp_id = un->un_pm_timeid; 8063 un->un_pm_timeid = NULL; 8064 mutex_exit(&un->un_pm_mutex); 8065 (void) untimeout(temp_id); 8066 (void) pm_idle_component(SD_DEVINFO(un), 0); 8067 } else { 8068 mutex_exit(&un->un_pm_mutex); 8069 } 8070 } 8071 } 8072 8073 /* 8074 * Cleanup from the scsi_ifsetcap() calls (437868) 8075 * Relocated here from above to be after the call to 8076 * pm_lower_power, which was getting errors. 8077 */ 8078 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8079 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8080 8081 /* 8082 * Currently, tagged queuing is supported per target based by HBA. 8083 * Setting this per lun instance actually sets the capability of this 8084 * target in HBA, which affects those luns already attached on the 8085 * same target. So during detach, we can only disable this capability 8086 * only when this is the only lun left on this target. By doing 8087 * this, we assume a target has the same tagged queuing capability 8088 * for every lun. The condition can be removed when HBA is changed to 8089 * support per lun based tagged queuing capability. 
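 *
 * Worked example: with LUN 0 and LUN 1 attached to the same target,
 * detaching LUN 1 sees a lun count of 2 at the check below, so tagged
 * queuing is left enabled for LUN 0's sake; only when the last
 * remaining LUN detaches (count <= 1) is the capability cleared.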
8090 */ 8091 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8092 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8093 } 8094 8095 if (un->un_f_is_fibre == FALSE) { 8096 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8097 } 8098 8099 /* 8100 * Remove any event callbacks, fibre only 8101 */ 8102 if (un->un_f_is_fibre == TRUE) { 8103 if ((un->un_insert_event != NULL) && 8104 (ddi_remove_event_handler(un->un_insert_cb_id) != 8105 DDI_SUCCESS)) { 8106 /* 8107 * Note: We are returning here after having done 8108 * substantial cleanup above. This is consistent 8109 * with the legacy implementation but this may not 8110 * be the right thing to do. 8111 */ 8112 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8113 "sd_dr_detach: Cannot cancel insert event\n"); 8114 goto err_remove_event; 8115 } 8116 un->un_insert_event = NULL; 8117 8118 if ((un->un_remove_event != NULL) && 8119 (ddi_remove_event_handler(un->un_remove_cb_id) != 8120 DDI_SUCCESS)) { 8121 /* 8122 * Note: We are returning here after having done 8123 * substantial cleanup above. This is consistent 8124 * with the legacy implementation but this may not 8125 * be the right thing to do. 8126 */ 8127 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8128 "sd_dr_detach: Cannot cancel remove event\n"); 8129 goto err_remove_event; 8130 } 8131 un->un_remove_event = NULL; 8132 } 8133 8134 /* Do not free the softstate if the callback routine is active */ 8135 sd_sync_with_callback(un); 8136 8137 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8138 cmlb_free_handle(&un->un_cmlbhandle); 8139 8140 /* 8141 * Hold the detach mutex here, to make sure that no other threads ever 8142 * can access a (partially) freed soft state structure. 8143 */ 8144 mutex_enter(&sd_detach_mutex); 8145 8146 /* 8147 * Clean up the soft state struct. 8148 * Cleanup is done in reverse order of allocs/inits. 8149 * At this point there should be no competing threads anymore. 8150 */ 8151 8152 /* Unregister and free device id. */ 8153 ddi_devid_unregister(devi); 8154 if (un->un_devid) { 8155 ddi_devid_free(un->un_devid); 8156 un->un_devid = NULL; 8157 } 8158 8159 /* 8160 * Destroy wmap cache if it exists. 8161 */ 8162 if (un->un_wm_cache != NULL) { 8163 kmem_cache_destroy(un->un_wm_cache); 8164 un->un_wm_cache = NULL; 8165 } 8166 8167 /* 8168 * kstat cleanup is done in detach for all device types (4363169). 8169 * We do not want to fail detach if the device kstats are not deleted 8170 * since there is a confusion about the devo_refcnt for the device. 8171 * We just delete the kstats and let detach complete successfully. 8172 */ 8173 if (un->un_stats != NULL) { 8174 kstat_delete(un->un_stats); 8175 un->un_stats = NULL; 8176 } 8177 if (un->un_errstats != NULL) { 8178 kstat_delete(un->un_errstats); 8179 un->un_errstats = NULL; 8180 } 8181 8182 /* Remove partition stats */ 8183 if (un->un_f_pkstats_enabled) { 8184 for (i = 0; i < NSDMAP; i++) { 8185 if (un->un_pstats[i] != NULL) { 8186 kstat_delete(un->un_pstats[i]); 8187 un->un_pstats[i] = NULL; 8188 } 8189 } 8190 } 8191 8192 /* Remove xbuf registration */ 8193 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8194 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8195 8196 /* Remove driver properties */ 8197 ddi_prop_remove_all(devi); 8198 8199 mutex_destroy(&un->un_pm_mutex); 8200 cv_destroy(&un->un_pm_busy_cv); 8201 8202 cv_destroy(&un->un_wcc_cv); 8203 8204 /* Open/close semaphore */ 8205 sema_destroy(&un->un_semoclose); 8206 8207 /* Removable media condvar. 
*/ 8208 cv_destroy(&un->un_state_cv); 8209 8210 /* Suspend/resume condvar. */ 8211 cv_destroy(&un->un_suspend_cv); 8212 cv_destroy(&un->un_disk_busy_cv); 8213 8214 sd_free_rqs(un); 8215 8216 /* Free up soft state */ 8217 devp->sd_private = NULL; 8218 8219 bzero(un, sizeof (struct sd_lun)); 8220 ddi_soft_state_free(sd_state, instance); 8221 8222 mutex_exit(&sd_detach_mutex); 8223 8224 /* This frees up the INQUIRY data associated with the device. */ 8225 scsi_unprobe(devp); 8226 8227 /* 8228 * After successfully detaching an instance, we update the information 8229 * of how many luns have been attached in the relative target and 8230 * controller for parallel SCSI. This information is used when sd tries 8231 * to set the tagged queuing capability in HBA. 8232 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8233 * check if the device is parallel SCSI. However, we don't need to 8234 * check here because we've already checked during attach. No device 8235 * that is not parallel SCSI is in the chain. 8236 */ 8237 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8238 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8239 } 8240 8241 return (DDI_SUCCESS); 8242 8243 err_notclosed: 8244 mutex_exit(SD_MUTEX(un)); 8245 8246 err_stillbusy: 8247 _NOTE(NO_COMPETING_THREADS_NOW); 8248 8249 err_remove_event: 8250 mutex_enter(&sd_detach_mutex); 8251 un->un_detach_count--; 8252 mutex_exit(&sd_detach_mutex); 8253 8254 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8255 return (DDI_FAILURE); 8256 } 8257 8258 8259 /* 8260 * Function: sd_create_errstats 8261 * 8262 * Description: This routine instantiates the device error stats. 8263 * 8264 * Note: During attach the stats are instantiated first so they are 8265 * available for attach-time routines that utilize the driver 8266 * iopath to send commands to the device. The stats are initialized 8267 * separately so data obtained during some attach-time routines is 8268 * available. 
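 *
 * Naming sketch (illustrative, instance 0 of the sd build): the code
 * below creates a named kstat in module "sderr" with name "sd0,err"
 * and class "device_error"; these are the counters reported by
 * iostat -E.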
(4362483) 8269 * 8270 * Arguments: un - driver soft state (unit) structure 8271 * instance - driver instance 8272 * 8273 * Context: Kernel thread context 8274 */ 8275 8276 static void 8277 sd_create_errstats(struct sd_lun *un, int instance) 8278 { 8279 struct sd_errstats *stp; 8280 char kstatmodule_err[KSTAT_STRLEN]; 8281 char kstatname[KSTAT_STRLEN]; 8282 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8283 8284 ASSERT(un != NULL); 8285 8286 if (un->un_errstats != NULL) { 8287 return; 8288 } 8289 8290 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8291 "%serr", sd_label); 8292 (void) snprintf(kstatname, sizeof (kstatname), 8293 "%s%d,err", sd_label, instance); 8294 8295 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8296 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8297 8298 if (un->un_errstats == NULL) { 8299 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8300 "sd_create_errstats: Failed kstat_create\n"); 8301 return; 8302 } 8303 8304 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8305 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8306 KSTAT_DATA_UINT32); 8307 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8308 KSTAT_DATA_UINT32); 8309 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8310 KSTAT_DATA_UINT32); 8311 kstat_named_init(&stp->sd_vid, "Vendor", 8312 KSTAT_DATA_CHAR); 8313 kstat_named_init(&stp->sd_pid, "Product", 8314 KSTAT_DATA_CHAR); 8315 kstat_named_init(&stp->sd_revision, "Revision", 8316 KSTAT_DATA_CHAR); 8317 kstat_named_init(&stp->sd_serial, "Serial No", 8318 KSTAT_DATA_CHAR); 8319 kstat_named_init(&stp->sd_capacity, "Size", 8320 KSTAT_DATA_ULONGLONG); 8321 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8322 KSTAT_DATA_UINT32); 8323 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8324 KSTAT_DATA_UINT32); 8325 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8326 KSTAT_DATA_UINT32); 8327 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8328 KSTAT_DATA_UINT32); 8329 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8330 KSTAT_DATA_UINT32); 8331 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8332 KSTAT_DATA_UINT32); 8333 8334 un->un_errstats->ks_private = un; 8335 un->un_errstats->ks_update = nulldev; 8336 8337 kstat_install(un->un_errstats); 8338 } 8339 8340 8341 /* 8342 * Function: sd_set_errstats 8343 * 8344 * Description: This routine sets the value of the vendor id, product id, 8345 * revision, serial number, and capacity device error stats. 8346 * 8347 * Note: During attach the stats are instantiated first so they are 8348 * available for attach-time routines that utilize the driver 8349 * iopath to send commands to the device. The stats are initialized 8350 * separately so data obtained during some attach-time routines is 8351 * available. 
(4362483) 8352 * 8353 * Arguments: un - driver soft state (unit) structure 8354 * 8355 * Context: Kernel thread context 8356 */ 8357 8358 static void 8359 sd_set_errstats(struct sd_lun *un) 8360 { 8361 struct sd_errstats *stp; 8362 8363 ASSERT(un != NULL); 8364 ASSERT(un->un_errstats != NULL); 8365 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8366 ASSERT(stp != NULL); 8367 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8368 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8369 (void) strncpy(stp->sd_revision.value.c, 8370 un->un_sd->sd_inq->inq_revision, 4); 8371 8372 /* 8373 * All the errstats are persistent across detach/attach, 8374 * so reset all the errstats here in case of the hot 8375 * replacement of disk drives, except for not changed 8376 * Sun qualified drives. 8377 */ 8378 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8379 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8380 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8381 stp->sd_softerrs.value.ui32 = 0; 8382 stp->sd_harderrs.value.ui32 = 0; 8383 stp->sd_transerrs.value.ui32 = 0; 8384 stp->sd_rq_media_err.value.ui32 = 0; 8385 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8386 stp->sd_rq_nodev_err.value.ui32 = 0; 8387 stp->sd_rq_recov_err.value.ui32 = 0; 8388 stp->sd_rq_illrq_err.value.ui32 = 0; 8389 stp->sd_rq_pfa_err.value.ui32 = 0; 8390 } 8391 8392 /* 8393 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8394 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8395 * (4376302)) 8396 */ 8397 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8398 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8399 sizeof (SD_INQUIRY(un)->inq_serial)); 8400 } 8401 8402 if (un->un_f_blockcount_is_valid != TRUE) { 8403 /* 8404 * Set capacity error stat to 0 for no media. This ensures 8405 * a valid capacity is displayed in response to 'iostat -E' 8406 * when no media is present in the device. 8407 */ 8408 stp->sd_capacity.value.ui64 = 0; 8409 } else { 8410 /* 8411 * Multiply un_blockcount by un->un_sys_blocksize to get 8412 * capacity. 8413 * 8414 * Note: for non-512 blocksize devices "un_blockcount" has been 8415 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8416 * (un_tgt_blocksize / un->un_sys_blocksize). 8417 */ 8418 stp->sd_capacity.value.ui64 = (uint64_t) 8419 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8420 } 8421 } 8422 8423 8424 /* 8425 * Function: sd_set_pstats 8426 * 8427 * Description: This routine instantiates and initializes the partition 8428 * stats for each partition with more than zero blocks. 8429 * (4363169) 8430 * 8431 * Arguments: un - driver soft state (unit) structure 8432 * 8433 * Context: Kernel thread context 8434 */ 8435 8436 static void 8437 sd_set_pstats(struct sd_lun *un) 8438 { 8439 char kstatname[KSTAT_STRLEN]; 8440 int instance; 8441 int i; 8442 diskaddr_t nblks = 0; 8443 char *partname = NULL; 8444 8445 ASSERT(un != NULL); 8446 8447 instance = ddi_get_instance(SD_DEVINFO(un)); 8448 8449 /* Note:x86: is this a VTOC8/VTOC16 difference? 
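 * (Illustrative: with instance 0 and partition name "a", the
 * snprintf below yields a partition kstat named "sd0,a".)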
*/ 8450 for (i = 0; i < NSDMAP; i++) { 8451 8452 if (cmlb_partinfo(un->un_cmlbhandle, i, 8453 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8454 continue; 8455 mutex_enter(SD_MUTEX(un)); 8456 8457 if ((un->un_pstats[i] == NULL) && 8458 (nblks != 0)) { 8459 8460 (void) snprintf(kstatname, sizeof (kstatname), 8461 "%s%d,%s", sd_label, instance, 8462 partname); 8463 8464 un->un_pstats[i] = kstat_create(sd_label, 8465 instance, kstatname, "partition", KSTAT_TYPE_IO, 8466 1, KSTAT_FLAG_PERSISTENT); 8467 if (un->un_pstats[i] != NULL) { 8468 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8469 kstat_install(un->un_pstats[i]); 8470 } 8471 } 8472 mutex_exit(SD_MUTEX(un)); 8473 } 8474 } 8475 8476 8477 #if (defined(__fibre)) 8478 /* 8479 * Function: sd_init_event_callbacks 8480 * 8481 * Description: This routine initializes the insertion and removal event 8482 * callbacks. (fibre only) 8483 * 8484 * Arguments: un - driver soft state (unit) structure 8485 * 8486 * Context: Kernel thread context 8487 */ 8488 8489 static void 8490 sd_init_event_callbacks(struct sd_lun *un) 8491 { 8492 ASSERT(un != NULL); 8493 8494 if ((un->un_insert_event == NULL) && 8495 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8496 &un->un_insert_event) == DDI_SUCCESS)) { 8497 /* 8498 * Add the callback for an insertion event 8499 */ 8500 (void) ddi_add_event_handler(SD_DEVINFO(un), 8501 un->un_insert_event, sd_event_callback, (void *)un, 8502 &(un->un_insert_cb_id)); 8503 } 8504 8505 if ((un->un_remove_event == NULL) && 8506 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8507 &un->un_remove_event) == DDI_SUCCESS)) { 8508 /* 8509 * Add the callback for a removal event 8510 */ 8511 (void) ddi_add_event_handler(SD_DEVINFO(un), 8512 un->un_remove_event, sd_event_callback, (void *)un, 8513 &(un->un_remove_cb_id)); 8514 } 8515 } 8516 8517 8518 /* 8519 * Function: sd_event_callback 8520 * 8521 * Description: This routine handles insert/remove events (photon). The 8522 * state is changed to OFFLINE which can be used to supress 8523 * error msgs. (fibre only) 8524 * 8525 * Arguments: un - driver soft state (unit) structure 8526 * 8527 * Context: Callout thread context 8528 */ 8529 /* ARGSUSED */ 8530 static void 8531 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8532 void *bus_impldata) 8533 { 8534 struct sd_lun *un = (struct sd_lun *)arg; 8535 8536 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8537 if (event == un->un_insert_event) { 8538 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8539 mutex_enter(SD_MUTEX(un)); 8540 if (un->un_state == SD_STATE_OFFLINE) { 8541 if (un->un_last_state != SD_STATE_SUSPENDED) { 8542 un->un_state = un->un_last_state; 8543 } else { 8544 /* 8545 * We have gone through SUSPEND/RESUME while 8546 * we were offline. Restore the last state 8547 */ 8548 un->un_state = un->un_save_state; 8549 } 8550 } 8551 mutex_exit(SD_MUTEX(un)); 8552 8553 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8554 } else if (event == un->un_remove_event) { 8555 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8556 mutex_enter(SD_MUTEX(un)); 8557 /* 8558 * We need to handle an event callback that occurs during 8559 * the suspend operation, since we don't prevent it. 
8560 */ 8561 if (un->un_state != SD_STATE_OFFLINE) { 8562 if (un->un_state != SD_STATE_SUSPENDED) { 8563 New_state(un, SD_STATE_OFFLINE); 8564 } else { 8565 un->un_last_state = SD_STATE_OFFLINE; 8566 } 8567 } 8568 mutex_exit(SD_MUTEX(un)); 8569 } else { 8570 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8571 "!Unknown event\n"); 8572 } 8573 8574 } 8575 #endif 8576 8577 /* 8578 * Function: sd_cache_control() 8579 * 8580 * Description: This routine is the driver entry point for setting 8581 * read and write caching by modifying the WCE (write cache 8582 * enable) and RCD (read cache disable) bits of mode 8583 * page 8 (MODEPAGE_CACHING). 8584 * 8585 * Arguments: un - driver soft state (unit) structure 8586 * rcd_flag - flag for controlling the read cache 8587 * wce_flag - flag for controlling the write cache 8588 * 8589 * Return Code: EIO 8590 * code returned by sd_send_scsi_MODE_SENSE and 8591 * sd_send_scsi_MODE_SELECT 8592 * 8593 * Context: Kernel Thread 8594 */ 8595 8596 static int 8597 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8598 { 8599 struct mode_caching *mode_caching_page; 8600 uchar_t *header; 8601 size_t buflen; 8602 int hdrlen; 8603 int bd_len; 8604 int rval = 0; 8605 struct mode_header_grp2 *mhp; 8606 8607 ASSERT(un != NULL); 8608 8609 /* 8610 * Do a test unit ready, otherwise a mode sense may not work if this 8611 * is the first command sent to the device after boot. 8612 */ 8613 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8614 8615 if (un->un_f_cfg_is_atapi == TRUE) { 8616 hdrlen = MODE_HEADER_LENGTH_GRP2; 8617 } else { 8618 hdrlen = MODE_HEADER_LENGTH; 8619 } 8620 8621 /* 8622 * Allocate memory for the retrieved mode page and its headers. Set 8623 * a pointer to the page itself. Use mode_cache_scsi3 to insure 8624 * we get all of the mode sense data otherwise, the mode select 8625 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8626 */ 8627 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8628 sizeof (struct mode_cache_scsi3); 8629 8630 header = kmem_zalloc(buflen, KM_SLEEP); 8631 8632 /* Get the information from the device. */ 8633 if (un->un_f_cfg_is_atapi == TRUE) { 8634 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8635 MODEPAGE_CACHING, SD_PATH_DIRECT); 8636 } else { 8637 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8638 MODEPAGE_CACHING, SD_PATH_DIRECT); 8639 } 8640 if (rval != 0) { 8641 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8642 "sd_cache_control: Mode Sense Failed\n"); 8643 kmem_free(header, buflen); 8644 return (rval); 8645 } 8646 8647 /* 8648 * Determine size of Block Descriptors in order to locate 8649 * the mode page data. ATAPI devices return 0, SCSI devices 8650 * should return MODE_BLK_DESC_LENGTH. 
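 *
 * The sense data returned above is laid out as
 *
 *     [ mode header (hdrlen) ][ block descriptor(s) (bd_len) ][ caching page ]
 *
 * which is why mode_caching_page below is computed as
 * header + hdrlen + bd_len.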
8651 */ 8652 if (un->un_f_cfg_is_atapi == TRUE) { 8653 mhp = (struct mode_header_grp2 *)header; 8654 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8655 } else { 8656 bd_len = ((struct mode_header *)header)->bdesc_length; 8657 } 8658 8659 if (bd_len > MODE_BLK_DESC_LENGTH) { 8660 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8661 "sd_cache_control: Mode Sense returned invalid " 8662 "block descriptor length\n"); 8663 kmem_free(header, buflen); 8664 return (EIO); 8665 } 8666 8667 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8668 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8669 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8670 " caching page code mismatch %d\n", 8671 mode_caching_page->mode_page.code); 8672 kmem_free(header, buflen); 8673 return (EIO); 8674 } 8675 8676 /* Check the relevant bits on successful mode sense. */ 8677 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8678 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8679 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8680 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8681 8682 size_t sbuflen; 8683 uchar_t save_pg; 8684 8685 /* 8686 * Construct select buffer length based on the 8687 * length of the sense data returned. 8688 */ 8689 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8690 sizeof (struct mode_page) + 8691 (int)mode_caching_page->mode_page.length; 8692 8693 /* 8694 * Set the caching bits as requested. 8695 */ 8696 if (rcd_flag == SD_CACHE_ENABLE) 8697 mode_caching_page->rcd = 0; 8698 else if (rcd_flag == SD_CACHE_DISABLE) 8699 mode_caching_page->rcd = 1; 8700 8701 if (wce_flag == SD_CACHE_ENABLE) 8702 mode_caching_page->wce = 1; 8703 else if (wce_flag == SD_CACHE_DISABLE) 8704 mode_caching_page->wce = 0; 8705 8706 /* 8707 * Save the page if the mode sense says the 8708 * drive supports it. 8709 */ 8710 save_pg = mode_caching_page->mode_page.ps ? 8711 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8712 8713 /* Clear reserved bits before mode select. */ 8714 mode_caching_page->mode_page.ps = 0; 8715 8716 /* 8717 * Clear out mode header for mode select. 8718 * The rest of the retrieved page will be reused. 8719 */ 8720 bzero(header, hdrlen); 8721 8722 if (un->un_f_cfg_is_atapi == TRUE) { 8723 mhp = (struct mode_header_grp2 *)header; 8724 mhp->bdesc_length_hi = bd_len >> 8; 8725 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8726 } else { 8727 ((struct mode_header *)header)->bdesc_length = bd_len; 8728 } 8729 8730 /* Issue mode select to change the cache settings */ 8731 if (un->un_f_cfg_is_atapi == TRUE) { 8732 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8733 sbuflen, save_pg, SD_PATH_DIRECT); 8734 } else { 8735 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8736 sbuflen, save_pg, SD_PATH_DIRECT); 8737 } 8738 } 8739 8740 kmem_free(header, buflen); 8741 return (rval); 8742 } 8743 8744 8745 /* 8746 * Function: sd_get_write_cache_enabled() 8747 * 8748 * Description: This routine is the driver entry point for determining if 8749 * write caching is enabled. It examines the WCE (write cache 8750 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
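 *
 * Typical use, as in sd_unit_attach above (sketch only):
 *
 *     int wc_enabled;
 *
 *     (void) sd_get_write_cache_enabled(un, &wc_enabled);
 *     mutex_enter(SD_MUTEX(un));
 *     un->un_f_write_cache_enabled = (wc_enabled != 0);
 *     mutex_exit(SD_MUTEX(un));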
8751 * 8752 * Arguments: un - driver soft state (unit) structure 8753 * is_enabled - pointer to int where write cache enabled state 8754 * is returned (non-zero -> write cache enabled) 8755 * 8756 * 8757 * Return Code: EIO 8758 * code returned by sd_send_scsi_MODE_SENSE 8759 * 8760 * Context: Kernel Thread 8761 * 8762 * NOTE: If ioctl is added to disable write cache, this sequence should 8763 * be followed so that no locking is required for accesses to 8764 * un->un_f_write_cache_enabled: 8765 * do mode select to clear wce 8766 * do synchronize cache to flush cache 8767 * set un->un_f_write_cache_enabled = FALSE 8768 * 8769 * Conversely, an ioctl to enable the write cache should be done 8770 * in this order: 8771 * set un->un_f_write_cache_enabled = TRUE 8772 * do mode select to set wce 8773 */ 8774 8775 static int 8776 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8777 { 8778 struct mode_caching *mode_caching_page; 8779 uchar_t *header; 8780 size_t buflen; 8781 int hdrlen; 8782 int bd_len; 8783 int rval = 0; 8784 8785 ASSERT(un != NULL); 8786 ASSERT(is_enabled != NULL); 8787 8788 /* in case of error, flag as enabled */ 8789 *is_enabled = TRUE; 8790 8791 /* 8792 * Do a test unit ready, otherwise a mode sense may not work if this 8793 * is the first command sent to the device after boot. 8794 */ 8795 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8796 8797 if (un->un_f_cfg_is_atapi == TRUE) { 8798 hdrlen = MODE_HEADER_LENGTH_GRP2; 8799 } else { 8800 hdrlen = MODE_HEADER_LENGTH; 8801 } 8802 8803 /* 8804 * Allocate memory for the retrieved mode page and its headers. Set 8805 * a pointer to the page itself. 8806 */ 8807 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8808 header = kmem_zalloc(buflen, KM_SLEEP); 8809 8810 /* Get the information from the device. */ 8811 if (un->un_f_cfg_is_atapi == TRUE) { 8812 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8813 MODEPAGE_CACHING, SD_PATH_DIRECT); 8814 } else { 8815 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8816 MODEPAGE_CACHING, SD_PATH_DIRECT); 8817 } 8818 if (rval != 0) { 8819 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8820 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8821 kmem_free(header, buflen); 8822 return (rval); 8823 } 8824 8825 /* 8826 * Determine size of Block Descriptors in order to locate 8827 * the mode page data. ATAPI devices return 0, SCSI devices 8828 * should return MODE_BLK_DESC_LENGTH. 
8829 */ 8830 if (un->un_f_cfg_is_atapi == TRUE) { 8831 struct mode_header_grp2 *mhp; 8832 mhp = (struct mode_header_grp2 *)header; 8833 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8834 } else { 8835 bd_len = ((struct mode_header *)header)->bdesc_length; 8836 } 8837 8838 if (bd_len > MODE_BLK_DESC_LENGTH) { 8839 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8840 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8841 "block descriptor length\n"); 8842 kmem_free(header, buflen); 8843 return (EIO); 8844 } 8845 8846 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8847 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8848 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 8849 " caching page code mismatch %d\n", 8850 mode_caching_page->mode_page.code); 8851 kmem_free(header, buflen); 8852 return (EIO); 8853 } 8854 *is_enabled = mode_caching_page->wce; 8855 8856 kmem_free(header, buflen); 8857 return (0); 8858 } 8859 8860 /* 8861 * Function: sd_get_nv_sup() 8862 * 8863 * Description: This routine is the driver entry point for 8864 * determining whether a non-volatile cache is supported. This 8865 * determination process works as follows: 8866 * 8867 * 1. sd first queries sd.conf to see whether the 8868 * suppress_cache_flush bit is set for this device. 8869 * 8870 * 2. if not set there, it then queries the internal disk table. 8871 * 8872 * 3. if either sd.conf or the internal disk table specifies 8873 * that cache flushes be suppressed, we don't bother checking 8874 * the NV_SUP bit. 8875 * 8876 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 8877 * the optional INQUIRY VPD page 0x86. If the device 8878 * supports VPD page 0x86, sd examines the NV_SUP 8879 * (non-volatile cache support) bit in the INQUIRY VPD page 8880 * 0x86: 8881 * o If the NV_SUP bit is set, sd assumes the device has a 8882 * non-volatile cache and sets 8883 * un_f_sync_nv_supported to TRUE. 8884 * o Otherwise the cache is not non-volatile, and 8885 * un_f_sync_nv_supported is set to FALSE. 8886 * 8887 * Arguments: un - driver soft state (unit) structure 8888 * 8889 * Return Code: none 8890 * 8891 * Context: Kernel Thread 8892 */ 8893 8894 static void 8895 sd_get_nv_sup(struct sd_lun *un) 8896 { 8897 int rval = 0; 8898 uchar_t *inq86 = NULL; 8899 size_t inq86_len = MAX_INQUIRY_SIZE; 8900 size_t inq86_resid = 0; 8901 struct dk_callback *dkc; 8902 8903 ASSERT(un != NULL); 8904 8905 mutex_enter(SD_MUTEX(un)); 8906 8907 /* 8908 * Be conservative on the device's support of 8909 * the SYNC_NV bit: un_f_sync_nv_supported is 8910 * initialized to FALSE. 8911 */ 8912 un->un_f_sync_nv_supported = FALSE; 8913 8914 /* 8915 * If either sd.conf or the internal disk table 8916 * specifies that cache flushes be suppressed, then 8917 * we don't bother checking the NV_SUP bit.
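 *
 * Otherwise the test performed below reduces to the following sketch
 * (SD_VPD_NV_SUP is the mask for the NV_SUP bit in byte 6 of the
 * VPD 0x86 data):
 *
 *	if (sd_send_scsi_INQUIRY(un, inq86, inq86_len, 0x01, 0x86,
 *	    &inq86_resid) == 0 &&
 *	    (inq86_len - inq86_resid > 6) &&
 *	    (inq86[6] & SD_VPD_NV_SUP))
 *		un->un_f_sync_nv_supported = TRUE;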
8918 */ 8919 if (un->un_f_suppress_cache_flush == TRUE) { 8920 mutex_exit(SD_MUTEX(un)); 8921 return; 8922 } 8923 8924 if (sd_check_vpd_page_support(un) == 0 && 8925 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 8926 mutex_exit(SD_MUTEX(un)); 8927 /* collect page 86 data if available */ 8928 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 8929 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len, 8930 0x01, 0x86, &inq86_resid); 8931 8932 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 8933 SD_TRACE(SD_LOG_COMMON, un, 8934 "sd_get_nv_sup: \ 8935 successfully get VPD page: %x \ 8936 PAGE LENGTH: %x BYTE 6: %x\n", 8937 inq86[1], inq86[3], inq86[6]); 8938 8939 mutex_enter(SD_MUTEX(un)); 8940 /* 8941 * check the value of NV_SUP bit: only if the device 8942 * reports NV_SUP bit to be 1, the 8943 * un_f_sync_nv_supported bit will be set to true. 8944 */ 8945 if (inq86[6] & SD_VPD_NV_SUP) { 8946 un->un_f_sync_nv_supported = TRUE; 8947 } 8948 mutex_exit(SD_MUTEX(un)); 8949 } 8950 kmem_free(inq86, inq86_len); 8951 } else { 8952 mutex_exit(SD_MUTEX(un)); 8953 } 8954 8955 /* 8956 * Send a SYNC CACHE command to check whether 8957 * SYNC_NV bit is supported. This command should have 8958 * un_f_sync_nv_supported set to correct value. 8959 */ 8960 mutex_enter(SD_MUTEX(un)); 8961 if (un->un_f_sync_nv_supported) { 8962 mutex_exit(SD_MUTEX(un)); 8963 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 8964 dkc->dkc_flag = FLUSH_VOLATILE; 8965 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 8966 8967 /* 8968 * Send a TEST UNIT READY command to the device. This should 8969 * clear any outstanding UNIT ATTENTION that may be present. 8970 */ 8971 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR); 8972 8973 kmem_free(dkc, sizeof (struct dk_callback)); 8974 } else { 8975 mutex_exit(SD_MUTEX(un)); 8976 } 8977 8978 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 8979 un_f_suppress_cache_flush is set to %d\n", 8980 un->un_f_suppress_cache_flush); 8981 } 8982 8983 /* 8984 * Function: sd_make_device 8985 * 8986 * Description: Utility routine to return the Solaris device number from 8987 * the data in the device's dev_info structure. 8988 * 8989 * Return Code: The Solaris device number 8990 * 8991 * Context: Any 8992 */ 8993 8994 static dev_t 8995 sd_make_device(dev_info_t *devi) 8996 { 8997 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8998 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8999 } 9000 9001 9002 /* 9003 * Function: sd_pm_entry 9004 * 9005 * Description: Called at the start of a new command to manage power 9006 * and busy status of a device. This includes determining whether 9007 * the current power state of the device is sufficient for 9008 * performing the command or whether it must be changed. 9009 * The PM framework is notified appropriately. 9010 * Only with a return status of DDI_SUCCESS will the 9011 * component be busy to the framework. 9012 * 9013 * All callers of sd_pm_entry must check the return status 9014 * and only call sd_pm_exit it it was DDI_SUCCESS. A status 9015 * of DDI_FAILURE indicates the device failed to power up. 9016 * In this case un_pm_count has been adjusted so the result 9017 * on exit is still powered down, ie. count is less than 0. 9018 * Calling sd_pm_exit with this count value hits an ASSERT. 9019 * 9020 * Return Code: DDI_SUCCESS or DDI_FAILURE 9021 * 9022 * Context: Kernel thread context. 
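 *
 *		Typical caller pattern (sketch; this is how the open, close
 *		and I/O paths in this file use the interface):
 *
 *		    if (sd_pm_entry(un) == DDI_SUCCESS) {
 *			    ... issue the command(s) ...
 *			    sd_pm_exit(un);
 *		    } else {
 *			    ... fail the request; do NOT call sd_pm_exit() ...
 *		    }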
9023 */ 9024 9025 static int 9026 sd_pm_entry(struct sd_lun *un) 9027 { 9028 int return_status = DDI_SUCCESS; 9029 9030 ASSERT(!mutex_owned(SD_MUTEX(un))); 9031 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9032 9033 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9034 9035 if (un->un_f_pm_is_enabled == FALSE) { 9036 SD_TRACE(SD_LOG_IO_PM, un, 9037 "sd_pm_entry: exiting, PM not enabled\n"); 9038 return (return_status); 9039 } 9040 9041 /* 9042 * Just increment a counter if PM is enabled. On the transition from 9043 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9044 * the count with each IO and mark the device as idle when the count 9045 * hits 0. 9046 * 9047 * If the count is less than 0 the device is powered down. If a powered 9048 * down device is successfully powered up then the count must be 9049 * incremented to reflect the power up. Note that it'll get incremented 9050 * a second time to become busy. 9051 * 9052 * Because the following has the potential to change the device state 9053 * and must release the un_pm_mutex to do so, only one thread can be 9054 * allowed through at a time. 9055 */ 9056 9057 mutex_enter(&un->un_pm_mutex); 9058 while (un->un_pm_busy == TRUE) { 9059 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9060 } 9061 un->un_pm_busy = TRUE; 9062 9063 if (un->un_pm_count < 1) { 9064 9065 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9066 9067 /* 9068 * Indicate we are now busy so the framework won't attempt to 9069 * power down the device. This call will only fail if either 9070 * we passed a bad component number or the device has no 9071 * components. Neither of these should ever happen. 9072 */ 9073 mutex_exit(&un->un_pm_mutex); 9074 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9075 ASSERT(return_status == DDI_SUCCESS); 9076 9077 mutex_enter(&un->un_pm_mutex); 9078 9079 if (un->un_pm_count < 0) { 9080 mutex_exit(&un->un_pm_mutex); 9081 9082 SD_TRACE(SD_LOG_IO_PM, un, 9083 "sd_pm_entry: power up component\n"); 9084 9085 /* 9086 * pm_raise_power will cause sdpower to be called 9087 * which brings the device power level to the 9088 * desired state, ON in this case. If successful, 9089 * un_pm_count and un_power_level will be updated 9090 * appropriately. 9091 */ 9092 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9093 SD_SPINDLE_ON); 9094 9095 mutex_enter(&un->un_pm_mutex); 9096 9097 if (return_status != DDI_SUCCESS) { 9098 /* 9099 * Power up failed. 9100 * Idle the device and adjust the count 9101 * so the result on exit is that we're 9102 * still powered down, ie. count is less than 0. 9103 */ 9104 SD_TRACE(SD_LOG_IO_PM, un, 9105 "sd_pm_entry: power up failed," 9106 " idle the component\n"); 9107 9108 (void) pm_idle_component(SD_DEVINFO(un), 0); 9109 un->un_pm_count--; 9110 } else { 9111 /* 9112 * Device is powered up, verify the 9113 * count is non-negative. 9114 * This is debug only. 9115 */ 9116 ASSERT(un->un_pm_count == 0); 9117 } 9118 } 9119 9120 if (return_status == DDI_SUCCESS) { 9121 /* 9122 * For performance, now that the device has been tagged 9123 * as busy, and it's known to be powered up, update the 9124 * chain types to use jump tables that do not include 9125 * pm. This significantly lowers the overhead and 9126 * therefore improves performance. 
9127 */ 9128 9129 mutex_exit(&un->un_pm_mutex); 9130 mutex_enter(SD_MUTEX(un)); 9131 SD_TRACE(SD_LOG_IO_PM, un, 9132 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9133 un->un_uscsi_chain_type); 9134 9135 if (un->un_f_non_devbsize_supported) { 9136 un->un_buf_chain_type = 9137 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9138 } else { 9139 un->un_buf_chain_type = 9140 SD_CHAIN_INFO_DISK_NO_PM; 9141 } 9142 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9143 9144 SD_TRACE(SD_LOG_IO_PM, un, 9145 " changed uscsi_chain_type to %d\n", 9146 un->un_uscsi_chain_type); 9147 mutex_exit(SD_MUTEX(un)); 9148 mutex_enter(&un->un_pm_mutex); 9149 9150 if (un->un_pm_idle_timeid == NULL) { 9151 /* 300 ms. */ 9152 un->un_pm_idle_timeid = 9153 timeout(sd_pm_idletimeout_handler, un, 9154 (drv_usectohz((clock_t)300000))); 9155 /* 9156 * Include an extra call to busy which keeps the 9157 * device busy with-respect-to the PM layer 9158 * until the timer fires, at which time it'll 9159 * get the extra idle call. 9160 */ 9161 (void) pm_busy_component(SD_DEVINFO(un), 0); 9162 } 9163 } 9164 } 9165 un->un_pm_busy = FALSE; 9166 /* Next... */ 9167 cv_signal(&un->un_pm_busy_cv); 9168 9169 un->un_pm_count++; 9170 9171 SD_TRACE(SD_LOG_IO_PM, un, 9172 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9173 9174 mutex_exit(&un->un_pm_mutex); 9175 9176 return (return_status); 9177 } 9178 9179 9180 /* 9181 * Function: sd_pm_exit 9182 * 9183 * Description: Called at the completion of a command to manage busy 9184 * status for the device. If the device becomes idle the 9185 * PM framework is notified. 9186 * 9187 * Context: Kernel thread context 9188 */ 9189 9190 static void 9191 sd_pm_exit(struct sd_lun *un) 9192 { 9193 ASSERT(!mutex_owned(SD_MUTEX(un))); 9194 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9195 9196 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9197 9198 /* 9199 * After attach the following flag is only read, so don't 9200 * take the penalty of acquiring a mutex for it. 9201 */ 9202 if (un->un_f_pm_is_enabled == TRUE) { 9203 9204 mutex_enter(&un->un_pm_mutex); 9205 un->un_pm_count--; 9206 9207 SD_TRACE(SD_LOG_IO_PM, un, 9208 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9209 9210 ASSERT(un->un_pm_count >= 0); 9211 if (un->un_pm_count == 0) { 9212 mutex_exit(&un->un_pm_mutex); 9213 9214 SD_TRACE(SD_LOG_IO_PM, un, 9215 "sd_pm_exit: idle component\n"); 9216 9217 (void) pm_idle_component(SD_DEVINFO(un), 0); 9218 9219 } else { 9220 mutex_exit(&un->un_pm_mutex); 9221 } 9222 } 9223 9224 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9225 } 9226 9227 9228 /* 9229 * Function: sdopen 9230 * 9231 * Description: Driver's open(9e) entry point function. 
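 *
 *		The minor number encodes both the softstate instance and the
 *		partition being opened, and per-partition open state is
 *		tracked with a bit mask. Sketch, using the driver's
 *		minor-number macros:
 *
 *		    instance = SDUNIT(dev);
 *		    part     = SDPART(dev);
 *		    partmask = 1 << part;
 *		    if (un->un_exclopen & partmask)
 *			    ... partition already opened with FEXCL: EBUSY ...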
9232 * 9233 * Arguments: dev_i - pointer to device number 9234 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9235 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9236 * cred_p - user credential pointer 9237 * 9238 * Return Code: EINVAL 9239 * ENXIO 9240 * EIO 9241 * EROFS 9242 * EBUSY 9243 * 9244 * Context: Kernel thread context 9245 */ 9246 /* ARGSUSED */ 9247 static int 9248 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9249 { 9250 struct sd_lun *un; 9251 int nodelay; 9252 int part; 9253 uint64_t partmask; 9254 int instance; 9255 dev_t dev; 9256 int rval = EIO; 9257 diskaddr_t nblks = 0; 9258 diskaddr_t label_cap; 9259 9260 /* Validate the open type */ 9261 if (otyp >= OTYPCNT) { 9262 return (EINVAL); 9263 } 9264 9265 dev = *dev_p; 9266 instance = SDUNIT(dev); 9267 mutex_enter(&sd_detach_mutex); 9268 9269 /* 9270 * Fail the open if there is no softstate for the instance, or 9271 * if another thread somewhere is trying to detach the instance. 9272 */ 9273 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9274 (un->un_detach_count != 0)) { 9275 mutex_exit(&sd_detach_mutex); 9276 /* 9277 * The probe cache only needs to be cleared when open (9e) fails 9278 * with ENXIO (4238046). 9279 */ 9280 /* 9281 * un-conditionally clearing probe cache is ok with 9282 * separate sd/ssd binaries 9283 * x86 platform can be an issue with both parallel 9284 * and fibre in 1 binary 9285 */ 9286 sd_scsi_clear_probe_cache(); 9287 return (ENXIO); 9288 } 9289 9290 /* 9291 * The un_layer_count is to prevent another thread in specfs from 9292 * trying to detach the instance, which can happen when we are 9293 * called from a higher-layer driver instead of thru specfs. 9294 * This will not be needed when DDI provides a layered driver 9295 * interface that allows specfs to know that an instance is in 9296 * use by a layered driver & should not be detached. 9297 * 9298 * Note: the semantics for layered driver opens are exactly one 9299 * close for every open. 9300 */ 9301 if (otyp == OTYP_LYR) { 9302 un->un_layer_count++; 9303 } 9304 9305 /* 9306 * Keep a count of the current # of opens in progress. This is because 9307 * some layered drivers try to call us as a regular open. This can 9308 * cause problems that we cannot prevent, however by keeping this count 9309 * we can at least keep our open and detach routines from racing against 9310 * each other under such conditions. 9311 */ 9312 un->un_opens_in_progress++; 9313 mutex_exit(&sd_detach_mutex); 9314 9315 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9316 part = SDPART(dev); 9317 partmask = 1 << part; 9318 9319 /* 9320 * We use a semaphore here in order to serialize 9321 * open and close requests on the device. 9322 */ 9323 sema_p(&un->un_semoclose); 9324 9325 mutex_enter(SD_MUTEX(un)); 9326 9327 /* 9328 * All device accesses go thru sdstrategy() where we check 9329 * on suspend status but there could be a scsi_poll command, 9330 * which bypasses sdstrategy(), so we need to check pm 9331 * status. 
9332 */ 9333 9334 if (!nodelay) { 9335 while ((un->un_state == SD_STATE_SUSPENDED) || 9336 (un->un_state == SD_STATE_PM_CHANGING)) { 9337 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9338 } 9339 9340 mutex_exit(SD_MUTEX(un)); 9341 if (sd_pm_entry(un) != DDI_SUCCESS) { 9342 rval = EIO; 9343 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9344 "sdopen: sd_pm_entry failed\n"); 9345 goto open_failed_with_pm; 9346 } 9347 mutex_enter(SD_MUTEX(un)); 9348 } 9349 9350 /* check for previous exclusive open */ 9351 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9352 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9353 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9354 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9355 9356 if (un->un_exclopen & (partmask)) { 9357 goto excl_open_fail; 9358 } 9359 9360 if (flag & FEXCL) { 9361 int i; 9362 if (un->un_ocmap.lyropen[part]) { 9363 goto excl_open_fail; 9364 } 9365 for (i = 0; i < (OTYPCNT - 1); i++) { 9366 if (un->un_ocmap.regopen[i] & (partmask)) { 9367 goto excl_open_fail; 9368 } 9369 } 9370 } 9371 9372 /* 9373 * Check the write permission if this is a removable media device, 9374 * NDELAY has not been set, and writable permission is requested. 9375 * 9376 * Note: If NDELAY was set and this is write-protected media the WRITE 9377 * attempt will fail with EIO as part of the I/O processing. This is a 9378 * more permissive implementation that allows the open to succeed and 9379 * WRITE attempts to fail when appropriate. 9380 */ 9381 if (un->un_f_chk_wp_open) { 9382 if ((flag & FWRITE) && (!nodelay)) { 9383 mutex_exit(SD_MUTEX(un)); 9384 /* 9385 * Defer the check for write permission on writable 9386 * DVD drive till sdstrategy and will not fail open even 9387 * if FWRITE is set as the device can be writable 9388 * depending upon the media and the media can change 9389 * after the call to open(). 9390 */ 9391 if (un->un_f_dvdram_writable_device == FALSE) { 9392 if (ISCD(un) || sr_check_wp(dev)) { 9393 rval = EROFS; 9394 mutex_enter(SD_MUTEX(un)); 9395 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9396 "write to cd or write protected media\n"); 9397 goto open_fail; 9398 } 9399 } 9400 mutex_enter(SD_MUTEX(un)); 9401 } 9402 } 9403 9404 /* 9405 * If opening in NDELAY/NONBLOCK mode, just return. 9406 * Check if disk is ready and has a valid geometry later. 9407 */ 9408 if (!nodelay) { 9409 mutex_exit(SD_MUTEX(un)); 9410 rval = sd_ready_and_valid(un, part); 9411 mutex_enter(SD_MUTEX(un)); 9412 /* 9413 * Fail if device is not ready or if the number of disk 9414 * blocks is zero or negative for non CD devices. 9415 */ 9416 9417 nblks = 0; 9418 9419 if (rval == SD_READY_VALID && (!ISCD(un))) { 9420 /* if cmlb_partinfo fails, nblks remains 0 */ 9421 mutex_exit(SD_MUTEX(un)); 9422 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9423 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9424 mutex_enter(SD_MUTEX(un)); 9425 } 9426 9427 if ((rval != SD_READY_VALID) || 9428 (!ISCD(un) && nblks <= 0)) { 9429 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9430 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9431 "device not ready or invalid disk block value\n"); 9432 goto open_fail; 9433 } 9434 #if defined(__i386) || defined(__amd64) 9435 } else { 9436 uchar_t *cp; 9437 /* 9438 * x86 requires special nodelay handling, so that p0 is 9439 * always defined and accessible. 9440 * Invalidate geometry only if device is not already open. 
9441 */ 9442 cp = &un->un_ocmap.chkd[0]; 9443 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9444 if (*cp != (uchar_t)0) { 9445 break; 9446 } 9447 cp++; 9448 } 9449 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9450 mutex_exit(SD_MUTEX(un)); 9451 cmlb_invalidate(un->un_cmlbhandle, 9452 (void *)SD_PATH_DIRECT); 9453 mutex_enter(SD_MUTEX(un)); 9454 } 9455 9456 #endif 9457 } 9458 9459 if (otyp == OTYP_LYR) { 9460 un->un_ocmap.lyropen[part]++; 9461 } else { 9462 un->un_ocmap.regopen[otyp] |= partmask; 9463 } 9464 9465 /* Set up open and exclusive open flags */ 9466 if (flag & FEXCL) { 9467 un->un_exclopen |= (partmask); 9468 } 9469 9470 /* 9471 * If the lun is EFI labeled and lun capacity is greater than the 9472 * capacity contained in the label, log a sys-event to notify the 9473 * interested module. 9474 * To avoid an infinite loop of logging sys-event, we only log the 9475 * event when the lun is not opened in NDELAY mode. The event handler 9476 * should open the lun in NDELAY mode. 9477 */ 9478 if (!(flag & FNDELAY)) { 9479 mutex_exit(SD_MUTEX(un)); 9480 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9481 (void*)SD_PATH_DIRECT) == 0) { 9482 mutex_enter(SD_MUTEX(un)); 9483 if (un->un_f_blockcount_is_valid && 9484 un->un_blockcount > label_cap) { 9485 mutex_exit(SD_MUTEX(un)); 9486 sd_log_lun_expansion_event(un, 9487 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9488 mutex_enter(SD_MUTEX(un)); 9489 } 9490 } else { 9491 mutex_enter(SD_MUTEX(un)); 9492 } 9493 } 9494 9495 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9496 "open of part %d type %d\n", part, otyp); 9497 9498 mutex_exit(SD_MUTEX(un)); 9499 if (!nodelay) { 9500 sd_pm_exit(un); 9501 } 9502 9503 sema_v(&un->un_semoclose); 9504 9505 mutex_enter(&sd_detach_mutex); 9506 un->un_opens_in_progress--; 9507 mutex_exit(&sd_detach_mutex); 9508 9509 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9510 return (DDI_SUCCESS); 9511 9512 excl_open_fail: 9513 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9514 rval = EBUSY; 9515 9516 open_fail: 9517 mutex_exit(SD_MUTEX(un)); 9518 9519 /* 9520 * On a failed open we must exit the pm management. 9521 */ 9522 if (!nodelay) { 9523 sd_pm_exit(un); 9524 } 9525 open_failed_with_pm: 9526 sema_v(&un->un_semoclose); 9527 9528 mutex_enter(&sd_detach_mutex); 9529 un->un_opens_in_progress--; 9530 if (otyp == OTYP_LYR) { 9531 un->un_layer_count--; 9532 } 9533 mutex_exit(&sd_detach_mutex); 9534 9535 return (rval); 9536 } 9537 9538 9539 /* 9540 * Function: sdclose 9541 * 9542 * Description: Driver's close(9e) entry point function. 9543 * 9544 * Arguments: dev - device number 9545 * flag - file status flag, informational only 9546 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9547 * cred_p - user credential pointer 9548 * 9549 * Return Code: ENXIO 9550 * 9551 * Context: Kernel thread context 9552 */ 9553 /* ARGSUSED */ 9554 static int 9555 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9556 { 9557 struct sd_lun *un; 9558 uchar_t *cp; 9559 int part; 9560 int nodelay; 9561 int rval = 0; 9562 9563 /* Validate the open type */ 9564 if (otyp >= OTYPCNT) { 9565 return (ENXIO); 9566 } 9567 9568 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9569 return (ENXIO); 9570 } 9571 9572 part = SDPART(dev); 9573 nodelay = flag & (FNDELAY | FNONBLOCK); 9574 9575 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9576 "sdclose: close of part %d type %d\n", part, otyp); 9577 9578 /* 9579 * We use a semaphore here in order to serialize 9580 * open and close requests on the device. 
9581 */ 9582 sema_p(&un->un_semoclose); 9583 9584 mutex_enter(SD_MUTEX(un)); 9585 9586 /* Don't proceed if power is being changed. */ 9587 while (un->un_state == SD_STATE_PM_CHANGING) { 9588 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9589 } 9590 9591 if (un->un_exclopen & (1 << part)) { 9592 un->un_exclopen &= ~(1 << part); 9593 } 9594 9595 /* Update the open partition map */ 9596 if (otyp == OTYP_LYR) { 9597 un->un_ocmap.lyropen[part] -= 1; 9598 } else { 9599 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9600 } 9601 9602 cp = &un->un_ocmap.chkd[0]; 9603 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9604 if (*cp != NULL) { 9605 break; 9606 } 9607 cp++; 9608 } 9609 9610 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9611 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9612 9613 /* 9614 * We avoid persistance upon the last close, and set 9615 * the throttle back to the maximum. 9616 */ 9617 un->un_throttle = un->un_saved_throttle; 9618 9619 if (un->un_state == SD_STATE_OFFLINE) { 9620 if (un->un_f_is_fibre == FALSE) { 9621 scsi_log(SD_DEVINFO(un), sd_label, 9622 CE_WARN, "offline\n"); 9623 } 9624 mutex_exit(SD_MUTEX(un)); 9625 cmlb_invalidate(un->un_cmlbhandle, 9626 (void *)SD_PATH_DIRECT); 9627 mutex_enter(SD_MUTEX(un)); 9628 9629 } else { 9630 /* 9631 * Flush any outstanding writes in NVRAM cache. 9632 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9633 * cmd, it may not work for non-Pluto devices. 9634 * SYNCHRONIZE CACHE is not required for removables, 9635 * except DVD-RAM drives. 9636 * 9637 * Also note: because SYNCHRONIZE CACHE is currently 9638 * the only command issued here that requires the 9639 * drive be powered up, only do the power up before 9640 * sending the Sync Cache command. If additional 9641 * commands are added which require a powered up 9642 * drive, the following sequence may have to change. 9643 * 9644 * And finally, note that parallel SCSI on SPARC 9645 * only issues a Sync Cache to DVD-RAM, a newly 9646 * supported device. 9647 */ 9648 #if defined(__i386) || defined(__amd64) 9649 if ((un->un_f_sync_cache_supported && 9650 un->un_f_sync_cache_required) || 9651 un->un_f_dvdram_writable_device == TRUE) { 9652 #else 9653 if (un->un_f_dvdram_writable_device == TRUE) { 9654 #endif 9655 mutex_exit(SD_MUTEX(un)); 9656 if (sd_pm_entry(un) == DDI_SUCCESS) { 9657 rval = 9658 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9659 NULL); 9660 /* ignore error if not supported */ 9661 if (rval == ENOTSUP) { 9662 rval = 0; 9663 } else if (rval != 0) { 9664 rval = EIO; 9665 } 9666 sd_pm_exit(un); 9667 } else { 9668 rval = EIO; 9669 } 9670 mutex_enter(SD_MUTEX(un)); 9671 } 9672 9673 /* 9674 * For devices which supports DOOR_LOCK, send an ALLOW 9675 * MEDIA REMOVAL command, but don't get upset if it 9676 * fails. We need to raise the power of the drive before 9677 * we can call sd_send_scsi_DOORLOCK() 9678 */ 9679 if (un->un_f_doorlock_supported) { 9680 mutex_exit(SD_MUTEX(un)); 9681 if (sd_pm_entry(un) == DDI_SUCCESS) { 9682 rval = sd_send_scsi_DOORLOCK(un, 9683 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9684 9685 sd_pm_exit(un); 9686 if (ISCD(un) && (rval != 0) && 9687 (nodelay != 0)) { 9688 rval = ENXIO; 9689 } 9690 } else { 9691 rval = EIO; 9692 } 9693 mutex_enter(SD_MUTEX(un)); 9694 } 9695 9696 /* 9697 * If a device has removable media, invalidate all 9698 * parameters related to media, such as geometry, 9699 * blocksize, and blockcount. 
9700 */ 9701 if (un->un_f_has_removable_media) { 9702 sr_ejected(un); 9703 } 9704 9705 /* 9706 * Destroy the cache (if it exists) which was 9707 * allocated for the write maps since this is 9708 * the last close for this media. 9709 */ 9710 if (un->un_wm_cache) { 9711 /* 9712 * Check if there are pending commands. 9713 * and if there are give a warning and 9714 * do not destroy the cache. 9715 */ 9716 if (un->un_ncmds_in_driver > 0) { 9717 scsi_log(SD_DEVINFO(un), 9718 sd_label, CE_WARN, 9719 "Unable to clean up memory " 9720 "because of pending I/O\n"); 9721 } else { 9722 kmem_cache_destroy( 9723 un->un_wm_cache); 9724 un->un_wm_cache = NULL; 9725 } 9726 } 9727 } 9728 } 9729 9730 mutex_exit(SD_MUTEX(un)); 9731 sema_v(&un->un_semoclose); 9732 9733 if (otyp == OTYP_LYR) { 9734 mutex_enter(&sd_detach_mutex); 9735 /* 9736 * The detach routine may run when the layer count 9737 * drops to zero. 9738 */ 9739 un->un_layer_count--; 9740 mutex_exit(&sd_detach_mutex); 9741 } 9742 9743 return (rval); 9744 } 9745 9746 9747 /* 9748 * Function: sd_ready_and_valid 9749 * 9750 * Description: Test if device is ready and has a valid geometry. 9751 * 9752 * Arguments: dev - device number 9753 * un - driver soft state (unit) structure 9754 * 9755 * Return Code: SD_READY_VALID ready and valid label 9756 * SD_NOT_READY_VALID not ready, no label 9757 * SD_RESERVED_BY_OTHERS reservation conflict 9758 * 9759 * Context: Never called at interrupt context. 9760 */ 9761 9762 static int 9763 sd_ready_and_valid(struct sd_lun *un, int part) 9764 { 9765 struct sd_errstats *stp; 9766 uint64_t capacity; 9767 uint_t lbasize; 9768 int rval = SD_READY_VALID; 9769 char name_str[48]; 9770 int is_valid; 9771 9772 ASSERT(un != NULL); 9773 ASSERT(!mutex_owned(SD_MUTEX(un))); 9774 9775 mutex_enter(SD_MUTEX(un)); 9776 /* 9777 * If a device has removable media, we must check if media is 9778 * ready when checking if this device is ready and valid. 9779 */ 9780 if (un->un_f_has_removable_media) { 9781 mutex_exit(SD_MUTEX(un)); 9782 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9783 rval = SD_NOT_READY_VALID; 9784 mutex_enter(SD_MUTEX(un)); 9785 goto done; 9786 } 9787 9788 is_valid = SD_IS_VALID_LABEL(un); 9789 mutex_enter(SD_MUTEX(un)); 9790 if (!is_valid || 9791 (un->un_f_blockcount_is_valid == FALSE) || 9792 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9793 9794 /* capacity has to be read every open. */ 9795 mutex_exit(SD_MUTEX(un)); 9796 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9797 &lbasize, SD_PATH_DIRECT) != 0) { 9798 cmlb_invalidate(un->un_cmlbhandle, 9799 (void *)SD_PATH_DIRECT); 9800 mutex_enter(SD_MUTEX(un)); 9801 rval = SD_NOT_READY_VALID; 9802 goto done; 9803 } else { 9804 mutex_enter(SD_MUTEX(un)); 9805 sd_update_block_info(un, lbasize, capacity); 9806 } 9807 } 9808 9809 /* 9810 * Check if the media in the device is writable or not. 9811 */ 9812 if (!is_valid && ISCD(un)) { 9813 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9814 } 9815 9816 } else { 9817 /* 9818 * Do a test unit ready to clear any unit attention from non-cd 9819 * devices. 9820 */ 9821 mutex_exit(SD_MUTEX(un)); 9822 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9823 mutex_enter(SD_MUTEX(un)); 9824 } 9825 9826 9827 /* 9828 * If this is a non 512 block device, allocate space for 9829 * the wmap cache. This is being done here since every time 9830 * a media is changed this routine will be called and the 9831 * block size is a function of media rather than device. 
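 *
 * The cache is created on first use with a per-instance name (e.g.
 * instance 3 of "sd" gets a cache named "sd3_cache"); sketch of the
 * call made below:
 *
 *	(void) snprintf(name_str, sizeof (name_str), "%s%d_cache",
 *	    ddi_driver_name(SD_DEVINFO(un)),
 *	    ddi_get_instance(SD_DEVINFO(un)));
 *	un->un_wm_cache = kmem_cache_create(name_str,
 *	    sizeof (struct sd_w_map), 8, sd_wm_cache_constructor,
 *	    sd_wm_cache_destructor, NULL, (void *)un, NULL, 0);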
9832 */ 9833 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9834 if (!(un->un_wm_cache)) { 9835 (void) snprintf(name_str, sizeof (name_str), 9836 "%s%d_cache", 9837 ddi_driver_name(SD_DEVINFO(un)), 9838 ddi_get_instance(SD_DEVINFO(un))); 9839 un->un_wm_cache = kmem_cache_create( 9840 name_str, sizeof (struct sd_w_map), 9841 8, sd_wm_cache_constructor, 9842 sd_wm_cache_destructor, NULL, 9843 (void *)un, NULL, 0); 9844 if (!(un->un_wm_cache)) { 9845 rval = ENOMEM; 9846 goto done; 9847 } 9848 } 9849 } 9850 9851 if (un->un_state == SD_STATE_NORMAL) { 9852 /* 9853 * If the target is not yet ready here (defined by a TUR 9854 * failure), invalidate the geometry and print an 'offline' 9855 * message. This is a legacy message, as the state of the 9856 * target is not actually changed to SD_STATE_OFFLINE. 9857 * 9858 * If the TUR fails for EACCES (Reservation Conflict), 9859 * SD_RESERVED_BY_OTHERS will be returned to indicate 9860 * reservation conflict. If the TUR fails for other 9861 * reasons, SD_NOT_READY_VALID will be returned. 9862 */ 9863 int err; 9864 9865 mutex_exit(SD_MUTEX(un)); 9866 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9867 mutex_enter(SD_MUTEX(un)); 9868 9869 if (err != 0) { 9870 mutex_exit(SD_MUTEX(un)); 9871 cmlb_invalidate(un->un_cmlbhandle, 9872 (void *)SD_PATH_DIRECT); 9873 mutex_enter(SD_MUTEX(un)); 9874 if (err == EACCES) { 9875 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9876 "reservation conflict\n"); 9877 rval = SD_RESERVED_BY_OTHERS; 9878 } else { 9879 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9880 "drive offline\n"); 9881 rval = SD_NOT_READY_VALID; 9882 } 9883 goto done; 9884 } 9885 } 9886 9887 if (un->un_f_format_in_progress == FALSE) { 9888 mutex_exit(SD_MUTEX(un)); 9889 9890 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 9891 NULL, (void *) SD_PATH_DIRECT) != 0) { 9892 rval = SD_NOT_READY_VALID; 9893 mutex_enter(SD_MUTEX(un)); 9894 goto done; 9895 } 9896 if (un->un_f_pkstats_enabled) { 9897 sd_set_pstats(un); 9898 SD_TRACE(SD_LOG_IO_PARTITION, un, 9899 "sd_ready_and_valid: un:0x%p pstats created and " 9900 "set\n", un); 9901 } 9902 mutex_enter(SD_MUTEX(un)); 9903 } 9904 9905 /* 9906 * If this device supports DOOR_LOCK command, try and send 9907 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9908 * if it fails. For a CD, however, it is an error 9909 */ 9910 if (un->un_f_doorlock_supported) { 9911 mutex_exit(SD_MUTEX(un)); 9912 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9913 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9914 rval = SD_NOT_READY_VALID; 9915 mutex_enter(SD_MUTEX(un)); 9916 goto done; 9917 } 9918 mutex_enter(SD_MUTEX(un)); 9919 } 9920 9921 /* The state has changed, inform the media watch routines */ 9922 un->un_mediastate = DKIO_INSERTED; 9923 cv_broadcast(&un->un_state_cv); 9924 rval = SD_READY_VALID; 9925 9926 done: 9927 9928 /* 9929 * Initialize the capacity kstat value, if no media previously 9930 * (capacity kstat is 0) and a media has been inserted 9931 * (un_blockcount > 0). 9932 */ 9933 if (un->un_errstats != NULL) { 9934 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9935 if ((stp->sd_capacity.value.ui64 == 0) && 9936 (un->un_f_blockcount_is_valid == TRUE)) { 9937 stp->sd_capacity.value.ui64 = 9938 (uint64_t)((uint64_t)un->un_blockcount * 9939 un->un_sys_blocksize); 9940 } 9941 } 9942 9943 mutex_exit(SD_MUTEX(un)); 9944 return (rval); 9945 } 9946 9947 9948 /* 9949 * Function: sdmin 9950 * 9951 * Description: Routine to limit the size of a data transfer. Used in 9952 * conjunction with physio(9F). 
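 *
 *		For example, the read(9E) and write(9E) paths below hand this
 *		routine to physio(9F), which invokes it to clamp each
 *		transfer to un_max_xfer_size:
 *
 *		    err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);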
9953 * 9954 * Arguments: bp - pointer to the indicated buf(9S) struct. 9955 * 9956 * Context: Kernel thread context. 9957 */ 9958 9959 static void 9960 sdmin(struct buf *bp) 9961 { 9962 struct sd_lun *un; 9963 int instance; 9964 9965 instance = SDUNIT(bp->b_edev); 9966 9967 un = ddi_get_soft_state(sd_state, instance); 9968 ASSERT(un != NULL); 9969 9970 if (bp->b_bcount > un->un_max_xfer_size) { 9971 bp->b_bcount = un->un_max_xfer_size; 9972 } 9973 } 9974 9975 9976 /* 9977 * Function: sdread 9978 * 9979 * Description: Driver's read(9e) entry point function. 9980 * 9981 * Arguments: dev - device number 9982 * uio - structure pointer describing where data is to be stored 9983 * in user's space 9984 * cred_p - user credential pointer 9985 * 9986 * Return Code: ENXIO 9987 * EIO 9988 * EINVAL 9989 * value returned by physio 9990 * 9991 * Context: Kernel thread context. 9992 */ 9993 /* ARGSUSED */ 9994 static int 9995 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9996 { 9997 struct sd_lun *un = NULL; 9998 int secmask; 9999 int err; 10000 10001 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10002 return (ENXIO); 10003 } 10004 10005 ASSERT(!mutex_owned(SD_MUTEX(un))); 10006 10007 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10008 mutex_enter(SD_MUTEX(un)); 10009 /* 10010 * Because the call to sd_ready_and_valid will issue I/O we 10011 * must wait here if either the device is suspended or 10012 * if it's power level is changing. 10013 */ 10014 while ((un->un_state == SD_STATE_SUSPENDED) || 10015 (un->un_state == SD_STATE_PM_CHANGING)) { 10016 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10017 } 10018 un->un_ncmds_in_driver++; 10019 mutex_exit(SD_MUTEX(un)); 10020 if ((sd_ready_and_valid(un, SDPART(dev))) != SD_READY_VALID) { 10021 mutex_enter(SD_MUTEX(un)); 10022 un->un_ncmds_in_driver--; 10023 ASSERT(un->un_ncmds_in_driver >= 0); 10024 mutex_exit(SD_MUTEX(un)); 10025 return (EIO); 10026 } 10027 mutex_enter(SD_MUTEX(un)); 10028 un->un_ncmds_in_driver--; 10029 ASSERT(un->un_ncmds_in_driver >= 0); 10030 mutex_exit(SD_MUTEX(un)); 10031 } 10032 10033 /* 10034 * Read requests are restricted to multiples of the system block size. 10035 */ 10036 secmask = un->un_sys_blocksize - 1; 10037 10038 if (uio->uio_loffset & ((offset_t)(secmask))) { 10039 SD_ERROR(SD_LOG_READ_WRITE, un, 10040 "sdread: file offset not modulo %d\n", 10041 un->un_sys_blocksize); 10042 err = EINVAL; 10043 } else if (uio->uio_iov->iov_len & (secmask)) { 10044 SD_ERROR(SD_LOG_READ_WRITE, un, 10045 "sdread: transfer length not modulo %d\n", 10046 un->un_sys_blocksize); 10047 err = EINVAL; 10048 } else { 10049 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10050 } 10051 return (err); 10052 } 10053 10054 10055 /* 10056 * Function: sdwrite 10057 * 10058 * Description: Driver's write(9e) entry point function. 10059 * 10060 * Arguments: dev - device number 10061 * uio - structure pointer describing where data is stored in 10062 * user's space 10063 * cred_p - user credential pointer 10064 * 10065 * Return Code: ENXIO 10066 * EIO 10067 * EINVAL 10068 * value returned by physio 10069 * 10070 * Context: Kernel thread context. 
10071 */ 10072 /* ARGSUSED */ 10073 static int 10074 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10075 { 10076 struct sd_lun *un = NULL; 10077 int secmask; 10078 int err; 10079 10080 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10081 return (ENXIO); 10082 } 10083 10084 ASSERT(!mutex_owned(SD_MUTEX(un))); 10085 10086 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10087 mutex_enter(SD_MUTEX(un)); 10088 /* 10089 * Because the call to sd_ready_and_valid will issue I/O we 10090 * must wait here if either the device is suspended or 10091 * if it's power level is changing. 10092 */ 10093 while ((un->un_state == SD_STATE_SUSPENDED) || 10094 (un->un_state == SD_STATE_PM_CHANGING)) { 10095 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10096 } 10097 un->un_ncmds_in_driver++; 10098 mutex_exit(SD_MUTEX(un)); 10099 if ((sd_ready_and_valid(un, SDPART(dev))) != SD_READY_VALID) { 10100 mutex_enter(SD_MUTEX(un)); 10101 un->un_ncmds_in_driver--; 10102 ASSERT(un->un_ncmds_in_driver >= 0); 10103 mutex_exit(SD_MUTEX(un)); 10104 return (EIO); 10105 } 10106 mutex_enter(SD_MUTEX(un)); 10107 un->un_ncmds_in_driver--; 10108 ASSERT(un->un_ncmds_in_driver >= 0); 10109 mutex_exit(SD_MUTEX(un)); 10110 } 10111 10112 /* 10113 * Write requests are restricted to multiples of the system block size. 10114 */ 10115 secmask = un->un_sys_blocksize - 1; 10116 10117 if (uio->uio_loffset & ((offset_t)(secmask))) { 10118 SD_ERROR(SD_LOG_READ_WRITE, un, 10119 "sdwrite: file offset not modulo %d\n", 10120 un->un_sys_blocksize); 10121 err = EINVAL; 10122 } else if (uio->uio_iov->iov_len & (secmask)) { 10123 SD_ERROR(SD_LOG_READ_WRITE, un, 10124 "sdwrite: transfer length not modulo %d\n", 10125 un->un_sys_blocksize); 10126 err = EINVAL; 10127 } else { 10128 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10129 } 10130 return (err); 10131 } 10132 10133 10134 /* 10135 * Function: sdaread 10136 * 10137 * Description: Driver's aread(9e) entry point function. 10138 * 10139 * Arguments: dev - device number 10140 * aio - structure pointer describing where data is to be stored 10141 * cred_p - user credential pointer 10142 * 10143 * Return Code: ENXIO 10144 * EIO 10145 * EINVAL 10146 * value returned by aphysio 10147 * 10148 * Context: Kernel thread context. 10149 */ 10150 /* ARGSUSED */ 10151 static int 10152 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10153 { 10154 struct sd_lun *un = NULL; 10155 struct uio *uio = aio->aio_uio; 10156 int secmask; 10157 int err; 10158 10159 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10160 return (ENXIO); 10161 } 10162 10163 ASSERT(!mutex_owned(SD_MUTEX(un))); 10164 10165 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10166 mutex_enter(SD_MUTEX(un)); 10167 /* 10168 * Because the call to sd_ready_and_valid will issue I/O we 10169 * must wait here if either the device is suspended or 10170 * if it's power level is changing. 
10171 */ 10172 while ((un->un_state == SD_STATE_SUSPENDED) || 10173 (un->un_state == SD_STATE_PM_CHANGING)) { 10174 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10175 } 10176 un->un_ncmds_in_driver++; 10177 mutex_exit(SD_MUTEX(un)); 10178 if ((sd_ready_and_valid(un, SDPART(dev))) != SD_READY_VALID) { 10179 mutex_enter(SD_MUTEX(un)); 10180 un->un_ncmds_in_driver--; 10181 ASSERT(un->un_ncmds_in_driver >= 0); 10182 mutex_exit(SD_MUTEX(un)); 10183 return (EIO); 10184 } 10185 mutex_enter(SD_MUTEX(un)); 10186 un->un_ncmds_in_driver--; 10187 ASSERT(un->un_ncmds_in_driver >= 0); 10188 mutex_exit(SD_MUTEX(un)); 10189 } 10190 10191 /* 10192 * Read requests are restricted to multiples of the system block size. 10193 */ 10194 secmask = un->un_sys_blocksize - 1; 10195 10196 if (uio->uio_loffset & ((offset_t)(secmask))) { 10197 SD_ERROR(SD_LOG_READ_WRITE, un, 10198 "sdaread: file offset not modulo %d\n", 10199 un->un_sys_blocksize); 10200 err = EINVAL; 10201 } else if (uio->uio_iov->iov_len & (secmask)) { 10202 SD_ERROR(SD_LOG_READ_WRITE, un, 10203 "sdaread: transfer length not modulo %d\n", 10204 un->un_sys_blocksize); 10205 err = EINVAL; 10206 } else { 10207 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10208 } 10209 return (err); 10210 } 10211 10212 10213 /* 10214 * Function: sdawrite 10215 * 10216 * Description: Driver's awrite(9e) entry point function. 10217 * 10218 * Arguments: dev - device number 10219 * aio - structure pointer describing where data is stored 10220 * cred_p - user credential pointer 10221 * 10222 * Return Code: ENXIO 10223 * EIO 10224 * EINVAL 10225 * value returned by aphysio 10226 * 10227 * Context: Kernel thread context. 10228 */ 10229 /* ARGSUSED */ 10230 static int 10231 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10232 { 10233 struct sd_lun *un = NULL; 10234 struct uio *uio = aio->aio_uio; 10235 int secmask; 10236 int err; 10237 10238 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10239 return (ENXIO); 10240 } 10241 10242 ASSERT(!mutex_owned(SD_MUTEX(un))); 10243 10244 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10245 mutex_enter(SD_MUTEX(un)); 10246 /* 10247 * Because the call to sd_ready_and_valid will issue I/O we 10248 * must wait here if either the device is suspended or 10249 * if it's power level is changing. 10250 */ 10251 while ((un->un_state == SD_STATE_SUSPENDED) || 10252 (un->un_state == SD_STATE_PM_CHANGING)) { 10253 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10254 } 10255 un->un_ncmds_in_driver++; 10256 mutex_exit(SD_MUTEX(un)); 10257 if ((sd_ready_and_valid(un, SDPART(dev))) != SD_READY_VALID) { 10258 mutex_enter(SD_MUTEX(un)); 10259 un->un_ncmds_in_driver--; 10260 ASSERT(un->un_ncmds_in_driver >= 0); 10261 mutex_exit(SD_MUTEX(un)); 10262 return (EIO); 10263 } 10264 mutex_enter(SD_MUTEX(un)); 10265 un->un_ncmds_in_driver--; 10266 ASSERT(un->un_ncmds_in_driver >= 0); 10267 mutex_exit(SD_MUTEX(un)); 10268 } 10269 10270 /* 10271 * Write requests are restricted to multiples of the system block size. 
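 *
 * With a power-of-two block size, "x & (blocksize - 1)" is non-zero
 * exactly when x is not a multiple of the block size. For example,
 * with 512-byte blocks:
 *
 *	secmask = 512 - 1;		mask is 0x1ff
 *	(1024 & secmask) == 0		aligned, request accepted
 *	( 700 & secmask) == 0xbc	misaligned, request fails with EINVAL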
10272 */ 10273 secmask = un->un_sys_blocksize - 1; 10274 10275 if (uio->uio_loffset & ((offset_t)(secmask))) { 10276 SD_ERROR(SD_LOG_READ_WRITE, un, 10277 "sdawrite: file offset not modulo %d\n", 10278 un->un_sys_blocksize); 10279 err = EINVAL; 10280 } else if (uio->uio_iov->iov_len & (secmask)) { 10281 SD_ERROR(SD_LOG_READ_WRITE, un, 10282 "sdawrite: transfer length not modulo %d\n", 10283 un->un_sys_blocksize); 10284 err = EINVAL; 10285 } else { 10286 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10287 } 10288 return (err); 10289 } 10290 10291 10292 10293 10294 10295 /* 10296 * Driver IO processing follows the following sequence: 10297 * 10298 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10299 * | | ^ 10300 * v v | 10301 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10302 * | | | | 10303 * v | | | 10304 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10305 * | | ^ ^ 10306 * v v | | 10307 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10308 * | | | | 10309 * +---+ | +------------+ +-------+ 10310 * | | | | 10311 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10312 * | v | | 10313 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10314 * | | ^ | 10315 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10316 * | v | | 10317 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10318 * | | ^ | 10319 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10320 * | v | | 10321 * | sd_checksum_iostart() sd_checksum_iodone() | 10322 * | | ^ | 10323 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10324 * | v | | 10325 * | sd_pm_iostart() sd_pm_iodone() | 10326 * | | ^ | 10327 * | | | | 10328 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10329 * | ^ 10330 * v | 10331 * sd_core_iostart() | 10332 * | | 10333 * | +------>(*destroypkt)() 10334 * +-> sd_start_cmds() <-+ | | 10335 * | | | v 10336 * | | | scsi_destroy_pkt(9F) 10337 * | | | 10338 * +->(*initpkt)() +- sdintr() 10339 * | | | | 10340 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10341 * | +-> scsi_setup_cdb(9F) | 10342 * | | 10343 * +--> scsi_transport(9F) | 10344 * | | 10345 * +----> SCSA ---->+ 10346 * 10347 * 10348 * This code is based upon the following presumptions: 10349 * 10350 * - iostart and iodone functions operate on buf(9S) structures. These 10351 * functions perform the necessary operations on the buf(9S) and pass 10352 * them along to the next function in the chain by using the macros 10353 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10354 * (for iodone side functions). 10355 * 10356 * - The iostart side functions may sleep. The iodone side functions 10357 * are called under interrupt context and may NOT sleep. Therefore 10358 * iodone side functions also may not call iostart side functions. 10359 * (NOTE: iostart side functions should NOT sleep for memory, as 10360 * this could result in deadlock.) 10361 * 10362 * - An iostart side function may call its corresponding iodone side 10363 * function directly (if necessary). 10364 * 10365 * - In the event of an error, an iostart side function can return a buf(9S) 10366 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10367 * b_error in the usual way of course). 10368 * 10369 * - The taskq mechanism may be used by the iodone side functions to dispatch 10370 * requests to the iostart side functions. The iostart side functions in 10371 * this case would be called under the context of a taskq thread, so it's 10372 * OK for them to block/sleep/spin in this case. 
10373 * 10374 * - iostart side functions may allocate "shadow" buf(9S) structs and 10375 * pass them along to the next function in the chain. The corresponding 10376 * iodone side functions must coalesce the "shadow" bufs and return 10377 * the "original" buf to the next higher layer. 10378 * 10379 * - The b_private field of the buf(9S) struct holds a pointer to 10380 * an sd_xbuf struct, which contains information needed to 10381 * construct the scsi_pkt for the command. 10382 * 10383 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10384 * layer must acquire & release the SD_MUTEX(un) as needed. 10385 */ 10386 10387 10388 /* 10389 * Create taskq for all targets in the system. This is created at 10390 * _init(9E) and destroyed at _fini(9E). 10391 * 10392 * Note: here we set the minalloc to a reasonably high number to ensure that 10393 * we will have an adequate supply of task entries available at interrupt time. 10394 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10395 * sd_taskq_create(). Since we do not want to sleep for allocations at 10396 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10397 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10398 * requests at any one instant in time. 10399 */ 10400 #define SD_TASKQ_NUMTHREADS 8 10401 #define SD_TASKQ_MINALLOC 256 10402 #define SD_TASKQ_MAXALLOC 256 10403 10404 static taskq_t *sd_tq = NULL; 10405 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10406 10407 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10408 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10409 10410 /* 10411 * The following task queue is created for the write part of 10412 * read-modify-write handling on non-512 block size devices. 10413 * Limit the number of threads to 1 for now. This number was chosen 10414 * because the queue currently applies only to DVD-RAM and MO drives, 10415 * for which performance is not the main concern at this stage. 10416 * Note: whether a single taskq could be used instead remains to be explored. 10417 */ 10418 #define SD_WMR_TASKQ_NUMTHREADS 1 10419 static taskq_t *sd_wmr_tq = NULL; 10420 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10421 10422 /* 10423 * Function: sd_taskq_create 10424 * 10425 * Description: Create taskq thread(s) and preallocate task entries 10426 * 10427 * Return Code: none; the created taskqs are stored in sd_tq and sd_wmr_tq. 10428 * 10429 * Context: Can sleep. Requires blockable context. 10430 * 10431 * Notes: - The taskq() facility currently is NOT part of the DDI. 10432 * (definitely NOT recommended for 3rd-party drivers!) :-) 10433 * - taskq_create() will block for memory; it will also panic 10434 * if it cannot create the requested number of threads. 10435 * - Currently taskq_create() creates threads that cannot be 10436 * swapped.
10437 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10438 * supply of taskq entries at interrupt time (ie, so that we 10439 * do not have to sleep for memory) 10440 */ 10441 10442 static void 10443 sd_taskq_create(void) 10444 { 10445 char taskq_name[TASKQ_NAMELEN]; 10446 10447 ASSERT(sd_tq == NULL); 10448 ASSERT(sd_wmr_tq == NULL); 10449 10450 (void) snprintf(taskq_name, sizeof (taskq_name), 10451 "%s_drv_taskq", sd_label); 10452 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10453 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10454 TASKQ_PREPOPULATE)); 10455 10456 (void) snprintf(taskq_name, sizeof (taskq_name), 10457 "%s_rmw_taskq", sd_label); 10458 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10459 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10460 TASKQ_PREPOPULATE)); 10461 } 10462 10463 10464 /* 10465 * Function: sd_taskq_delete 10466 * 10467 * Description: Complementary cleanup routine for sd_taskq_create(). 10468 * 10469 * Context: Kernel thread context. 10470 */ 10471 10472 static void 10473 sd_taskq_delete(void) 10474 { 10475 ASSERT(sd_tq != NULL); 10476 ASSERT(sd_wmr_tq != NULL); 10477 taskq_destroy(sd_tq); 10478 taskq_destroy(sd_wmr_tq); 10479 sd_tq = NULL; 10480 sd_wmr_tq = NULL; 10481 } 10482 10483 10484 /* 10485 * Function: sdstrategy 10486 * 10487 * Description: Driver's strategy (9E) entry point function. 10488 * 10489 * Arguments: bp - pointer to buf(9S) 10490 * 10491 * Return Code: Always returns zero 10492 * 10493 * Context: Kernel thread context. 10494 */ 10495 10496 static int 10497 sdstrategy(struct buf *bp) 10498 { 10499 struct sd_lun *un; 10500 10501 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10502 if (un == NULL) { 10503 bioerror(bp, EIO); 10504 bp->b_resid = bp->b_bcount; 10505 biodone(bp); 10506 return (0); 10507 } 10508 /* As was done in the past, fail new cmds. if state is dumping. */ 10509 if (un->un_state == SD_STATE_DUMPING) { 10510 bioerror(bp, ENXIO); 10511 bp->b_resid = bp->b_bcount; 10512 biodone(bp); 10513 return (0); 10514 } 10515 10516 ASSERT(!mutex_owned(SD_MUTEX(un))); 10517 10518 /* 10519 * Commands may sneak in while we released the mutex in 10520 * DDI_SUSPEND, we should block new commands. However, old 10521 * commands that are still in the driver at this point should 10522 * still be allowed to drain. 10523 */ 10524 mutex_enter(SD_MUTEX(un)); 10525 /* 10526 * Must wait here if either the device is suspended or 10527 * if it's power level is changing. 10528 */ 10529 while ((un->un_state == SD_STATE_SUSPENDED) || 10530 (un->un_state == SD_STATE_PM_CHANGING)) { 10531 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10532 } 10533 10534 un->un_ncmds_in_driver++; 10535 10536 /* 10537 * atapi: Since we are running the CD for now in PIO mode we need to 10538 * call bp_mapin here to avoid bp_mapin called interrupt context under 10539 * the HBA's init_pkt routine. 10540 */ 10541 if (un->un_f_cfg_is_atapi == TRUE) { 10542 mutex_exit(SD_MUTEX(un)); 10543 bp_mapin(bp); 10544 mutex_enter(SD_MUTEX(un)); 10545 } 10546 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10547 un->un_ncmds_in_driver); 10548 10549 if (bp->b_flags & B_WRITE) 10550 un->un_f_sync_cache_required = TRUE; 10551 10552 mutex_exit(SD_MUTEX(un)); 10553 10554 /* 10555 * This will (eventually) allocate the sd_xbuf area and 10556 * call sd_xbuf_strategy(). We just want to return the 10557 * result of ddi_xbuf_qstrategy so that we have an opt- 10558 * imized tail call which saves us a stack frame. 
10559 */ 10560 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10561 } 10562 10563 10564 /* 10565 * Function: sd_xbuf_strategy 10566 * 10567 * Description: Function for initiating IO operations via the 10568 * ddi_xbuf_qstrategy() mechanism. 10569 * 10570 * Context: Kernel thread context. 10571 */ 10572 10573 static void 10574 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10575 { 10576 struct sd_lun *un = arg; 10577 10578 ASSERT(bp != NULL); 10579 ASSERT(xp != NULL); 10580 ASSERT(un != NULL); 10581 ASSERT(!mutex_owned(SD_MUTEX(un))); 10582 10583 /* 10584 * Initialize the fields in the xbuf and save a pointer to the 10585 * xbuf in bp->b_private. 10586 */ 10587 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10588 10589 /* Send the buf down the iostart chain */ 10590 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10591 } 10592 10593 10594 /* 10595 * Function: sd_xbuf_init 10596 * 10597 * Description: Prepare the given sd_xbuf struct for use. 10598 * 10599 * Arguments: un - ptr to softstate 10600 * bp - ptr to associated buf(9S) 10601 * xp - ptr to associated sd_xbuf 10602 * chain_type - IO chain type to use: 10603 * SD_CHAIN_NULL 10604 * SD_CHAIN_BUFIO 10605 * SD_CHAIN_USCSI 10606 * SD_CHAIN_DIRECT 10607 * SD_CHAIN_DIRECT_PRIORITY 10608 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10609 * initialization; may be NULL if none. 10610 * 10611 * Context: Kernel thread context 10612 */ 10613 10614 static void 10615 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10616 uchar_t chain_type, void *pktinfop) 10617 { 10618 int index; 10619 10620 ASSERT(un != NULL); 10621 ASSERT(bp != NULL); 10622 ASSERT(xp != NULL); 10623 10624 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10625 bp, chain_type); 10626 10627 xp->xb_un = un; 10628 xp->xb_pktp = NULL; 10629 xp->xb_pktinfo = pktinfop; 10630 xp->xb_private = bp->b_private; 10631 xp->xb_blkno = (daddr_t)bp->b_blkno; 10632 10633 /* 10634 * Set up the iostart and iodone chain indexes in the xbuf, based 10635 * upon the specified chain type to use. 10636 */ 10637 switch (chain_type) { 10638 case SD_CHAIN_NULL: 10639 /* 10640 * Fall thru to just use the values for the buf type, even 10641 * tho for the NULL chain these values will never be used. 10642 */ 10643 /* FALLTHRU */ 10644 case SD_CHAIN_BUFIO: 10645 index = un->un_buf_chain_type; 10646 break; 10647 case SD_CHAIN_USCSI: 10648 index = un->un_uscsi_chain_type; 10649 break; 10650 case SD_CHAIN_DIRECT: 10651 index = un->un_direct_chain_type; 10652 break; 10653 case SD_CHAIN_DIRECT_PRIORITY: 10654 index = un->un_priority_chain_type; 10655 break; 10656 default: 10657 /* We're really broken if we ever get here... */ 10658 panic("sd_xbuf_init: illegal chain type!"); 10659 /*NOTREACHED*/ 10660 } 10661 10662 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10663 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10664 10665 /* 10666 * It might be a bit easier to simply bzero the entire xbuf above, 10667 * but it turns out that since we init a fair number of members anyway, 10668 * we save a fair number cycles by doing explicit assignment of zero. 
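 *
 * The alternative mentioned above would be (sketch):
 *
 *	bzero(xp, sizeof (struct sd_xbuf));
 *	... followed by re-assigning xb_un, xb_pktinfo, xb_private,
 *	    xb_blkno and the chain indexes set above ...
 *
 * Explicit zero assignment of the remaining members avoids touching
 * the fields that were just initialized.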
10669 */ 10670 xp->xb_pkt_flags = 0; 10671 xp->xb_dma_resid = 0; 10672 xp->xb_retry_count = 0; 10673 xp->xb_victim_retry_count = 0; 10674 xp->xb_ua_retry_count = 0; 10675 xp->xb_nr_retry_count = 0; 10676 xp->xb_sense_bp = NULL; 10677 xp->xb_sense_status = 0; 10678 xp->xb_sense_state = 0; 10679 xp->xb_sense_resid = 0; 10680 10681 bp->b_private = xp; 10682 bp->b_flags &= ~(B_DONE | B_ERROR); 10683 bp->b_resid = 0; 10684 bp->av_forw = NULL; 10685 bp->av_back = NULL; 10686 bioerror(bp, 0); 10687 10688 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10689 } 10690 10691 10692 /* 10693 * Function: sd_uscsi_strategy 10694 * 10695 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10696 * 10697 * Arguments: bp - buf struct ptr 10698 * 10699 * Return Code: Always returns 0 10700 * 10701 * Context: Kernel thread context 10702 */ 10703 10704 static int 10705 sd_uscsi_strategy(struct buf *bp) 10706 { 10707 struct sd_lun *un; 10708 struct sd_uscsi_info *uip; 10709 struct sd_xbuf *xp; 10710 uchar_t chain_type; 10711 uchar_t cmd; 10712 10713 ASSERT(bp != NULL); 10714 10715 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10716 if (un == NULL) { 10717 bioerror(bp, EIO); 10718 bp->b_resid = bp->b_bcount; 10719 biodone(bp); 10720 return (0); 10721 } 10722 10723 ASSERT(!mutex_owned(SD_MUTEX(un))); 10724 10725 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10726 10727 /* 10728 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10729 */ 10730 ASSERT(bp->b_private != NULL); 10731 uip = (struct sd_uscsi_info *)bp->b_private; 10732 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 10733 10734 mutex_enter(SD_MUTEX(un)); 10735 /* 10736 * atapi: Since we are running the CD for now in PIO mode we need to 10737 * call bp_mapin here to avoid bp_mapin called interrupt context under 10738 * the HBA's init_pkt routine. 10739 */ 10740 if (un->un_f_cfg_is_atapi == TRUE) { 10741 mutex_exit(SD_MUTEX(un)); 10742 bp_mapin(bp); 10743 mutex_enter(SD_MUTEX(un)); 10744 } 10745 un->un_ncmds_in_driver++; 10746 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10747 un->un_ncmds_in_driver); 10748 10749 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 10750 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 10751 un->un_f_sync_cache_required = TRUE; 10752 10753 mutex_exit(SD_MUTEX(un)); 10754 10755 switch (uip->ui_flags) { 10756 case SD_PATH_DIRECT: 10757 chain_type = SD_CHAIN_DIRECT; 10758 break; 10759 case SD_PATH_DIRECT_PRIORITY: 10760 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10761 break; 10762 default: 10763 chain_type = SD_CHAIN_USCSI; 10764 break; 10765 } 10766 10767 /* 10768 * We may allocate extra buf for external USCSI commands. If the 10769 * application asks for bigger than 20-byte sense data via USCSI, 10770 * SCSA layer will allocate 252 bytes sense buf for that command. 
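 *
 * The sd_xbuf embeds a sense buffer of the default SENSE_LENGTH at its
 * tail (an assumption of this sketch), which is why the allocation
 * below is grown by the difference when a larger sense size is
 * requested (uscmd here stands for the uscsi_cmd hanging off
 * uip->ui_cmdp):
 *
 *	alloc = sizeof (struct sd_xbuf);
 *	if (uscmd->uscsi_rqlen > SENSE_LENGTH)
 *		alloc += MAX_SENSE_LENGTH - SENSE_LENGTH;
 *	xp = kmem_zalloc(alloc, KM_SLEEP);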
10771 */ 10772 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 10773 SENSE_LENGTH) { 10774 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 10775 MAX_SENSE_LENGTH, KM_SLEEP); 10776 } else { 10777 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 10778 } 10779 10780 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10781 10782 /* Use the index obtained within xbuf_init */ 10783 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10784 10785 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10786 10787 return (0); 10788 } 10789 10790 /* 10791 * Function: sd_send_scsi_cmd 10792 * 10793 * Description: Runs a USCSI command for user (when called thru sdioctl), 10794 * or for the driver 10795 * 10796 * Arguments: dev - the dev_t for the device 10797 * incmd - ptr to a valid uscsi_cmd struct 10798 * flag - bit flag, indicating open settings, 32/64 bit type 10799 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10800 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10801 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10802 * to use the USCSI "direct" chain and bypass the normal 10803 * command waitq. 10804 * 10805 * Return Code: 0 - successful completion of the given command 10806 * EIO - scsi_uscsi_handle_command() failed 10807 * ENXIO - soft state not found for specified dev 10808 * EINVAL 10809 * EFAULT - copyin/copyout error 10810 * return code of scsi_uscsi_handle_command(): 10811 * EIO 10812 * ENXIO 10813 * EACCES 10814 * 10815 * Context: Waits for command to complete. Can sleep. 10816 */ 10817 10818 static int 10819 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10820 enum uio_seg dataspace, int path_flag) 10821 { 10822 struct sd_uscsi_info *uip; 10823 struct uscsi_cmd *uscmd; 10824 struct sd_lun *un; 10825 int format = 0; 10826 int rval; 10827 10828 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10829 if (un == NULL) { 10830 return (ENXIO); 10831 } 10832 10833 ASSERT(!mutex_owned(SD_MUTEX(un))); 10834 10835 #ifdef SDDEBUG 10836 switch (dataspace) { 10837 case UIO_USERSPACE: 10838 SD_TRACE(SD_LOG_IO, un, 10839 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10840 break; 10841 case UIO_SYSSPACE: 10842 SD_TRACE(SD_LOG_IO, un, 10843 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10844 break; 10845 default: 10846 SD_TRACE(SD_LOG_IO, un, 10847 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10848 break; 10849 } 10850 #endif 10851 10852 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10853 SD_ADDRESS(un), &uscmd); 10854 if (rval != 0) { 10855 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 10856 "scsi_uscsi_alloc_and_copyin failed\n", un); 10857 return (rval); 10858 } 10859 10860 if ((uscmd->uscsi_cdb != NULL) && 10861 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10862 mutex_enter(SD_MUTEX(un)); 10863 un->un_f_format_in_progress = TRUE; 10864 mutex_exit(SD_MUTEX(un)); 10865 format = 1; 10866 } 10867 10868 /* 10869 * Allocate an sd_uscsi_info struct and fill it with the info 10870 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10871 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10872 * since we allocate the buf here in this function, we do not 10873 * need to preserve the prior contents of b_private. 
10874 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10875 */ 10876 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10877 uip->ui_flags = path_flag; 10878 uip->ui_cmdp = uscmd; 10879 10880 /* 10881 * Commands sent with priority are intended for error recovery 10882 * situations, and do not have retries performed. 10883 */ 10884 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10885 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10886 } 10887 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10888 10889 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10890 sd_uscsi_strategy, NULL, uip); 10891 10892 #ifdef SDDEBUG 10893 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10894 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10895 uscmd->uscsi_status, uscmd->uscsi_resid); 10896 if (uscmd->uscsi_bufaddr != NULL) { 10897 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10898 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10899 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10900 if (dataspace == UIO_SYSSPACE) { 10901 SD_DUMP_MEMORY(un, SD_LOG_IO, 10902 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10903 uscmd->uscsi_buflen, SD_LOG_HEX); 10904 } 10905 } 10906 #endif 10907 10908 if (format == 1) { 10909 mutex_enter(SD_MUTEX(un)); 10910 un->un_f_format_in_progress = FALSE; 10911 mutex_exit(SD_MUTEX(un)); 10912 } 10913 10914 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10915 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10916 10917 return (rval); 10918 } 10919 10920 10921 /* 10922 * Function: sd_buf_iodone 10923 * 10924 * Description: Frees the sd_xbuf & returns the buf to its originator. 10925 * 10926 * Context: May be called from interrupt context. 10927 */ 10928 /* ARGSUSED */ 10929 static void 10930 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10931 { 10932 struct sd_xbuf *xp; 10933 10934 ASSERT(un != NULL); 10935 ASSERT(bp != NULL); 10936 ASSERT(!mutex_owned(SD_MUTEX(un))); 10937 10938 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10939 10940 xp = SD_GET_XBUF(bp); 10941 ASSERT(xp != NULL); 10942 10943 mutex_enter(SD_MUTEX(un)); 10944 10945 /* 10946 * Grab time when the cmd completed. 10947 * This is used for determining if the system has been 10948 * idle long enough to make it idle to the PM framework. 10949 * This is for lowering the overhead, and therefore improving 10950 * performance per I/O operation. 10951 */ 10952 un->un_pm_idle_time = ddi_get_time(); 10953 10954 un->un_ncmds_in_driver--; 10955 ASSERT(un->un_ncmds_in_driver >= 0); 10956 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10957 un->un_ncmds_in_driver); 10958 10959 mutex_exit(SD_MUTEX(un)); 10960 10961 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10962 biodone(bp); /* bp is gone after this */ 10963 10964 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10965 } 10966 10967 10968 /* 10969 * Function: sd_uscsi_iodone 10970 * 10971 * Description: Frees the sd_xbuf & returns the buf to its originator. 10972 * 10973 * Context: May be called from interrupt context. 
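 *
 *		Note: the sd_xbuf being freed here may have been extended
 *		by sd_uscsi_strategy() to hold sense data larger than
 *		SENSE_LENGTH, so the kmem_free() in this routine must use
 *		the matching (larger) size.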
10974 */ 10975 /* ARGSUSED */ 10976 static void 10977 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10978 { 10979 struct sd_xbuf *xp; 10980 10981 ASSERT(un != NULL); 10982 ASSERT(bp != NULL); 10983 10984 xp = SD_GET_XBUF(bp); 10985 ASSERT(xp != NULL); 10986 ASSERT(!mutex_owned(SD_MUTEX(un))); 10987 10988 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10989 10990 bp->b_private = xp->xb_private; 10991 10992 mutex_enter(SD_MUTEX(un)); 10993 10994 /* 10995 * Grab time when the cmd completed. 10996 * This is used for determining if the system has been 10997 * idle long enough to make it idle to the PM framework. 10998 * This is for lowering the overhead, and therefore improving 10999 * performance per I/O operation. 11000 */ 11001 un->un_pm_idle_time = ddi_get_time(); 11002 11003 un->un_ncmds_in_driver--; 11004 ASSERT(un->un_ncmds_in_driver >= 0); 11005 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 11006 un->un_ncmds_in_driver); 11007 11008 mutex_exit(SD_MUTEX(un)); 11009 11010 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 11011 SENSE_LENGTH) { 11012 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 11013 MAX_SENSE_LENGTH); 11014 } else { 11015 kmem_free(xp, sizeof (struct sd_xbuf)); 11016 } 11017 11018 biodone(bp); 11019 11020 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11021 } 11022 11023 11024 /* 11025 * Function: sd_mapblockaddr_iostart 11026 * 11027 * Description: Verify request lies within the partition limits for 11028 * the indicated minor device. Issue "overrun" buf if 11029 * request would exceed partition range. Converts 11030 * partition-relative block address to absolute. 11031 * 11032 * Context: Can sleep 11033 * 11034 * Issues: This follows what the old code did, in terms of accessing 11035 * some of the partition info in the unit struct without holding 11036 * the mutext. This is a general issue, if the partition info 11037 * can be altered while IO is in progress... as soon as we send 11038 * a buf, its partitioning can be invalid before it gets to the 11039 * device. Probably the right fix is to move partitioning out 11040 * of the driver entirely. 11041 */ 11042 11043 static void 11044 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 11045 { 11046 diskaddr_t nblocks; /* #blocks in the given partition */ 11047 daddr_t blocknum; /* Block number specified by the buf */ 11048 size_t requested_nblocks; 11049 size_t available_nblocks; 11050 int partition; 11051 diskaddr_t partition_offset; 11052 struct sd_xbuf *xp; 11053 11054 11055 ASSERT(un != NULL); 11056 ASSERT(bp != NULL); 11057 ASSERT(!mutex_owned(SD_MUTEX(un))); 11058 11059 SD_TRACE(SD_LOG_IO_PARTITION, un, 11060 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 11061 11062 xp = SD_GET_XBUF(bp); 11063 ASSERT(xp != NULL); 11064 11065 /* 11066 * If the geometry is not indicated as valid, attempt to access 11067 * the unit & verify the geometry/label. This can be the case for 11068 * removable-media devices, of if the device was opened in 11069 * NDELAY/NONBLOCK mode. 11070 */ 11071 partition = SDPART(bp->b_edev); 11072 11073 if (!SD_IS_VALID_LABEL(un) && 11074 (sd_ready_and_valid(un, partition) != SD_READY_VALID)) { 11075 /* 11076 * For removable devices it is possible to start an I/O 11077 * without a media by opening the device in nodelay mode. 11078 * Also for writable CDs there can be many scenarios where 11079 * there is no geometry yet but volume manager is trying to 11080 * issue a read() just because it can see TOC on the CD. 
So 11081 * do not print a message for removables. 11082 */ 11083 if (!un->un_f_has_removable_media) { 11084 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11085 "i/o to invalid geometry\n"); 11086 } 11087 bioerror(bp, EIO); 11088 bp->b_resid = bp->b_bcount; 11089 SD_BEGIN_IODONE(index, un, bp); 11090 return; 11091 } 11092 11093 11094 nblocks = 0; 11095 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 11096 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 11097 11098 /* 11099 * blocknum is the starting block number of the request. At this 11100 * point it is still relative to the start of the minor device. 11101 */ 11102 blocknum = xp->xb_blkno; 11103 11104 /* 11105 * Legacy: If the starting block number is one past the last block 11106 * in the partition, do not set B_ERROR in the buf. 11107 */ 11108 if (blocknum == nblocks) { 11109 goto error_exit; 11110 } 11111 11112 /* 11113 * Confirm that the first block of the request lies within the 11114 * partition limits. Also the requested number of bytes must be 11115 * a multiple of the system block size. 11116 */ 11117 if ((blocknum < 0) || (blocknum >= nblocks) || 11118 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 11119 bp->b_flags |= B_ERROR; 11120 goto error_exit; 11121 } 11122 11123 /* 11124 * If the requsted # blocks exceeds the available # blocks, that 11125 * is an overrun of the partition. 11126 */ 11127 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 11128 available_nblocks = (size_t)(nblocks - blocknum); 11129 ASSERT(nblocks >= blocknum); 11130 11131 if (requested_nblocks > available_nblocks) { 11132 /* 11133 * Allocate an "overrun" buf to allow the request to proceed 11134 * for the amount of space available in the partition. The 11135 * amount not transferred will be added into the b_resid 11136 * when the operation is complete. The overrun buf 11137 * replaces the original buf here, and the original buf 11138 * is saved inside the overrun buf, for later use. 11139 */ 11140 size_t resid = SD_SYSBLOCKS2BYTES(un, 11141 (offset_t)(requested_nblocks - available_nblocks)); 11142 size_t count = bp->b_bcount - resid; 11143 /* 11144 * Note: count is an unsigned entity thus it'll NEVER 11145 * be less than 0 so ASSERT the original values are 11146 * correct. 11147 */ 11148 ASSERT(bp->b_bcount >= resid); 11149 11150 bp = sd_bioclone_alloc(bp, count, blocknum, 11151 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 11152 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 11153 ASSERT(xp != NULL); 11154 } 11155 11156 /* At this point there should be no residual for this buf. */ 11157 ASSERT(bp->b_resid == 0); 11158 11159 /* Convert the block number to an absolute address. */ 11160 xp->xb_blkno += partition_offset; 11161 11162 SD_NEXT_IOSTART(index, un, bp); 11163 11164 SD_TRACE(SD_LOG_IO_PARTITION, un, 11165 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 11166 11167 return; 11168 11169 error_exit: 11170 bp->b_resid = bp->b_bcount; 11171 SD_BEGIN_IODONE(index, un, bp); 11172 SD_TRACE(SD_LOG_IO_PARTITION, un, 11173 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 11174 } 11175 11176 11177 /* 11178 * Function: sd_mapblockaddr_iodone 11179 * 11180 * Description: Completion-side processing for partition management. 11181 * 11182 * Context: May be called under interrupt context 11183 */ 11184 11185 static void 11186 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 11187 { 11188 /* int partition; */ /* Not used, see below. 
*/ 11189 ASSERT(un != NULL); 11190 ASSERT(bp != NULL); 11191 ASSERT(!mutex_owned(SD_MUTEX(un))); 11192 11193 SD_TRACE(SD_LOG_IO_PARTITION, un, 11194 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 11195 11196 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 11197 /* 11198 * We have an "overrun" buf to deal with... 11199 */ 11200 struct sd_xbuf *xp; 11201 struct buf *obp; /* ptr to the original buf */ 11202 11203 xp = SD_GET_XBUF(bp); 11204 ASSERT(xp != NULL); 11205 11206 /* Retrieve the pointer to the original buf */ 11207 obp = (struct buf *)xp->xb_private; 11208 ASSERT(obp != NULL); 11209 11210 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 11211 bioerror(obp, bp->b_error); 11212 11213 sd_bioclone_free(bp); 11214 11215 /* 11216 * Get back the original buf. 11217 * Note that since the restoration of xb_blkno below 11218 * was removed, the sd_xbuf is not needed. 11219 */ 11220 bp = obp; 11221 /* 11222 * xp = SD_GET_XBUF(bp); 11223 * ASSERT(xp != NULL); 11224 */ 11225 } 11226 11227 /* 11228 * Convert sd->xb_blkno back to a minor-device relative value. 11229 * Note: this has been commented out, as it is not needed in the 11230 * current implementation of the driver (ie, since this function 11231 * is at the top of the layering chains, so the info will be 11232 * discarded) and it is in the "hot" IO path. 11233 * 11234 * partition = getminor(bp->b_edev) & SDPART_MASK; 11235 * xp->xb_blkno -= un->un_offset[partition]; 11236 */ 11237 11238 SD_NEXT_IODONE(index, un, bp); 11239 11240 SD_TRACE(SD_LOG_IO_PARTITION, un, 11241 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 11242 } 11243 11244 11245 /* 11246 * Function: sd_mapblocksize_iostart 11247 * 11248 * Description: Convert between system block size (un->un_sys_blocksize) 11249 * and target block size (un->un_tgt_blocksize). 11250 * 11251 * Context: Can sleep to allocate resources. 11252 * 11253 * Assumptions: A higher layer has already performed any partition validation, 11254 * and converted the xp->xb_blkno to an absolute value relative 11255 * to the start of the device. 11256 * 11257 * It is also assumed that the higher layer has implemented 11258 * an "overrun" mechanism for the case where the request would 11259 * read/write beyond the end of a partition. In this case we 11260 * assume (and ASSERT) that bp->b_resid == 0. 11261 * 11262 * Note: The implementation for this routine assumes the target 11263 * block size remains constant between allocation and transport. 11264 */ 11265 11266 static void 11267 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 11268 { 11269 struct sd_mapblocksize_info *bsp; 11270 struct sd_xbuf *xp; 11271 offset_t first_byte; 11272 daddr_t start_block, end_block; 11273 daddr_t request_bytes; 11274 ushort_t is_aligned = FALSE; 11275 11276 ASSERT(un != NULL); 11277 ASSERT(bp != NULL); 11278 ASSERT(!mutex_owned(SD_MUTEX(un))); 11279 ASSERT(bp->b_resid == 0); 11280 11281 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11282 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 11283 11284 /* 11285 * For a non-writable CD, a write request is an error 11286 */ 11287 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 11288 (un->un_f_mmc_writable_media == FALSE)) { 11289 bioerror(bp, EIO); 11290 bp->b_resid = bp->b_bcount; 11291 SD_BEGIN_IODONE(index, un, bp); 11292 return; 11293 } 11294 11295 /* 11296 * We do not need a shadow buf if the device is using 11297 * un->un_sys_blocksize as its block size or if bcount == 0. 
11298 * In this case there is no layer-private data block allocated. 11299 */ 11300 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11301 (bp->b_bcount == 0)) { 11302 goto done; 11303 } 11304 11305 #if defined(__i386) || defined(__amd64) 11306 /* We do not support non-block-aligned transfers for ROD devices */ 11307 ASSERT(!ISROD(un)); 11308 #endif 11309 11310 xp = SD_GET_XBUF(bp); 11311 ASSERT(xp != NULL); 11312 11313 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11314 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 11315 un->un_tgt_blocksize, un->un_sys_blocksize); 11316 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11317 "request start block:0x%x\n", xp->xb_blkno); 11318 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11319 "request len:0x%x\n", bp->b_bcount); 11320 11321 /* 11322 * Allocate the layer-private data area for the mapblocksize layer. 11323 * Layers are allowed to use the xp_private member of the sd_xbuf 11324 * struct to store the pointer to their layer-private data block, but 11325 * each layer also has the responsibility of restoring the prior 11326 * contents of xb_private before returning the buf/xbuf to the 11327 * higher layer that sent it. 11328 * 11329 * Here we save the prior contents of xp->xb_private into the 11330 * bsp->mbs_oprivate field of our layer-private data area. This value 11331 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11332 * the layer-private area and returning the buf/xbuf to the layer 11333 * that sent it. 11334 * 11335 * Note that here we use kmem_zalloc for the allocation as there are 11336 * parts of the mapblocksize code that expect certain fields to be 11337 * zero unless explicitly set to a required value. 11338 */ 11339 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11340 bsp->mbs_oprivate = xp->xb_private; 11341 xp->xb_private = bsp; 11342 11343 /* 11344 * This treats the data on the disk (target) as an array of bytes. 11345 * first_byte is the byte offset, from the beginning of the device, 11346 * to the location of the request. This is converted from a 11347 * un->un_sys_blocksize block address to a byte offset, and then back 11348 * to a block address based upon a un->un_tgt_blocksize block size. 11349 * 11350 * xp->xb_blkno should be absolute upon entry into this function, 11351 * but, but it is based upon partitions that use the "system" 11352 * block size. It must be adjusted to reflect the block size of 11353 * the target. 11354 * 11355 * Note that end_block is actually the block that follows the last 11356 * block of the request, but that's what is needed for the computation. 11357 */ 11358 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11359 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11360 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11361 un->un_tgt_blocksize; 11362 11363 /* request_bytes is rounded up to a multiple of the target block size */ 11364 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11365 11366 /* 11367 * See if the starting address of the request and the request 11368 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 11369 * then we do not need to allocate a shadow buf to handle the request. 11370 */ 11371 if (((first_byte % un->un_tgt_blocksize) == 0) && 11372 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11373 is_aligned = TRUE; 11374 } 11375 11376 if ((bp->b_flags & B_READ) == 0) { 11377 /* 11378 * Lock the range for a write operation. 
An aligned request is 11379 * considered a simple write; otherwise the request must be a 11380 * read-modify-write. 11381 */ 11382 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11383 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11384 } 11385 11386 /* 11387 * Alloc a shadow buf if the request is not aligned. Also, this is 11388 * where the READ command is generated for a read-modify-write. (The 11389 * write phase is deferred until after the read completes.) 11390 */ 11391 if (is_aligned == FALSE) { 11392 11393 struct sd_mapblocksize_info *shadow_bsp; 11394 struct sd_xbuf *shadow_xp; 11395 struct buf *shadow_bp; 11396 11397 /* 11398 * Allocate the shadow buf and it associated xbuf. Note that 11399 * after this call the xb_blkno value in both the original 11400 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11401 * same: absolute relative to the start of the device, and 11402 * adjusted for the target block size. The b_blkno in the 11403 * shadow buf will also be set to this value. We should never 11404 * change b_blkno in the original bp however. 11405 * 11406 * Note also that the shadow buf will always need to be a 11407 * READ command, regardless of whether the incoming command 11408 * is a READ or a WRITE. 11409 */ 11410 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11411 xp->xb_blkno, 11412 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11413 11414 shadow_xp = SD_GET_XBUF(shadow_bp); 11415 11416 /* 11417 * Allocate the layer-private data for the shadow buf. 11418 * (No need to preserve xb_private in the shadow xbuf.) 11419 */ 11420 shadow_xp->xb_private = shadow_bsp = 11421 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11422 11423 /* 11424 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11425 * to figure out where the start of the user data is (based upon 11426 * the system block size) in the data returned by the READ 11427 * command (which will be based upon the target blocksize). Note 11428 * that this is only really used if the request is unaligned. 11429 */ 11430 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11431 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11432 ASSERT((bsp->mbs_copy_offset >= 0) && 11433 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11434 11435 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11436 11437 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11438 11439 /* Transfer the wmap (if any) to the shadow buf */ 11440 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11441 bsp->mbs_wmp = NULL; 11442 11443 /* 11444 * The shadow buf goes on from here in place of the 11445 * original buf. 11446 */ 11447 shadow_bsp->mbs_orig_bp = bp; 11448 bp = shadow_bp; 11449 } 11450 11451 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11452 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11453 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11454 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11455 request_bytes); 11456 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11457 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 11458 11459 done: 11460 SD_NEXT_IOSTART(index, un, bp); 11461 11462 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11463 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11464 } 11465 11466 11467 /* 11468 * Function: sd_mapblocksize_iodone 11469 * 11470 * Description: Completion side processing for block-size mapping. 
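 *		For an unaligned WRITE this routine is entered twice:
 *		first when the shadow READ completes (the caller's data
 *		is then copied over the shadow buffer and the request is
 *		re-issued as a WRITE via the taskq), and again when that
 *		WRITE itself completes.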
11471 * 11472 * Context: May be called under interrupt context 11473 */ 11474 11475 static void 11476 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11477 { 11478 struct sd_mapblocksize_info *bsp; 11479 struct sd_xbuf *xp; 11480 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11481 struct buf *orig_bp; /* ptr to the original buf */ 11482 offset_t shadow_end; 11483 offset_t request_end; 11484 offset_t shadow_start; 11485 ssize_t copy_offset; 11486 size_t copy_length; 11487 size_t shortfall; 11488 uint_t is_write; /* TRUE if this bp is a WRITE */ 11489 uint_t has_wmap; /* TRUE is this bp has a wmap */ 11490 11491 ASSERT(un != NULL); 11492 ASSERT(bp != NULL); 11493 11494 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11495 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11496 11497 /* 11498 * There is no shadow buf or layer-private data if the target is 11499 * using un->un_sys_blocksize as its block size or if bcount == 0. 11500 */ 11501 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11502 (bp->b_bcount == 0)) { 11503 goto exit; 11504 } 11505 11506 xp = SD_GET_XBUF(bp); 11507 ASSERT(xp != NULL); 11508 11509 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11510 bsp = xp->xb_private; 11511 11512 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11513 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11514 11515 if (is_write) { 11516 /* 11517 * For a WRITE request we must free up the block range that 11518 * we have locked up. This holds regardless of whether this is 11519 * an aligned write request or a read-modify-write request. 11520 */ 11521 sd_range_unlock(un, bsp->mbs_wmp); 11522 bsp->mbs_wmp = NULL; 11523 } 11524 11525 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11526 /* 11527 * An aligned read or write command will have no shadow buf; 11528 * there is not much else to do with it. 11529 */ 11530 goto done; 11531 } 11532 11533 orig_bp = bsp->mbs_orig_bp; 11534 ASSERT(orig_bp != NULL); 11535 orig_xp = SD_GET_XBUF(orig_bp); 11536 ASSERT(orig_xp != NULL); 11537 ASSERT(!mutex_owned(SD_MUTEX(un))); 11538 11539 if (!is_write && has_wmap) { 11540 /* 11541 * A READ with a wmap means this is the READ phase of a 11542 * read-modify-write. If an error occurred on the READ then 11543 * we do not proceed with the WRITE phase or copy any data. 11544 * Just release the write maps and return with an error. 11545 */ 11546 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11547 orig_bp->b_resid = orig_bp->b_bcount; 11548 bioerror(orig_bp, bp->b_error); 11549 sd_range_unlock(un, bsp->mbs_wmp); 11550 goto freebuf_done; 11551 } 11552 } 11553 11554 /* 11555 * Here is where we set up to copy the data from the shadow buf 11556 * into the space associated with the original buf. 11557 * 11558 * To deal with the conversion between block sizes, these 11559 * computations treat the data as an array of bytes, with the 11560 * first byte (byte 0) corresponding to the first byte in the 11561 * first block on the disk. 11562 */ 11563 11564 /* 11565 * shadow_start and shadow_len indicate the location and size of 11566 * the data returned with the shadow IO request. 11567 */ 11568 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11569 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11570 11571 /* 11572 * copy_offset gives the offset (in bytes) from the start of the first 11573 * block of the READ request to the beginning of the data. 
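 * As a purely illustrative example: with a 512-byte system block size
 * and a 2048-byte target block size, a request starting at system
 * block 3 begins at byte offset 1536, which falls inside target
 * block 0 at a copy_offset of 1536.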
We retrieve 11574 * this value from the mbs_copy_offset field of the layer-private data,
11575 * as it was saved there by sd_mapblocksize_iostart(). copy_length gives the amount of
11576 * data to be copied (in bytes).
11577 */
11578 copy_offset = bsp->mbs_copy_offset;
11579 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
11580 copy_length = orig_bp->b_bcount;
11581 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
11582
11583 /*
11584 * Set up the resid and error fields of orig_bp as appropriate.
11585 */
11586 if (shadow_end >= request_end) {
11587 /* We got all the requested data; set resid to zero */
11588 orig_bp->b_resid = 0;
11589 } else {
11590 /*
11591 * We failed to get enough data to fully satisfy the original
11592 * request. Just copy back whatever data we got and set
11593 * up the residual and error code as required.
11594 *
11595 * 'shortfall' is the amount by which the data received with the
11596 * shadow buf has "fallen short" of the requested amount.
11597 */
11598 shortfall = (size_t)(request_end - shadow_end);
11599
11600 if (shortfall > orig_bp->b_bcount) {
11601 /*
11602 * We did not get enough data to even partially
11603 * fulfill the original request. The residual is
11604 * equal to the amount requested.
11605 */
11606 orig_bp->b_resid = orig_bp->b_bcount;
11607 } else {
11608 /*
11609 * We did not get all the data that we requested
11610 * from the device, but we will try to return what
11611 * portion we did get.
11612 */
11613 orig_bp->b_resid = shortfall;
11614 }
11615 ASSERT(copy_length >= orig_bp->b_resid);
11616 copy_length -= orig_bp->b_resid;
11617 }
11618
11619 /* Propagate the error code from the shadow buf to the original buf */
11620 bioerror(orig_bp, bp->b_error);
11621
11622 if (is_write) {
11623 goto freebuf_done; /* No data copying for a WRITE */
11624 }
11625
11626 if (has_wmap) {
11627 /*
11628 * This is a READ command from the READ phase of a
11629 * read-modify-write request. We have to copy the data given
11630 * by the user OVER the data returned by the READ command,
11631 * then convert the command from a READ to a WRITE and send
11632 * it back to the target.
11633 */
11634 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
11635 copy_length);
11636
11637 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */
11638
11639 /*
11640 * Dispatch the WRITE command to the taskq thread, which
11641 * will in turn send the command to the target. When the
11642 * WRITE command completes, we (sd_mapblocksize_iodone())
11643 * will get called again as part of the iodone chain
11644 * processing for it. Note that we will still be dealing
11645 * with the shadow buf at that point.
11646 */
11647 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
11648 KM_NOSLEEP) != 0) {
11649 /*
11650 * Dispatch was successful so we are done. Return
11651 * without going any higher up the iodone chain. Do
11652 * not free up any layer-private data until after the
11653 * WRITE completes.
11654 */
11655 return;
11656 }
11657
11658 /*
11659 * Dispatch of the WRITE command failed; set up the error
11660 * condition and send this IO back up the iodone chain.
11661 */
11662 bioerror(orig_bp, EIO);
11663 orig_bp->b_resid = orig_bp->b_bcount;
11664
11665 } else {
11666 /*
11667 * This is a regular READ request (ie, not a RMW). Copy the
11668 * data from the shadow buf into the original buf.
The 11669 * copy_offset compensates for any "misalignment" between the 11670 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11671 * original buf (with its un->un_sys_blocksize blocks). 11672 */ 11673 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11674 copy_length); 11675 } 11676 11677 freebuf_done: 11678 11679 /* 11680 * At this point we still have both the shadow buf AND the original 11681 * buf to deal with, as well as the layer-private data area in each. 11682 * Local variables are as follows: 11683 * 11684 * bp -- points to shadow buf 11685 * xp -- points to xbuf of shadow buf 11686 * bsp -- points to layer-private data area of shadow buf 11687 * orig_bp -- points to original buf 11688 * 11689 * First free the shadow buf and its associated xbuf, then free the 11690 * layer-private data area from the shadow buf. There is no need to 11691 * restore xb_private in the shadow xbuf. 11692 */ 11693 sd_shadow_buf_free(bp); 11694 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11695 11696 /* 11697 * Now update the local variables to point to the original buf, xbuf, 11698 * and layer-private area. 11699 */ 11700 bp = orig_bp; 11701 xp = SD_GET_XBUF(bp); 11702 ASSERT(xp != NULL); 11703 ASSERT(xp == orig_xp); 11704 bsp = xp->xb_private; 11705 ASSERT(bsp != NULL); 11706 11707 done: 11708 /* 11709 * Restore xb_private to whatever it was set to by the next higher 11710 * layer in the chain, then free the layer-private data area. 11711 */ 11712 xp->xb_private = bsp->mbs_oprivate; 11713 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11714 11715 exit: 11716 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11717 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11718 11719 SD_NEXT_IODONE(index, un, bp); 11720 } 11721 11722 11723 /* 11724 * Function: sd_checksum_iostart 11725 * 11726 * Description: A stub function for a layer that's currently not used. 11727 * For now just a placeholder. 11728 * 11729 * Context: Kernel thread context 11730 */ 11731 11732 static void 11733 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11734 { 11735 ASSERT(un != NULL); 11736 ASSERT(bp != NULL); 11737 ASSERT(!mutex_owned(SD_MUTEX(un))); 11738 SD_NEXT_IOSTART(index, un, bp); 11739 } 11740 11741 11742 /* 11743 * Function: sd_checksum_iodone 11744 * 11745 * Description: A stub function for a layer that's currently not used. 11746 * For now just a placeholder. 11747 * 11748 * Context: May be called under interrupt context 11749 */ 11750 11751 static void 11752 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11753 { 11754 ASSERT(un != NULL); 11755 ASSERT(bp != NULL); 11756 ASSERT(!mutex_owned(SD_MUTEX(un))); 11757 SD_NEXT_IODONE(index, un, bp); 11758 } 11759 11760 11761 /* 11762 * Function: sd_checksum_uscsi_iostart 11763 * 11764 * Description: A stub function for a layer that's currently not used. 11765 * For now just a placeholder. 11766 * 11767 * Context: Kernel thread context 11768 */ 11769 11770 static void 11771 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11772 { 11773 ASSERT(un != NULL); 11774 ASSERT(bp != NULL); 11775 ASSERT(!mutex_owned(SD_MUTEX(un))); 11776 SD_NEXT_IOSTART(index, un, bp); 11777 } 11778 11779 11780 /* 11781 * Function: sd_checksum_uscsi_iodone 11782 * 11783 * Description: A stub function for a layer that's currently not used. 11784 * For now just a placeholder. 
11785 * 11786 * Context: May be called under interrupt context 11787 */ 11788 11789 static void 11790 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11791 { 11792 ASSERT(un != NULL); 11793 ASSERT(bp != NULL); 11794 ASSERT(!mutex_owned(SD_MUTEX(un))); 11795 SD_NEXT_IODONE(index, un, bp); 11796 } 11797 11798 11799 /* 11800 * Function: sd_pm_iostart 11801 * 11802 * Description: iostart-side routine for Power mangement. 11803 * 11804 * Context: Kernel thread context 11805 */ 11806 11807 static void 11808 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11809 { 11810 ASSERT(un != NULL); 11811 ASSERT(bp != NULL); 11812 ASSERT(!mutex_owned(SD_MUTEX(un))); 11813 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11814 11815 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11816 11817 if (sd_pm_entry(un) != DDI_SUCCESS) { 11818 /* 11819 * Set up to return the failed buf back up the 'iodone' 11820 * side of the calling chain. 11821 */ 11822 bioerror(bp, EIO); 11823 bp->b_resid = bp->b_bcount; 11824 11825 SD_BEGIN_IODONE(index, un, bp); 11826 11827 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11828 return; 11829 } 11830 11831 SD_NEXT_IOSTART(index, un, bp); 11832 11833 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11834 } 11835 11836 11837 /* 11838 * Function: sd_pm_iodone 11839 * 11840 * Description: iodone-side routine for power mangement. 11841 * 11842 * Context: may be called from interrupt context 11843 */ 11844 11845 static void 11846 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11847 { 11848 ASSERT(un != NULL); 11849 ASSERT(bp != NULL); 11850 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11851 11852 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11853 11854 /* 11855 * After attach the following flag is only read, so don't 11856 * take the penalty of acquiring a mutex for it. 11857 */ 11858 if (un->un_f_pm_is_enabled == TRUE) { 11859 sd_pm_exit(un); 11860 } 11861 11862 SD_NEXT_IODONE(index, un, bp); 11863 11864 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11865 } 11866 11867 11868 /* 11869 * Function: sd_core_iostart 11870 * 11871 * Description: Primary driver function for enqueuing buf(9S) structs from 11872 * the system and initiating IO to the target device 11873 * 11874 * Context: Kernel thread context. Can sleep. 11875 * 11876 * Assumptions: - The given xp->xb_blkno is absolute 11877 * (ie, relative to the start of the device). 11878 * - The IO is to be done using the native blocksize of 11879 * the device, as specified in un->un_tgt_blocksize. 11880 */ 11881 /* ARGSUSED */ 11882 static void 11883 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11884 { 11885 struct sd_xbuf *xp; 11886 11887 ASSERT(un != NULL); 11888 ASSERT(bp != NULL); 11889 ASSERT(!mutex_owned(SD_MUTEX(un))); 11890 ASSERT(bp->b_resid == 0); 11891 11892 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11893 11894 xp = SD_GET_XBUF(bp); 11895 ASSERT(xp != NULL); 11896 11897 mutex_enter(SD_MUTEX(un)); 11898 11899 /* 11900 * If we are currently in the failfast state, fail any new IO 11901 * that has B_FAILFAST set, then return. 11902 */ 11903 if ((bp->b_flags & B_FAILFAST) && 11904 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11905 mutex_exit(SD_MUTEX(un)); 11906 bioerror(bp, EIO); 11907 bp->b_resid = bp->b_bcount; 11908 SD_BEGIN_IODONE(index, un, bp); 11909 return; 11910 } 11911 11912 if (SD_IS_DIRECT_PRIORITY(xp)) { 11913 /* 11914 * Priority command -- transport it immediately. 
11915 * 11916 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11917 * because all direct priority commands should be associated 11918 * with error recovery actions which we don't want to retry. 11919 */ 11920 sd_start_cmds(un, bp); 11921 } else { 11922 /* 11923 * Normal command -- add it to the wait queue, then start 11924 * transporting commands from the wait queue. 11925 */ 11926 sd_add_buf_to_waitq(un, bp); 11927 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11928 sd_start_cmds(un, NULL); 11929 } 11930 11931 mutex_exit(SD_MUTEX(un)); 11932 11933 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11934 } 11935 11936 11937 /* 11938 * Function: sd_init_cdb_limits 11939 * 11940 * Description: This is to handle scsi_pkt initialization differences 11941 * between the driver platforms. 11942 * 11943 * Legacy behaviors: 11944 * 11945 * If the block number or the sector count exceeds the 11946 * capabilities of a Group 0 command, shift over to a 11947 * Group 1 command. We don't blindly use Group 1 11948 * commands because a) some drives (CDC Wren IVs) get a 11949 * bit confused, and b) there is probably a fair amount 11950 * of speed difference for a target to receive and decode 11951 * a 10 byte command instead of a 6 byte command. 11952 * 11953 * The xfer time difference of 6 vs 10 byte CDBs is 11954 * still significant so this code is still worthwhile. 11955 * 10 byte CDBs are very inefficient with the fas HBA driver 11956 * and older disks. Each CDB byte took 1 usec with some 11957 * popular disks. 11958 * 11959 * Context: Must be called at attach time 11960 */ 11961 11962 static void 11963 sd_init_cdb_limits(struct sd_lun *un) 11964 { 11965 int hba_cdb_limit; 11966 11967 /* 11968 * Use CDB_GROUP1 commands for most devices except for 11969 * parallel SCSI fixed drives in which case we get better 11970 * performance using CDB_GROUP0 commands (where applicable). 11971 */ 11972 un->un_mincdb = SD_CDB_GROUP1; 11973 #if !defined(__fibre) 11974 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11975 !un->un_f_has_removable_media) { 11976 un->un_mincdb = SD_CDB_GROUP0; 11977 } 11978 #endif 11979 11980 /* 11981 * Try to read the max-cdb-length supported by HBA. 11982 */ 11983 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11984 if (0 >= un->un_max_hba_cdb) { 11985 un->un_max_hba_cdb = CDB_GROUP4; 11986 hba_cdb_limit = SD_CDB_GROUP4; 11987 } else if (0 < un->un_max_hba_cdb && 11988 un->un_max_hba_cdb < CDB_GROUP1) { 11989 hba_cdb_limit = SD_CDB_GROUP0; 11990 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11991 un->un_max_hba_cdb < CDB_GROUP5) { 11992 hba_cdb_limit = SD_CDB_GROUP1; 11993 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11994 un->un_max_hba_cdb < CDB_GROUP4) { 11995 hba_cdb_limit = SD_CDB_GROUP5; 11996 } else { 11997 hba_cdb_limit = SD_CDB_GROUP4; 11998 } 11999 12000 /* 12001 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 12002 * commands for fixed disks unless we are building for a 32 bit 12003 * kernel. 12004 */ 12005 #ifdef _LP64 12006 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12007 min(hba_cdb_limit, SD_CDB_GROUP4); 12008 #else 12009 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12010 min(hba_cdb_limit, SD_CDB_GROUP1); 12011 #endif 12012 12013 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12014 ? sizeof (struct scsi_arq_status) : 1); 12015 un->un_cmd_timeout = (ushort_t)sd_io_time; 12016 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout;
12017 }
12018
12019
12020 /*
12021 * Function: sd_initpkt_for_buf
12022 *
12023 * Description: Allocate and initialize for transport a scsi_pkt struct,
12024 * based upon the info specified in the given buf struct.
12025 *
12026 * Assumes the xb_blkno in the request is absolute (ie,
12027 * relative to the start of the device, NOT the partition).
12028 * Also assumes that the request is using the native block
12029 * size of the device (as returned by the READ CAPACITY
12030 * command).
12031 *
12032 * Return Code: SD_PKT_ALLOC_SUCCESS
12033 * SD_PKT_ALLOC_FAILURE
12034 * SD_PKT_ALLOC_FAILURE_NO_DMA
12035 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
12036 *
12037 * Context: Kernel thread and may be called from software interrupt context
12038 * as part of a sdrunout callback. This function may not block or
12039 * call routines that block
12040 */
12041
12042 static int
12043 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
12044 {
12045 struct sd_xbuf *xp;
12046 struct scsi_pkt *pktp = NULL;
12047 struct sd_lun *un;
12048 size_t blockcount;
12049 daddr_t startblock;
12050 int rval;
12051 int cmd_flags;
12052
12053 ASSERT(bp != NULL);
12054 ASSERT(pktpp != NULL);
12055 xp = SD_GET_XBUF(bp);
12056 ASSERT(xp != NULL);
12057 un = SD_GET_UN(bp);
12058 ASSERT(un != NULL);
12059 ASSERT(mutex_owned(SD_MUTEX(un)));
12060 ASSERT(bp->b_resid == 0);
12061
12062 SD_TRACE(SD_LOG_IO_CORE, un,
12063 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
12064
12065 mutex_exit(SD_MUTEX(un));
12066
12067 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
12068 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
12069 /*
12070 * Already have a scsi_pkt -- just need DMA resources.
12071 * We must recompute the CDB in case the mapping returns
12072 * a nonzero pkt_resid.
12073 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
12074 * that is being retried, the unmap/remap of the DMA resources
12075 * will result in the entire transfer starting over again
12076 * from the very first block.
12077 */
12078 ASSERT(xp->xb_pktp != NULL);
12079 pktp = xp->xb_pktp;
12080 } else {
12081 pktp = NULL;
12082 }
12083 #endif /* __i386 || __amd64 */
12084
12085 startblock = xp->xb_blkno; /* Absolute block num. */
12086 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12087
12088 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
12089
12090 /*
12091 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
12092 * call scsi_init_pkt, and build the CDB.
12093 */
12094 rval = sd_setup_rw_pkt(un, &pktp, bp,
12095 cmd_flags, sdrunout, (caddr_t)un,
12096 startblock, blockcount);
12097
12098 if (rval == 0) {
12099 /*
12100 * Success.
12101 *
12102 * If partial DMA is being used and is required for this transfer,
12103 * set it up here.
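 *
 * (When the HBA could not map the whole request, scsi_init_pkt() leaves
 * the unmapped byte count in pkt_resid; that amount is parked in
 * xb_dma_resid below, and the remaining portions of the transfer are
 * built later with sd_setup_next_rw_pkt(), which must keep using the
 * same CDB group.)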
12104 */ 12105 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 12106 (pktp->pkt_resid != 0)) { 12107 12108 /* 12109 * Save the CDB length and pkt_resid for the 12110 * next xfer 12111 */ 12112 xp->xb_dma_resid = pktp->pkt_resid; 12113 12114 /* rezero resid */ 12115 pktp->pkt_resid = 0; 12116 12117 } else { 12118 xp->xb_dma_resid = 0; 12119 } 12120 12121 pktp->pkt_flags = un->un_tagflags; 12122 pktp->pkt_time = un->un_cmd_timeout; 12123 pktp->pkt_comp = sdintr; 12124 12125 pktp->pkt_private = bp; 12126 *pktpp = pktp; 12127 12128 SD_TRACE(SD_LOG_IO_CORE, un, 12129 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 12130 12131 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12132 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 12133 #endif 12134 12135 mutex_enter(SD_MUTEX(un)); 12136 return (SD_PKT_ALLOC_SUCCESS); 12137 12138 } 12139 12140 /* 12141 * SD_PKT_ALLOC_FAILURE is the only expected failure code 12142 * from sd_setup_rw_pkt. 12143 */ 12144 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 12145 12146 if (rval == SD_PKT_ALLOC_FAILURE) { 12147 *pktpp = NULL; 12148 /* 12149 * Set the driver state to RWAIT to indicate the driver 12150 * is waiting on resource allocations. The driver will not 12151 * suspend, pm_suspend, or detatch while the state is RWAIT. 12152 */ 12153 mutex_enter(SD_MUTEX(un)); 12154 New_state(un, SD_STATE_RWAIT); 12155 12156 SD_ERROR(SD_LOG_IO_CORE, un, 12157 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 12158 12159 if ((bp->b_flags & B_ERROR) != 0) { 12160 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12161 } 12162 return (SD_PKT_ALLOC_FAILURE); 12163 } else { 12164 /* 12165 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12166 * 12167 * This should never happen. Maybe someone messed with the 12168 * kernel's minphys? 12169 */ 12170 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12171 "Request rejected: too large for CDB: " 12172 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 12173 SD_ERROR(SD_LOG_IO_CORE, un, 12174 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 12175 mutex_enter(SD_MUTEX(un)); 12176 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12177 12178 } 12179 } 12180 12181 12182 /* 12183 * Function: sd_destroypkt_for_buf 12184 * 12185 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 12186 * 12187 * Context: Kernel thread or interrupt context 12188 */ 12189 12190 static void 12191 sd_destroypkt_for_buf(struct buf *bp) 12192 { 12193 ASSERT(bp != NULL); 12194 ASSERT(SD_GET_UN(bp) != NULL); 12195 12196 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 12197 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 12198 12199 ASSERT(SD_GET_PKTP(bp) != NULL); 12200 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12201 12202 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 12203 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 12204 } 12205 12206 /* 12207 * Function: sd_setup_rw_pkt 12208 * 12209 * Description: Determines appropriate CDB group for the requested LBA 12210 * and transfer length, calls scsi_init_pkt, and builds 12211 * the CDB. Do not use for partial DMA transfers except 12212 * for the initial transfer since the CDB size must 12213 * remain constant. 12214 * 12215 * Context: Kernel thread and may be called from software interrupt 12216 * context as part of a sdrunout callback. 
This function may not 12217 * block or call routines that block 12218 */ 12219 12220 12221 int 12222 sd_setup_rw_pkt(struct sd_lun *un, 12223 struct scsi_pkt **pktpp, struct buf *bp, int flags, 12224 int (*callback)(caddr_t), caddr_t callback_arg, 12225 diskaddr_t lba, uint32_t blockcount) 12226 { 12227 struct scsi_pkt *return_pktp; 12228 union scsi_cdb *cdbp; 12229 struct sd_cdbinfo *cp = NULL; 12230 int i; 12231 12232 /* 12233 * See which size CDB to use, based upon the request. 12234 */ 12235 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 12236 12237 /* 12238 * Check lba and block count against sd_cdbtab limits. 12239 * In the partial DMA case, we have to use the same size 12240 * CDB for all the transfers. Check lba + blockcount 12241 * against the max LBA so we know that segment of the 12242 * transfer can use the CDB we select. 12243 */ 12244 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 12245 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 12246 12247 /* 12248 * The command will fit into the CDB type 12249 * specified by sd_cdbtab[i]. 12250 */ 12251 cp = sd_cdbtab + i; 12252 12253 /* 12254 * Call scsi_init_pkt so we can fill in the 12255 * CDB. 12256 */ 12257 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 12258 bp, cp->sc_grpcode, un->un_status_len, 0, 12259 flags, callback, callback_arg); 12260 12261 if (return_pktp != NULL) { 12262 12263 /* 12264 * Return new value of pkt 12265 */ 12266 *pktpp = return_pktp; 12267 12268 /* 12269 * To be safe, zero the CDB insuring there is 12270 * no leftover data from a previous command. 12271 */ 12272 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 12273 12274 /* 12275 * Handle partial DMA mapping 12276 */ 12277 if (return_pktp->pkt_resid != 0) { 12278 12279 /* 12280 * Not going to xfer as many blocks as 12281 * originally expected 12282 */ 12283 blockcount -= 12284 SD_BYTES2TGTBLOCKS(un, 12285 return_pktp->pkt_resid); 12286 } 12287 12288 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 12289 12290 /* 12291 * Set command byte based on the CDB 12292 * type we matched. 12293 */ 12294 cdbp->scc_cmd = cp->sc_grpmask | 12295 ((bp->b_flags & B_READ) ? 12296 SCMD_READ : SCMD_WRITE); 12297 12298 SD_FILL_SCSI1_LUN(un, return_pktp); 12299 12300 /* 12301 * Fill in LBA and length 12302 */ 12303 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 12304 (cp->sc_grpcode == CDB_GROUP4) || 12305 (cp->sc_grpcode == CDB_GROUP0) || 12306 (cp->sc_grpcode == CDB_GROUP5)); 12307 12308 if (cp->sc_grpcode == CDB_GROUP1) { 12309 FORMG1ADDR(cdbp, lba); 12310 FORMG1COUNT(cdbp, blockcount); 12311 return (0); 12312 } else if (cp->sc_grpcode == CDB_GROUP4) { 12313 FORMG4LONGADDR(cdbp, lba); 12314 FORMG4COUNT(cdbp, blockcount); 12315 return (0); 12316 } else if (cp->sc_grpcode == CDB_GROUP0) { 12317 FORMG0ADDR(cdbp, lba); 12318 FORMG0COUNT(cdbp, blockcount); 12319 return (0); 12320 } else if (cp->sc_grpcode == CDB_GROUP5) { 12321 FORMG5ADDR(cdbp, lba); 12322 FORMG5COUNT(cdbp, blockcount); 12323 return (0); 12324 } 12325 12326 /* 12327 * It should be impossible to not match one 12328 * of the CDB types above, so we should never 12329 * reach this point. Set the CDB command byte 12330 * to test-unit-ready to avoid writing 12331 * to somewhere we don't intend. 12332 */ 12333 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12334 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12335 } else { 12336 /* 12337 * Couldn't get scsi_pkt 12338 */ 12339 return (SD_PKT_ALLOC_FAILURE); 12340 } 12341 } 12342 } 12343 12344 /* 12345 * None of the available CDB types were suitable. 
This really 12346 * should never happen: on a 64 bit system we support 12347 * READ16/WRITE16 which will hold an entire 64 bit disk address 12348 * and on a 32 bit system we will refuse to bind to a device 12349 * larger than 2TB so addresses will never be larger than 32 bits. 12350 */ 12351 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12352 } 12353 12354 /* 12355 * Function: sd_setup_next_rw_pkt 12356 * 12357 * Description: Setup packet for partial DMA transfers, except for the 12358 * initial transfer. sd_setup_rw_pkt should be used for 12359 * the initial transfer. 12360 * 12361 * Context: Kernel thread and may be called from interrupt context. 12362 */ 12363 12364 int 12365 sd_setup_next_rw_pkt(struct sd_lun *un, 12366 struct scsi_pkt *pktp, struct buf *bp, 12367 diskaddr_t lba, uint32_t blockcount) 12368 { 12369 uchar_t com; 12370 union scsi_cdb *cdbp; 12371 uchar_t cdb_group_id; 12372 12373 ASSERT(pktp != NULL); 12374 ASSERT(pktp->pkt_cdbp != NULL); 12375 12376 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12377 com = cdbp->scc_cmd; 12378 cdb_group_id = CDB_GROUPID(com); 12379 12380 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12381 (cdb_group_id == CDB_GROUPID_1) || 12382 (cdb_group_id == CDB_GROUPID_4) || 12383 (cdb_group_id == CDB_GROUPID_5)); 12384 12385 /* 12386 * Move pkt to the next portion of the xfer. 12387 * func is NULL_FUNC so we do not have to release 12388 * the disk mutex here. 12389 */ 12390 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12391 NULL_FUNC, NULL) == pktp) { 12392 /* Success. Handle partial DMA */ 12393 if (pktp->pkt_resid != 0) { 12394 blockcount -= 12395 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12396 } 12397 12398 cdbp->scc_cmd = com; 12399 SD_FILL_SCSI1_LUN(un, pktp); 12400 if (cdb_group_id == CDB_GROUPID_1) { 12401 FORMG1ADDR(cdbp, lba); 12402 FORMG1COUNT(cdbp, blockcount); 12403 return (0); 12404 } else if (cdb_group_id == CDB_GROUPID_4) { 12405 FORMG4LONGADDR(cdbp, lba); 12406 FORMG4COUNT(cdbp, blockcount); 12407 return (0); 12408 } else if (cdb_group_id == CDB_GROUPID_0) { 12409 FORMG0ADDR(cdbp, lba); 12410 FORMG0COUNT(cdbp, blockcount); 12411 return (0); 12412 } else if (cdb_group_id == CDB_GROUPID_5) { 12413 FORMG5ADDR(cdbp, lba); 12414 FORMG5COUNT(cdbp, blockcount); 12415 return (0); 12416 } 12417 12418 /* Unreachable */ 12419 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12420 } 12421 12422 /* 12423 * Error setting up next portion of cmd transfer. 12424 * Something is definitely very wrong and this 12425 * should not happen. 12426 */ 12427 return (SD_PKT_ALLOC_FAILURE); 12428 } 12429 12430 /* 12431 * Function: sd_initpkt_for_uscsi 12432 * 12433 * Description: Allocate and initialize for transport a scsi_pkt struct, 12434 * based upon the info specified in the given uscsi_cmd struct. 12435 * 12436 * Return Code: SD_PKT_ALLOC_SUCCESS 12437 * SD_PKT_ALLOC_FAILURE 12438 * SD_PKT_ALLOC_FAILURE_NO_DMA 12439 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12440 * 12441 * Context: Kernel thread and may be called from software interrupt context 12442 * as part of a sdrunout callback. 
This function may not block or
12443 * call routines that block */
12444
12445
12446 static int
12447 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
12448 {
12449 struct uscsi_cmd *uscmd;
12450 struct sd_xbuf *xp;
12451 struct scsi_pkt *pktp;
12452 struct sd_lun *un;
12453 uint32_t flags = 0;
12454
12455 ASSERT(bp != NULL);
12456 ASSERT(pktpp != NULL);
12457 xp = SD_GET_XBUF(bp);
12458 ASSERT(xp != NULL);
12459 un = SD_GET_UN(bp);
12460 ASSERT(un != NULL);
12461 ASSERT(mutex_owned(SD_MUTEX(un)));
12462
12463 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
12464 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
12465 ASSERT(uscmd != NULL);
12466
12467 SD_TRACE(SD_LOG_IO_CORE, un,
12468 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
12469
12470 /*
12471 * Allocate the scsi_pkt for the command.
12472 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
12473 * during scsi_init_pkt time and will continue to use the
12474 * same path as long as the same scsi_pkt is used without
12475 * intervening scsi_dma_free(). Since a uscsi command does
12476 * not call scsi_dmafree() before retrying a failed command, it
12477 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT
12478 * set, so that scsi_vhci can use another available path for the
12479 * retry. Besides, a uscsi command does not allow DMA breakup,
12480 * so there is no need to set PKT_DMA_PARTIAL flag.
12481 */
12482 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
12483 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
12484 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
12485 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
12486 - sizeof (struct scsi_extended_sense)), 0,
12487 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
12488 sdrunout, (caddr_t)un);
12489 } else {
12490 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
12491 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
12492 sizeof (struct scsi_arq_status), 0,
12493 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
12494 sdrunout, (caddr_t)un);
12495 }
12496
12497 if (pktp == NULL) {
12498 *pktpp = NULL;
12499 /*
12500 * Set the driver state to RWAIT to indicate the driver
12501 * is waiting on resource allocations. The driver will not
12502 * suspend, pm_suspend, or detach while the state is RWAIT.
12503 */
12504 New_state(un, SD_STATE_RWAIT);
12505
12506 SD_ERROR(SD_LOG_IO_CORE, un,
12507 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
12508
12509 if ((bp->b_flags & B_ERROR) != 0) {
12510 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
12511 }
12512 return (SD_PKT_ALLOC_FAILURE);
12513 }
12514
12515 /*
12516 * We do not do DMA breakup for USCSI commands, so return failure
12517 * here if all the needed DMA resources were not allocated.
12518 */
12519 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
12520 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
12521 scsi_destroy_pkt(pktp);
12522 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
12523 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
12524 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
12525 }
12526
12527 /* Init the cdb from the given uscsi struct */
12528 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
12529 uscmd->uscsi_cdb[0], 0, 0, 0);
12530
12531 SD_FILL_SCSI1_LUN(un, pktp);
12532
12533 /*
12534 * Set up the optional USCSI flags. See the uscsi (7I) man page
12535 * for a listing of the supported flags.
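 *
 * For example, an internally generated command issued through
 * SD_PATH_DIRECT_PRIORITY arrives here with USCSI_DIAGNOSE already set
 * (see sd_send_scsi_cmd() above), and that bit is simply translated
 * into FLAG_DIAGNOSE below.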
12536 */ 12537 12538 if (uscmd->uscsi_flags & USCSI_SILENT) { 12539 flags |= FLAG_SILENT; 12540 } 12541 12542 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12543 flags |= FLAG_DIAGNOSE; 12544 } 12545 12546 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12547 flags |= FLAG_ISOLATE; 12548 } 12549 12550 if (un->un_f_is_fibre == FALSE) { 12551 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12552 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12553 } 12554 } 12555 12556 /* 12557 * Set the pkt flags here so we save time later. 12558 * Note: These flags are NOT in the uscsi man page!!! 12559 */ 12560 if (uscmd->uscsi_flags & USCSI_HEAD) { 12561 flags |= FLAG_HEAD; 12562 } 12563 12564 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12565 flags |= FLAG_NOINTR; 12566 } 12567 12568 /* 12569 * For tagged queueing, things get a bit complicated. 12570 * Check first for head of queue and last for ordered queue. 12571 * If neither head nor order, use the default driver tag flags. 12572 */ 12573 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12574 if (uscmd->uscsi_flags & USCSI_HTAG) { 12575 flags |= FLAG_HTAG; 12576 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12577 flags |= FLAG_OTAG; 12578 } else { 12579 flags |= un->un_tagflags & FLAG_TAGMASK; 12580 } 12581 } 12582 12583 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12584 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12585 } 12586 12587 pktp->pkt_flags = flags; 12588 12589 /* Transfer uscsi information to scsi_pkt */ 12590 (void) scsi_uscsi_pktinit(uscmd, pktp); 12591 12592 /* Copy the caller's CDB into the pkt... */ 12593 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12594 12595 if (uscmd->uscsi_timeout == 0) { 12596 pktp->pkt_time = un->un_uscsi_timeout; 12597 } else { 12598 pktp->pkt_time = uscmd->uscsi_timeout; 12599 } 12600 12601 /* need it later to identify USCSI request in sdintr */ 12602 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12603 12604 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12605 12606 pktp->pkt_private = bp; 12607 pktp->pkt_comp = sdintr; 12608 *pktpp = pktp; 12609 12610 SD_TRACE(SD_LOG_IO_CORE, un, 12611 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12612 12613 return (SD_PKT_ALLOC_SUCCESS); 12614 } 12615 12616 12617 /* 12618 * Function: sd_destroypkt_for_uscsi 12619 * 12620 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12621 * IOs.. Also saves relevant info into the associated uscsi_cmd 12622 * struct. 
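 *		In rough terms (a sketch; the function body below has the
 *		exact rules), the information carried back is:
 *
 *		    uscmd->uscsi_status = *(pktp->pkt_scbp) & STATUS_MASK;
 *		    uscmd->uscsi_resid  = bp->b_resid;
 *
 *		and, if USCSI_RQENABLE is set and a request sense buffer
 *		was supplied, the saved sense data is copied to uscsi_rqbuf
 *		and uscsi_rqstatus/uscsi_rqresid are updated.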
12623 * 12624 * Context: May be called under interrupt context 12625 */ 12626 12627 static void 12628 sd_destroypkt_for_uscsi(struct buf *bp) 12629 { 12630 struct uscsi_cmd *uscmd; 12631 struct sd_xbuf *xp; 12632 struct scsi_pkt *pktp; 12633 struct sd_lun *un; 12634 12635 ASSERT(bp != NULL); 12636 xp = SD_GET_XBUF(bp); 12637 ASSERT(xp != NULL); 12638 un = SD_GET_UN(bp); 12639 ASSERT(un != NULL); 12640 ASSERT(!mutex_owned(SD_MUTEX(un))); 12641 pktp = SD_GET_PKTP(bp); 12642 ASSERT(pktp != NULL); 12643 12644 SD_TRACE(SD_LOG_IO_CORE, un, 12645 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12646 12647 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12648 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12649 ASSERT(uscmd != NULL); 12650 12651 /* Save the status and the residual into the uscsi_cmd struct */ 12652 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12653 uscmd->uscsi_resid = bp->b_resid; 12654 12655 /* Transfer scsi_pkt information to uscsi */ 12656 (void) scsi_uscsi_pktfini(pktp, uscmd); 12657 12658 /* 12659 * If enabled, copy any saved sense data into the area specified 12660 * by the uscsi command. 12661 */ 12662 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12663 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12664 /* 12665 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12666 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12667 */ 12668 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12669 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12670 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12671 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12672 MAX_SENSE_LENGTH); 12673 } else { 12674 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12675 SENSE_LENGTH); 12676 } 12677 } 12678 12679 /* We are done with the scsi_pkt; free it now */ 12680 ASSERT(SD_GET_PKTP(bp) != NULL); 12681 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12682 12683 SD_TRACE(SD_LOG_IO_CORE, un, 12684 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12685 } 12686 12687 12688 /* 12689 * Function: sd_bioclone_alloc 12690 * 12691 * Description: Allocate a buf(9S) and init it as per the given buf 12692 * and the various arguments. The associated sd_xbuf 12693 * struct is (nearly) duplicated. The struct buf *bp 12694 * argument is saved in new_xp->xb_private. 12695 * 12696 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12697 * datalen - size of data area for the shadow bp 12698 * blkno - starting LBA 12699 * func - function pointer for b_iodone in the shadow buf. (May 12700 * be NULL if none.) 12701 * 12702 * Return Code: Pointer to allocates buf(9S) struct 12703 * 12704 * Context: Can sleep. 12705 */ 12706 12707 static struct buf * 12708 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12709 daddr_t blkno, int (*func)(struct buf *)) 12710 { 12711 struct sd_lun *un; 12712 struct sd_xbuf *xp; 12713 struct sd_xbuf *new_xp; 12714 struct buf *new_bp; 12715 12716 ASSERT(bp != NULL); 12717 xp = SD_GET_XBUF(bp); 12718 ASSERT(xp != NULL); 12719 un = SD_GET_UN(bp); 12720 ASSERT(un != NULL); 12721 ASSERT(!mutex_owned(SD_MUTEX(un))); 12722 12723 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12724 NULL, KM_SLEEP); 12725 12726 new_bp->b_lblkno = blkno; 12727 12728 /* 12729 * Allocate an xbuf for the shadow bp and copy the contents of the 12730 * original xbuf into it. 
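	 * A hypothetical consumer of this (e.g. the shadow buf's iodone
	 * routine, which is not shown here) recovers the original buf from
	 * the shadow's xbuf, since it is stashed in xb_private just below:
	 *
	 *	struct sd_xbuf *sxp = SD_GET_XBUF(shadow_bp);
	 *	struct buf *orig_bp = (struct buf *)sxp->xb_private;
	 *
	 * after which orig_bp can be completed and the shadow released via
	 * sd_bioclone_free().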
12731 */ 12732 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12733 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12734 12735 /* 12736 * The given bp is automatically saved in the xb_private member 12737 * of the new xbuf. Callers are allowed to depend on this. 12738 */ 12739 new_xp->xb_private = bp; 12740 12741 new_bp->b_private = new_xp; 12742 12743 return (new_bp); 12744 } 12745 12746 /* 12747 * Function: sd_shadow_buf_alloc 12748 * 12749 * Description: Allocate a buf(9S) and init it as per the given buf 12750 * and the various arguments. The associated sd_xbuf 12751 * struct is (nearly) duplicated. The struct buf *bp 12752 * argument is saved in new_xp->xb_private. 12753 * 12754 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12755 * datalen - size of data area for the shadow bp 12756 * bflags - B_READ or B_WRITE (pseudo flag) 12757 * blkno - starting LBA 12758 * func - function pointer for b_iodone in the shadow buf. (May 12759 * be NULL if none.) 12760 * 12761 * Return Code: Pointer to allocates buf(9S) struct 12762 * 12763 * Context: Can sleep. 12764 */ 12765 12766 static struct buf * 12767 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12768 daddr_t blkno, int (*func)(struct buf *)) 12769 { 12770 struct sd_lun *un; 12771 struct sd_xbuf *xp; 12772 struct sd_xbuf *new_xp; 12773 struct buf *new_bp; 12774 12775 ASSERT(bp != NULL); 12776 xp = SD_GET_XBUF(bp); 12777 ASSERT(xp != NULL); 12778 un = SD_GET_UN(bp); 12779 ASSERT(un != NULL); 12780 ASSERT(!mutex_owned(SD_MUTEX(un))); 12781 12782 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12783 bp_mapin(bp); 12784 } 12785 12786 bflags &= (B_READ | B_WRITE); 12787 #if defined(__i386) || defined(__amd64) 12788 new_bp = getrbuf(KM_SLEEP); 12789 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12790 new_bp->b_bcount = datalen; 12791 new_bp->b_flags = bflags | 12792 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12793 #else 12794 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12795 datalen, bflags, SLEEP_FUNC, NULL); 12796 #endif 12797 new_bp->av_forw = NULL; 12798 new_bp->av_back = NULL; 12799 new_bp->b_dev = bp->b_dev; 12800 new_bp->b_blkno = blkno; 12801 new_bp->b_iodone = func; 12802 new_bp->b_edev = bp->b_edev; 12803 new_bp->b_resid = 0; 12804 12805 /* We need to preserve the B_FAILFAST flag */ 12806 if (bp->b_flags & B_FAILFAST) { 12807 new_bp->b_flags |= B_FAILFAST; 12808 } 12809 12810 /* 12811 * Allocate an xbuf for the shadow bp and copy the contents of the 12812 * original xbuf into it. 12813 */ 12814 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12815 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12816 12817 /* Need later to copy data between the shadow buf & original buf! */ 12818 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12819 12820 /* 12821 * The given bp is automatically saved in the xb_private member 12822 * of the new xbuf. Callers are allowed to depend on this. 12823 */ 12824 new_xp->xb_private = bp; 12825 12826 new_bp->b_private = new_xp; 12827 12828 return (new_bp); 12829 } 12830 12831 /* 12832 * Function: sd_bioclone_free 12833 * 12834 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12835 * in the larger than partition operation. 
12836 * 12837 * Context: May be called under interrupt context 12838 */ 12839 12840 static void 12841 sd_bioclone_free(struct buf *bp) 12842 { 12843 struct sd_xbuf *xp; 12844 12845 ASSERT(bp != NULL); 12846 xp = SD_GET_XBUF(bp); 12847 ASSERT(xp != NULL); 12848 12849 /* 12850 * Call bp_mapout() before freeing the buf, in case a lower 12851 * layer or HBA had done a bp_mapin(). we must do this here 12852 * as we are the "originator" of the shadow buf. 12853 */ 12854 bp_mapout(bp); 12855 12856 /* 12857 * Null out b_iodone before freeing the bp, to ensure that the driver 12858 * never gets confused by a stale value in this field. (Just a little 12859 * extra defensiveness here.) 12860 */ 12861 bp->b_iodone = NULL; 12862 12863 freerbuf(bp); 12864 12865 kmem_free(xp, sizeof (struct sd_xbuf)); 12866 } 12867 12868 /* 12869 * Function: sd_shadow_buf_free 12870 * 12871 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12872 * 12873 * Context: May be called under interrupt context 12874 */ 12875 12876 static void 12877 sd_shadow_buf_free(struct buf *bp) 12878 { 12879 struct sd_xbuf *xp; 12880 12881 ASSERT(bp != NULL); 12882 xp = SD_GET_XBUF(bp); 12883 ASSERT(xp != NULL); 12884 12885 #if defined(__sparc) 12886 /* 12887 * Call bp_mapout() before freeing the buf, in case a lower 12888 * layer or HBA had done a bp_mapin(). we must do this here 12889 * as we are the "originator" of the shadow buf. 12890 */ 12891 bp_mapout(bp); 12892 #endif 12893 12894 /* 12895 * Null out b_iodone before freeing the bp, to ensure that the driver 12896 * never gets confused by a stale value in this field. (Just a little 12897 * extra defensiveness here.) 12898 */ 12899 bp->b_iodone = NULL; 12900 12901 #if defined(__i386) || defined(__amd64) 12902 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12903 freerbuf(bp); 12904 #else 12905 scsi_free_consistent_buf(bp); 12906 #endif 12907 12908 kmem_free(xp, sizeof (struct sd_xbuf)); 12909 } 12910 12911 12912 /* 12913 * Function: sd_print_transport_rejected_message 12914 * 12915 * Description: This implements the ludicrously complex rules for printing 12916 * a "transport rejected" message. This is to address the 12917 * specific problem of having a flood of this error message 12918 * produced when a failover occurs. 12919 * 12920 * Context: Any. 12921 */ 12922 12923 static void 12924 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12925 int code) 12926 { 12927 ASSERT(un != NULL); 12928 ASSERT(mutex_owned(SD_MUTEX(un))); 12929 ASSERT(xp != NULL); 12930 12931 /* 12932 * Print the "transport rejected" message under the following 12933 * conditions: 12934 * 12935 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12936 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12937 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12938 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12939 * scsi_transport(9F) (which indicates that the target might have 12940 * gone off-line). This uses the un->un_tran_fatal_count 12941 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12942 * received, and reset to zero whenver a TRAN_ACCEPT is returned 12943 * from scsi_transport(). 12944 * 12945 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12946 * the preceeding cases in order for the message to be printed. 
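 *	Collapsed into a single predicate, the above amounts to (this
 *	mirrors the test in the code that follows):
 *
 *	    print = ((pktp->pkt_flags & FLAG_SILENT) == 0) &&
 *		((sd_level_mask & SD_LOGMASK_DIAG) ||
 *		(code != TRAN_FATAL_ERROR) ||
 *		(un->un_tran_fatal_count == 1));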
12947 */ 12948 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12949 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12950 (code != TRAN_FATAL_ERROR) || 12951 (un->un_tran_fatal_count == 1)) { 12952 switch (code) { 12953 case TRAN_BADPKT: 12954 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12955 "transport rejected bad packet\n"); 12956 break; 12957 case TRAN_FATAL_ERROR: 12958 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12959 "transport rejected fatal error\n"); 12960 break; 12961 default: 12962 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12963 "transport rejected (%d)\n", code); 12964 break; 12965 } 12966 } 12967 } 12968 } 12969 12970 12971 /* 12972 * Function: sd_add_buf_to_waitq 12973 * 12974 * Description: Add the given buf(9S) struct to the wait queue for the 12975 * instance. If sorting is enabled, then the buf is added 12976 * to the queue via an elevator sort algorithm (a la 12977 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12978 * If sorting is not enabled, then the buf is just added 12979 * to the end of the wait queue. 12980 * 12981 * Return Code: void 12982 * 12983 * Context: Does not sleep/block, therefore technically can be called 12984 * from any context. However if sorting is enabled then the 12985 * execution time is indeterminate, and may take long if 12986 * the wait queue grows large. 12987 */ 12988 12989 static void 12990 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12991 { 12992 struct buf *ap; 12993 12994 ASSERT(bp != NULL); 12995 ASSERT(un != NULL); 12996 ASSERT(mutex_owned(SD_MUTEX(un))); 12997 12998 /* If the queue is empty, add the buf as the only entry & return. */ 12999 if (un->un_waitq_headp == NULL) { 13000 ASSERT(un->un_waitq_tailp == NULL); 13001 un->un_waitq_headp = un->un_waitq_tailp = bp; 13002 bp->av_forw = NULL; 13003 return; 13004 } 13005 13006 ASSERT(un->un_waitq_tailp != NULL); 13007 13008 /* 13009 * If sorting is disabled, just add the buf to the tail end of 13010 * the wait queue and return. 13011 */ 13012 if (un->un_f_disksort_disabled) { 13013 un->un_waitq_tailp->av_forw = bp; 13014 un->un_waitq_tailp = bp; 13015 bp->av_forw = NULL; 13016 return; 13017 } 13018 13019 /* 13020 * Sort thru the list of requests currently on the wait queue 13021 * and add the new buf request at the appropriate position. 13022 * 13023 * The un->un_waitq_headp is an activity chain pointer on which 13024 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 13025 * first queue holds those requests which are positioned after 13026 * the current SD_GET_BLKNO() (in the first request); the second holds 13027 * requests which came in after their SD_GET_BLKNO() number was passed. 13028 * Thus we implement a one way scan, retracting after reaching 13029 * the end of the drive to the first request on the second 13030 * queue, at which time it becomes the first queue. 13031 * A one-way scan is natural because of the way UNIX read-ahead 13032 * blocks are allocated. 13033 * 13034 * If we lie after the first request, then we must locate the 13035 * second request list and add ourselves to it. 13036 */ 13037 ap = un->un_waitq_headp; 13038 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 13039 while (ap->av_forw != NULL) { 13040 /* 13041 * Look for an "inversion" in the (normally 13042 * ascending) block numbers. This indicates 13043 * the start of the second request list. 13044 */ 13045 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 13046 /* 13047 * Search the second request list for the 13048 * first request at a larger block number. 
13049 * We go before that; however if there is 13050 * no such request, we go at the end. 13051 */ 13052 do { 13053 if (SD_GET_BLKNO(bp) < 13054 SD_GET_BLKNO(ap->av_forw)) { 13055 goto insert; 13056 } 13057 ap = ap->av_forw; 13058 } while (ap->av_forw != NULL); 13059 goto insert; /* after last */ 13060 } 13061 ap = ap->av_forw; 13062 } 13063 13064 /* 13065 * No inversions... we will go after the last, and 13066 * be the first request in the second request list. 13067 */ 13068 goto insert; 13069 } 13070 13071 /* 13072 * Request is at/after the current request... 13073 * sort in the first request list. 13074 */ 13075 while (ap->av_forw != NULL) { 13076 /* 13077 * We want to go after the current request (1) if 13078 * there is an inversion after it (i.e. it is the end 13079 * of the first request list), or (2) if the next 13080 * request is a larger block no. than our request. 13081 */ 13082 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 13083 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 13084 goto insert; 13085 } 13086 ap = ap->av_forw; 13087 } 13088 13089 /* 13090 * Neither a second list nor a larger request, therefore 13091 * we go at the end of the first list (which is the same 13092 * as the end of the whole schebang). 13093 */ 13094 insert: 13095 bp->av_forw = ap->av_forw; 13096 ap->av_forw = bp; 13097 13098 /* 13099 * If we inserted onto the tail end of the waitq, make sure the 13100 * tail pointer is updated. 13101 */ 13102 if (ap == un->un_waitq_tailp) { 13103 un->un_waitq_tailp = bp; 13104 } 13105 } 13106 13107 13108 /* 13109 * Function: sd_start_cmds 13110 * 13111 * Description: Remove and transport cmds from the driver queues. 13112 * 13113 * Arguments: un - pointer to the unit (soft state) struct for the target. 13114 * 13115 * immed_bp - ptr to a buf to be transported immediately. Only 13116 * the immed_bp is transported; bufs on the waitq are not 13117 * processed and the un_retry_bp is not checked. If immed_bp is 13118 * NULL, then normal queue processing is performed. 13119 * 13120 * Context: May be called from kernel thread context, interrupt context, 13121 * or runout callback context. This function may not block or 13122 * call routines that block. 13123 */ 13124 13125 static void 13126 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 13127 { 13128 struct sd_xbuf *xp; 13129 struct buf *bp; 13130 void (*statp)(kstat_io_t *); 13131 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13132 void (*saved_statp)(kstat_io_t *); 13133 #endif 13134 int rval; 13135 13136 ASSERT(un != NULL); 13137 ASSERT(mutex_owned(SD_MUTEX(un))); 13138 ASSERT(un->un_ncmds_in_transport >= 0); 13139 ASSERT(un->un_throttle >= 0); 13140 13141 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 13142 13143 do { 13144 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13145 saved_statp = NULL; 13146 #endif 13147 13148 /* 13149 * If we are syncing or dumping, fail the command to 13150 * avoid recursively calling back into scsi_transport(). 13151 * The dump I/O itself uses a separate code path so this 13152 * only prevents non-dump I/O from being sent while dumping. 13153 * File system sync takes place before dumping begins. 13154 * During panic, filesystem I/O is allowed provided 13155 * un_in_callback is <= 1. This is to prevent recursion 13156 * such as sd_start_cmds -> scsi_transport -> sdintr -> 13157 * sd_start_cmds and so on. See panic.c for more information 13158 * about the states the system can be in during panic. 
13159 */ 13160 if ((un->un_state == SD_STATE_DUMPING) || 13161 (ddi_in_panic() && (un->un_in_callback > 1))) { 13162 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13163 "sd_start_cmds: panicking\n"); 13164 goto exit; 13165 } 13166 13167 if ((bp = immed_bp) != NULL) { 13168 /* 13169 * We have a bp that must be transported immediately. 13170 * It's OK to transport the immed_bp here without doing 13171 * the throttle limit check because the immed_bp is 13172 * always used in a retry/recovery case. This means 13173 * that we know we are not at the throttle limit by 13174 * virtue of the fact that to get here we must have 13175 * already gotten a command back via sdintr(). This also 13176 * relies on (1) the command on un_retry_bp preventing 13177 * further commands from the waitq from being issued; 13178 * and (2) the code in sd_retry_command checking the 13179 * throttle limit before issuing a delayed or immediate 13180 * retry. This holds even if the throttle limit is 13181 * currently ratcheted down from its maximum value. 13182 */ 13183 statp = kstat_runq_enter; 13184 if (bp == un->un_retry_bp) { 13185 ASSERT((un->un_retry_statp == NULL) || 13186 (un->un_retry_statp == kstat_waitq_enter) || 13187 (un->un_retry_statp == 13188 kstat_runq_back_to_waitq)); 13189 /* 13190 * If the waitq kstat was incremented when 13191 * sd_set_retry_bp() queued this bp for a retry, 13192 * then we must set up statp so that the waitq 13193 * count will get decremented correctly below. 13194 * Also we must clear un->un_retry_statp to 13195 * ensure that we do not act on a stale value 13196 * in this field. 13197 */ 13198 if ((un->un_retry_statp == kstat_waitq_enter) || 13199 (un->un_retry_statp == 13200 kstat_runq_back_to_waitq)) { 13201 statp = kstat_waitq_to_runq; 13202 } 13203 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13204 saved_statp = un->un_retry_statp; 13205 #endif 13206 un->un_retry_statp = NULL; 13207 13208 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13209 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 13210 "un_throttle:%d un_ncmds_in_transport:%d\n", 13211 un, un->un_retry_bp, un->un_throttle, 13212 un->un_ncmds_in_transport); 13213 } else { 13214 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 13215 "processing priority bp:0x%p\n", bp); 13216 } 13217 13218 } else if ((bp = un->un_waitq_headp) != NULL) { 13219 /* 13220 * A command on the waitq is ready to go, but do not 13221 * send it if: 13222 * 13223 * (1) the throttle limit has been reached, or 13224 * (2) a retry is pending, or 13225 * (3) a START_STOP_UNIT callback pending, or 13226 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 13227 * command is pending. 13228 * 13229 * For all of these conditions, IO processing will 13230 * restart after the condition is cleared. 13231 */ 13232 if (un->un_ncmds_in_transport >= un->un_throttle) { 13233 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13234 "sd_start_cmds: exiting, " 13235 "throttle limit reached!\n"); 13236 goto exit; 13237 } 13238 if (un->un_retry_bp != NULL) { 13239 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13240 "sd_start_cmds: exiting, retry pending!\n"); 13241 goto exit; 13242 } 13243 if (un->un_startstop_timeid != NULL) { 13244 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13245 "sd_start_cmds: exiting, " 13246 "START_STOP pending!\n"); 13247 goto exit; 13248 } 13249 if (un->un_direct_priority_timeid != NULL) { 13250 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13251 "sd_start_cmds: exiting, " 13252 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n"); 13253 goto exit; 13254 } 13255 13256 /* Dequeue the command */ 13257 un->un_waitq_headp = bp->av_forw; 13258 if (un->un_waitq_headp == NULL) { 13259 un->un_waitq_tailp = NULL; 13260 } 13261 bp->av_forw = NULL; 13262 statp = kstat_waitq_to_runq; 13263 SD_TRACE(SD_LOG_IO_CORE, un, 13264 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 13265 13266 } else { 13267 /* No work to do so bail out now */ 13268 SD_TRACE(SD_LOG_IO_CORE, un, 13269 "sd_start_cmds: no more work, exiting!\n"); 13270 goto exit; 13271 } 13272 13273 /* 13274 * Reset the state to normal. This is the mechanism by which 13275 * the state transitions from either SD_STATE_RWAIT or 13276 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 13277 * If state is SD_STATE_PM_CHANGING then this command is 13278 * part of the device power control and the state must 13279 * not be put back to normal. Doing so would would 13280 * allow new commands to proceed when they shouldn't, 13281 * the device may be going off. 13282 */ 13283 if ((un->un_state != SD_STATE_SUSPENDED) && 13284 (un->un_state != SD_STATE_PM_CHANGING)) { 13285 New_state(un, SD_STATE_NORMAL); 13286 } 13287 13288 xp = SD_GET_XBUF(bp); 13289 ASSERT(xp != NULL); 13290 13291 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13292 /* 13293 * Allocate the scsi_pkt if we need one, or attach DMA 13294 * resources if we have a scsi_pkt that needs them. The 13295 * latter should only occur for commands that are being 13296 * retried. 13297 */ 13298 if ((xp->xb_pktp == NULL) || 13299 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 13300 #else 13301 if (xp->xb_pktp == NULL) { 13302 #endif 13303 /* 13304 * There is no scsi_pkt allocated for this buf. Call 13305 * the initpkt function to allocate & init one. 13306 * 13307 * The scsi_init_pkt runout callback functionality is 13308 * implemented as follows: 13309 * 13310 * 1) The initpkt function always calls 13311 * scsi_init_pkt(9F) with sdrunout specified as the 13312 * callback routine. 13313 * 2) A successful packet allocation is initialized and 13314 * the I/O is transported. 13315 * 3) The I/O associated with an allocation resource 13316 * failure is left on its queue to be retried via 13317 * runout or the next I/O. 13318 * 4) The I/O associated with a DMA error is removed 13319 * from the queue and failed with EIO. Processing of 13320 * the transport queues is also halted to be 13321 * restarted via runout or the next I/O. 13322 * 5) The I/O associated with a CDB size or packet 13323 * size error is removed from the queue and failed 13324 * with EIO. Processing of the transport queues is 13325 * continued. 13326 * 13327 * Note: there is no interface for canceling a runout 13328 * callback. To prevent the driver from detaching or 13329 * suspending while a runout is pending the driver 13330 * state is set to SD_STATE_RWAIT 13331 * 13332 * Note: using the scsi_init_pkt callback facility can 13333 * result in an I/O request persisting at the head of 13334 * the list which cannot be satisfied even after 13335 * multiple retries. In the future the driver may 13336 * implement some kind of maximum runout count before 13337 * failing an I/O. 13338 * 13339 * Note: the use of funcp below may seem superfluous, 13340 * but it helps warlock figure out the correct 13341 * initpkt function calls (see [s]sd.wlcmd). 
13342 */ 13343 struct scsi_pkt *pktp; 13344 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13345 13346 ASSERT(bp != un->un_rqs_bp); 13347 13348 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13349 switch ((*funcp)(bp, &pktp)) { 13350 case SD_PKT_ALLOC_SUCCESS: 13351 xp->xb_pktp = pktp; 13352 SD_TRACE(SD_LOG_IO_CORE, un, 13353 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13354 pktp); 13355 goto got_pkt; 13356 13357 case SD_PKT_ALLOC_FAILURE: 13358 /* 13359 * Temporary (hopefully) resource depletion. 13360 * Since retries and RQS commands always have a 13361 * scsi_pkt allocated, these cases should never 13362 * get here. So the only cases this needs to 13363 * handle is a bp from the waitq (which we put 13364 * back onto the waitq for sdrunout), or a bp 13365 * sent as an immed_bp (which we just fail). 13366 */ 13367 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13368 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13369 13370 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13371 13372 if (bp == immed_bp) { 13373 /* 13374 * If SD_XB_DMA_FREED is clear, then 13375 * this is a failure to allocate a 13376 * scsi_pkt, and we must fail the 13377 * command. 13378 */ 13379 if ((xp->xb_pkt_flags & 13380 SD_XB_DMA_FREED) == 0) { 13381 break; 13382 } 13383 13384 /* 13385 * If this immediate command is NOT our 13386 * un_retry_bp, then we must fail it. 13387 */ 13388 if (bp != un->un_retry_bp) { 13389 break; 13390 } 13391 13392 /* 13393 * We get here if this cmd is our 13394 * un_retry_bp that was DMAFREED, but 13395 * scsi_init_pkt() failed to reallocate 13396 * DMA resources when we attempted to 13397 * retry it. This can happen when an 13398 * mpxio failover is in progress, but 13399 * we don't want to just fail the 13400 * command in this case. 13401 * 13402 * Use timeout(9F) to restart it after 13403 * a 100ms delay. We don't want to 13404 * let sdrunout() restart it, because 13405 * sdrunout() is just supposed to start 13406 * commands that are sitting on the 13407 * wait queue. The un_retry_bp stays 13408 * set until the command completes, but 13409 * sdrunout can be called many times 13410 * before that happens. Since sdrunout 13411 * cannot tell if the un_retry_bp is 13412 * already in the transport, it could 13413 * end up calling scsi_transport() for 13414 * the un_retry_bp multiple times. 13415 * 13416 * Also: don't schedule the callback 13417 * if some other callback is already 13418 * pending. 13419 */ 13420 if (un->un_retry_statp == NULL) { 13421 /* 13422 * restore the kstat pointer to 13423 * keep kstat counts coherent 13424 * when we do retry the command. 13425 */ 13426 un->un_retry_statp = 13427 saved_statp; 13428 } 13429 13430 if ((un->un_startstop_timeid == NULL) && 13431 (un->un_retry_timeid == NULL) && 13432 (un->un_direct_priority_timeid == 13433 NULL)) { 13434 13435 un->un_retry_timeid = 13436 timeout( 13437 sd_start_retry_command, 13438 un, SD_RESTART_TIMEOUT); 13439 } 13440 goto exit; 13441 } 13442 13443 #else 13444 if (bp == immed_bp) { 13445 break; /* Just fail the command */ 13446 } 13447 #endif 13448 13449 /* Add the buf back to the head of the waitq */ 13450 bp->av_forw = un->un_waitq_headp; 13451 un->un_waitq_headp = bp; 13452 if (un->un_waitq_tailp == NULL) { 13453 un->un_waitq_tailp = bp; 13454 } 13455 goto exit; 13456 13457 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13458 /* 13459 * HBA DMA resource failure. Fail the command 13460 * and continue processing of the queues. 
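			 * Side note on the 100ms restart used just above
			 * (general timeout(9F) usage, not a quote of this
			 * driver's macros): timeout(9F) takes its delay in
			 * clock ticks, so a 100ms delay is conventionally
			 * written as
			 *
			 *    (void) timeout(sd_start_retry_command, un,
			 *        drv_usectohz(100000));
			 *
			 * Here the driver spells that delay as
			 * SD_RESTART_TIMEOUT.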
13461 */ 13462 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13463 "sd_start_cmds: " 13464 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13465 break; 13466 13467 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13468 /* 13469 * Note:x86: Partial DMA mapping not supported 13470 * for USCSI commands, and all the needed DMA 13471 * resources were not allocated. 13472 */ 13473 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13474 "sd_start_cmds: " 13475 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13476 break; 13477 13478 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13479 /* 13480 * Note:x86: Request cannot fit into CDB based 13481 * on lba and len. 13482 */ 13483 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13484 "sd_start_cmds: " 13485 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13486 break; 13487 13488 default: 13489 /* Should NEVER get here! */ 13490 panic("scsi_initpkt error"); 13491 /*NOTREACHED*/ 13492 } 13493 13494 /* 13495 * Fatal error in allocating a scsi_pkt for this buf. 13496 * Update kstats & return the buf with an error code. 13497 * We must use sd_return_failed_command_no_restart() to 13498 * avoid a recursive call back into sd_start_cmds(). 13499 * However this also means that we must keep processing 13500 * the waitq here in order to avoid stalling. 13501 */ 13502 if (statp == kstat_waitq_to_runq) { 13503 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13504 } 13505 sd_return_failed_command_no_restart(un, bp, EIO); 13506 if (bp == immed_bp) { 13507 /* immed_bp is gone by now, so clear this */ 13508 immed_bp = NULL; 13509 } 13510 continue; 13511 } 13512 got_pkt: 13513 if (bp == immed_bp) { 13514 /* goto the head of the class.... */ 13515 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13516 } 13517 13518 un->un_ncmds_in_transport++; 13519 SD_UPDATE_KSTATS(un, statp, bp); 13520 13521 /* 13522 * Call scsi_transport() to send the command to the target. 13523 * According to SCSA architecture, we must drop the mutex here 13524 * before calling scsi_transport() in order to avoid deadlock. 13525 * Note that the scsi_pkt's completion routine can be executed 13526 * (from interrupt context) even before the call to 13527 * scsi_transport() returns. 13528 */ 13529 SD_TRACE(SD_LOG_IO_CORE, un, 13530 "sd_start_cmds: calling scsi_transport()\n"); 13531 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13532 13533 mutex_exit(SD_MUTEX(un)); 13534 rval = scsi_transport(xp->xb_pktp); 13535 mutex_enter(SD_MUTEX(un)); 13536 13537 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13538 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13539 13540 switch (rval) { 13541 case TRAN_ACCEPT: 13542 /* Clear this with every pkt accepted by the HBA */ 13543 un->un_tran_fatal_count = 0; 13544 break; /* Success; try the next cmd (if any) */ 13545 13546 case TRAN_BUSY: 13547 un->un_ncmds_in_transport--; 13548 ASSERT(un->un_ncmds_in_transport >= 0); 13549 13550 /* 13551 * Don't retry request sense, the sense data 13552 * is lost when another request is sent. 13553 * Free up the rqs buf and retry 13554 * the original failed cmd. Update kstat. 13555 */ 13556 if (bp == un->un_rqs_bp) { 13557 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13558 bp = sd_mark_rqs_idle(un, xp); 13559 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13560 NULL, NULL, EIO, un->un_busy_timeout / 500, 13561 kstat_waitq_enter); 13562 goto exit; 13563 } 13564 13565 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13566 /* 13567 * Free the DMA resources for the scsi_pkt. 
This will 13568 * allow mpxio to select another path the next time 13569 * we call scsi_transport() with this scsi_pkt. 13570 * See sdintr() for the rationalization behind this. 13571 */ 13572 if ((un->un_f_is_fibre == TRUE) && 13573 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13574 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13575 scsi_dmafree(xp->xb_pktp); 13576 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13577 } 13578 #endif 13579 13580 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13581 /* 13582 * Commands that are SD_PATH_DIRECT_PRIORITY 13583 * are for error recovery situations. These do 13584 * not use the normal command waitq, so if they 13585 * get a TRAN_BUSY we cannot put them back onto 13586 * the waitq for later retry. One possible 13587 * problem is that there could already be some 13588 * other command on un_retry_bp that is waiting 13589 * for this one to complete, so we would be 13590 * deadlocked if we put this command back onto 13591 * the waitq for later retry (since un_retry_bp 13592 * must complete before the driver gets back to 13593 * commands on the waitq). 13594 * 13595 * To avoid deadlock we must schedule a callback 13596 * that will restart this command after a set 13597 * interval. This should keep retrying for as 13598 * long as the underlying transport keeps 13599 * returning TRAN_BUSY (just like for other 13600 * commands). Use the same timeout interval as 13601 * for the ordinary TRAN_BUSY retry. 13602 */ 13603 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13604 "sd_start_cmds: scsi_transport() returned " 13605 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13606 13607 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13608 un->un_direct_priority_timeid = 13609 timeout(sd_start_direct_priority_command, 13610 bp, un->un_busy_timeout / 500); 13611 13612 goto exit; 13613 } 13614 13615 /* 13616 * For TRAN_BUSY, we want to reduce the throttle value, 13617 * unless we are retrying a command. 13618 */ 13619 if (bp != un->un_retry_bp) { 13620 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13621 } 13622 13623 /* 13624 * Set up the bp to be tried again 10 ms later. 13625 * Note:x86: Is there a timeout value in the sd_lun 13626 * for this condition? 13627 */ 13628 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 13629 kstat_runq_back_to_waitq); 13630 goto exit; 13631 13632 case TRAN_FATAL_ERROR: 13633 un->un_tran_fatal_count++; 13634 /* FALLTHRU */ 13635 13636 case TRAN_BADPKT: 13637 default: 13638 un->un_ncmds_in_transport--; 13639 ASSERT(un->un_ncmds_in_transport >= 0); 13640 13641 /* 13642 * If this is our REQUEST SENSE command with a 13643 * transport error, we must get back the pointers 13644 * to the original buf, and mark the REQUEST 13645 * SENSE command as "available". 13646 */ 13647 if (bp == un->un_rqs_bp) { 13648 bp = sd_mark_rqs_idle(un, xp); 13649 xp = SD_GET_XBUF(bp); 13650 } else { 13651 /* 13652 * Legacy behavior: do not update transport 13653 * error count for request sense commands. 13654 */ 13655 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13656 } 13657 13658 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13659 sd_print_transport_rejected_message(un, xp, rval); 13660 13661 /* 13662 * We must use sd_return_failed_command_no_restart() to 13663 * avoid a recursive call back into sd_start_cmds(). 13664 * However this also means that we must keep processing 13665 * the waitq here in order to avoid stalling. 
13666 */ 13667 sd_return_failed_command_no_restart(un, bp, EIO); 13668 13669 /* 13670 * Notify any threads waiting in sd_ddi_suspend() that 13671 * a command completion has occurred. 13672 */ 13673 if (un->un_state == SD_STATE_SUSPENDED) { 13674 cv_broadcast(&un->un_disk_busy_cv); 13675 } 13676 13677 if (bp == immed_bp) { 13678 /* immed_bp is gone by now, so clear this */ 13679 immed_bp = NULL; 13680 } 13681 break; 13682 } 13683 13684 } while (immed_bp == NULL); 13685 13686 exit: 13687 ASSERT(mutex_owned(SD_MUTEX(un))); 13688 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13689 } 13690 13691 13692 /* 13693 * Function: sd_return_command 13694 * 13695 * Description: Returns a command to its originator (with or without an 13696 * error). Also starts commands waiting to be transported 13697 * to the target. 13698 * 13699 * Context: May be called from interrupt, kernel, or timeout context 13700 */ 13701 13702 static void 13703 sd_return_command(struct sd_lun *un, struct buf *bp) 13704 { 13705 struct sd_xbuf *xp; 13706 struct scsi_pkt *pktp; 13707 13708 ASSERT(bp != NULL); 13709 ASSERT(un != NULL); 13710 ASSERT(mutex_owned(SD_MUTEX(un))); 13711 ASSERT(bp != un->un_rqs_bp); 13712 xp = SD_GET_XBUF(bp); 13713 ASSERT(xp != NULL); 13714 13715 pktp = SD_GET_PKTP(bp); 13716 13717 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13718 13719 /* 13720 * Note: check for the "sdrestart failed" case. 13721 */ 13722 if ((un->un_partial_dma_supported == 1) && 13723 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13724 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13725 (xp->xb_pktp->pkt_resid == 0)) { 13726 13727 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13728 /* 13729 * Successfully set up next portion of cmd 13730 * transfer, try sending it 13731 */ 13732 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13733 NULL, NULL, 0, (clock_t)0, NULL); 13734 sd_start_cmds(un, NULL); 13735 return; /* Note:x86: need a return here? */ 13736 } 13737 } 13738 13739 /* 13740 * If this is the failfast bp, clear it from un_failfast_bp. This 13741 * can happen if upon being re-tried the failfast bp either 13742 * succeeded or encountered another error (possibly even a different 13743 * error than the one that precipitated the failfast state, but in 13744 * that case it would have had to exhaust retries as well). Regardless, 13745 * this should not occur whenever the instance is in the active 13746 * failfast state. 13747 */ 13748 if (bp == un->un_failfast_bp) { 13749 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13750 un->un_failfast_bp = NULL; 13751 } 13752 13753 /* 13754 * Clear the failfast state upon successful completion of ANY cmd. 13755 */ 13756 if (bp->b_error == 0) { 13757 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13758 } 13759 13760 /* 13761 * This is used if the command was retried one or more times. Show that 13762 * we are done with it, and allow processing of the waitq to resume. 13763 */ 13764 if (bp == un->un_retry_bp) { 13765 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13766 "sd_return_command: un:0x%p: " 13767 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13768 un->un_retry_bp = NULL; 13769 un->un_retry_statp = NULL; 13770 } 13771 13772 SD_UPDATE_RDWR_STATS(un, bp); 13773 SD_UPDATE_PARTITION_STATS(un, bp); 13774 13775 switch (un->un_state) { 13776 case SD_STATE_SUSPENDED: 13777 /* 13778 * Notify any threads waiting in sd_ddi_suspend() that 13779 * a command completion has occurred. 
13780 */ 13781 cv_broadcast(&un->un_disk_busy_cv); 13782 break; 13783 default: 13784 sd_start_cmds(un, NULL); 13785 break; 13786 } 13787 13788 /* Return this command up the iodone chain to its originator. */ 13789 mutex_exit(SD_MUTEX(un)); 13790 13791 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13792 xp->xb_pktp = NULL; 13793 13794 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13795 13796 ASSERT(!mutex_owned(SD_MUTEX(un))); 13797 mutex_enter(SD_MUTEX(un)); 13798 13799 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13800 } 13801 13802 13803 /* 13804 * Function: sd_return_failed_command 13805 * 13806 * Description: Command completion when an error occurred. 13807 * 13808 * Context: May be called from interrupt context 13809 */ 13810 13811 static void 13812 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13813 { 13814 ASSERT(bp != NULL); 13815 ASSERT(un != NULL); 13816 ASSERT(mutex_owned(SD_MUTEX(un))); 13817 13818 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13819 "sd_return_failed_command: entry\n"); 13820 13821 /* 13822 * b_resid could already be nonzero due to a partial data 13823 * transfer, so do not change it here. 13824 */ 13825 SD_BIOERROR(bp, errcode); 13826 13827 sd_return_command(un, bp); 13828 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13829 "sd_return_failed_command: exit\n"); 13830 } 13831 13832 13833 /* 13834 * Function: sd_return_failed_command_no_restart 13835 * 13836 * Description: Same as sd_return_failed_command, but ensures that no 13837 * call back into sd_start_cmds will be issued. 13838 * 13839 * Context: May be called from interrupt context 13840 */ 13841 13842 static void 13843 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13844 int errcode) 13845 { 13846 struct sd_xbuf *xp; 13847 13848 ASSERT(bp != NULL); 13849 ASSERT(un != NULL); 13850 ASSERT(mutex_owned(SD_MUTEX(un))); 13851 xp = SD_GET_XBUF(bp); 13852 ASSERT(xp != NULL); 13853 ASSERT(errcode != 0); 13854 13855 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13856 "sd_return_failed_command_no_restart: entry\n"); 13857 13858 /* 13859 * b_resid could already be nonzero due to a partial data 13860 * transfer, so do not change it here. 13861 */ 13862 SD_BIOERROR(bp, errcode); 13863 13864 /* 13865 * If this is the failfast bp, clear it. This can happen if the 13866 * failfast bp encounterd a fatal error when we attempted to 13867 * re-try it (such as a scsi_transport(9F) failure). However 13868 * we should NOT be in an active failfast state if the failfast 13869 * bp is not NULL. 13870 */ 13871 if (bp == un->un_failfast_bp) { 13872 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13873 un->un_failfast_bp = NULL; 13874 } 13875 13876 if (bp == un->un_retry_bp) { 13877 /* 13878 * This command was retried one or more times. Show that we are 13879 * done with it, and allow processing of the waitq to resume. 
13880 */ 13881 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13882 "sd_return_failed_command_no_restart: " 13883 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13884 un->un_retry_bp = NULL; 13885 un->un_retry_statp = NULL; 13886 } 13887 13888 SD_UPDATE_RDWR_STATS(un, bp); 13889 SD_UPDATE_PARTITION_STATS(un, bp); 13890 13891 mutex_exit(SD_MUTEX(un)); 13892 13893 if (xp->xb_pktp != NULL) { 13894 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13895 xp->xb_pktp = NULL; 13896 } 13897 13898 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13899 13900 mutex_enter(SD_MUTEX(un)); 13901 13902 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13903 "sd_return_failed_command_no_restart: exit\n"); 13904 } 13905 13906 13907 /* 13908 * Function: sd_retry_command 13909 * 13910 * Description: queue up a command for retry, or (optionally) fail it 13911 * if retry counts are exhausted. 13912 * 13913 * Arguments: un - Pointer to the sd_lun struct for the target. 13914 * 13915 * bp - Pointer to the buf for the command to be retried. 13916 * 13917 * retry_check_flag - Flag to see which (if any) of the retry 13918 * counts should be decremented/checked. If the indicated 13919 * retry count is exhausted, then the command will not be 13920 * retried; it will be failed instead. This should use a 13921 * value equal to one of the following: 13922 * 13923 * SD_RETRIES_NOCHECK 13924 * SD_RESD_RETRIES_STANDARD 13925 * SD_RETRIES_VICTIM 13926 * 13927 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13928 * if the check should be made to see of FLAG_ISOLATE is set 13929 * in the pkt. If FLAG_ISOLATE is set, then the command is 13930 * not retried, it is simply failed. 13931 * 13932 * user_funcp - Ptr to function to call before dispatching the 13933 * command. May be NULL if no action needs to be performed. 13934 * (Primarily intended for printing messages.) 13935 * 13936 * user_arg - Optional argument to be passed along to 13937 * the user_funcp call. 13938 * 13939 * failure_code - errno return code to set in the bp if the 13940 * command is going to be failed. 13941 * 13942 * retry_delay - Retry delay interval in (clock_t) units. May 13943 * be zero which indicates that the retry should be retried 13944 * immediately (ie, without an intervening delay). 13945 * 13946 * statp - Ptr to kstat function to be updated if the command 13947 * is queued for a delayed retry. May be NULL if no kstat 13948 * update is desired. 13949 * 13950 * Context: May be called from interrupt context. 13951 */ 13952 13953 static void 13954 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13955 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13956 code), void *user_arg, int failure_code, clock_t retry_delay, 13957 void (*statp)(kstat_io_t *)) 13958 { 13959 struct sd_xbuf *xp; 13960 struct scsi_pkt *pktp; 13961 13962 ASSERT(un != NULL); 13963 ASSERT(mutex_owned(SD_MUTEX(un))); 13964 ASSERT(bp != NULL); 13965 xp = SD_GET_XBUF(bp); 13966 ASSERT(xp != NULL); 13967 pktp = SD_GET_PKTP(bp); 13968 ASSERT(pktp != NULL); 13969 13970 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13971 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13972 13973 /* 13974 * If we are syncing or dumping, fail the command to avoid 13975 * recursively calling back into scsi_transport(). 13976 */ 13977 if (ddi_in_panic()) { 13978 goto fail_command_no_log; 13979 } 13980 13981 /* 13982 * We should never be be retrying a command with FLAG_DIAGNOSE set, so 13983 * log an error and fail the command. 
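	 * For reference, a representative call to this function (as made
	 * from sd_start_cmds() earlier in this file) looks like:
	 *
	 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	 *	    NULL, NULL, EIO, un->un_busy_timeout / 500,
	 *	    kstat_waitq_enter);
	 *
	 * i.e. retry against the standard retry count, with no user message
	 * callback, fail with EIO if retries are exhausted, delay the retry
	 * by un_busy_timeout / 500 ticks, and account for the bp with
	 * kstat_waitq_enter.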
13984 */ 13985 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13986 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13987 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13988 sd_dump_memory(un, SD_LOG_IO, "CDB", 13989 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13990 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13991 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13992 goto fail_command; 13993 } 13994 13995 /* 13996 * If we are suspended, then put the command onto head of the 13997 * wait queue since we don't want to start more commands, and 13998 * clear the un_retry_bp. Next time when we are resumed, will 13999 * handle the command in the wait queue. 14000 */ 14001 switch (un->un_state) { 14002 case SD_STATE_SUSPENDED: 14003 case SD_STATE_DUMPING: 14004 bp->av_forw = un->un_waitq_headp; 14005 un->un_waitq_headp = bp; 14006 if (un->un_waitq_tailp == NULL) { 14007 un->un_waitq_tailp = bp; 14008 } 14009 if (bp == un->un_retry_bp) { 14010 un->un_retry_bp = NULL; 14011 un->un_retry_statp = NULL; 14012 } 14013 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 14014 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 14015 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 14016 return; 14017 default: 14018 break; 14019 } 14020 14021 /* 14022 * If the caller wants us to check FLAG_ISOLATE, then see if that 14023 * is set; if it is then we do not want to retry the command. 14024 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 14025 */ 14026 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 14027 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 14028 goto fail_command; 14029 } 14030 } 14031 14032 14033 /* 14034 * If SD_RETRIES_FAILFAST is set, it indicates that either a 14035 * command timeout or a selection timeout has occurred. This means 14036 * that we were unable to establish an kind of communication with 14037 * the target, and subsequent retries and/or commands are likely 14038 * to encounter similar results and take a long time to complete. 14039 * 14040 * If this is a failfast error condition, we need to update the 14041 * failfast state, even if this bp does not have B_FAILFAST set. 14042 */ 14043 if (retry_check_flag & SD_RETRIES_FAILFAST) { 14044 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 14045 ASSERT(un->un_failfast_bp == NULL); 14046 /* 14047 * If we are already in the active failfast state, and 14048 * another failfast error condition has been detected, 14049 * then fail this command if it has B_FAILFAST set. 14050 * If B_FAILFAST is clear, then maintain the legacy 14051 * behavior of retrying heroically, even tho this will 14052 * take a lot more time to fail the command. 14053 */ 14054 if (bp->b_flags & B_FAILFAST) { 14055 goto fail_command; 14056 } 14057 } else { 14058 /* 14059 * We're not in the active failfast state, but we 14060 * have a failfast error condition, so we must begin 14061 * transition to the next state. We do this regardless 14062 * of whether or not this bp has B_FAILFAST set. 14063 */ 14064 if (un->un_failfast_bp == NULL) { 14065 /* 14066 * This is the first bp to meet a failfast 14067 * condition so save it on un_failfast_bp & 14068 * do normal retry processing. Do not enter 14069 * active failfast state yet. This marks 14070 * entry into the "failfast pending" state. 
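			 * Sketch of the overall failfast progression
			 * implemented by this block (the remaining branches
			 * follow below):
			 *
			 *   INACTIVE --failfast error, un_failfast_bp NULL-->
			 *	"pending" (un_failfast_bp = bp, retry normally)
			 *   "pending" --same bp hits another failfast error-->
			 *	ACTIVE (flush queues, fail B_FAILFAST bps)
			 *   any state --retry without SD_RETRIES_FAILFAST,
			 *	or successful completion--> INACTIVE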
14071 */ 14072 un->un_failfast_bp = bp; 14073 14074 } else if (un->un_failfast_bp == bp) { 14075 /* 14076 * This is the second time *this* bp has 14077 * encountered a failfast error condition, 14078 * so enter active failfast state & flush 14079 * queues as appropriate. 14080 */ 14081 un->un_failfast_state = SD_FAILFAST_ACTIVE; 14082 un->un_failfast_bp = NULL; 14083 sd_failfast_flushq(un); 14084 14085 /* 14086 * Fail this bp now if B_FAILFAST set; 14087 * otherwise continue with retries. (It would 14088 * be pretty ironic if this bp succeeded on a 14089 * subsequent retry after we just flushed all 14090 * the queues). 14091 */ 14092 if (bp->b_flags & B_FAILFAST) { 14093 goto fail_command; 14094 } 14095 14096 #if !defined(lint) && !defined(__lint) 14097 } else { 14098 /* 14099 * If neither of the preceeding conditionals 14100 * was true, it means that there is some 14101 * *other* bp that has met an inital failfast 14102 * condition and is currently either being 14103 * retried or is waiting to be retried. In 14104 * that case we should perform normal retry 14105 * processing on *this* bp, since there is a 14106 * chance that the current failfast condition 14107 * is transient and recoverable. If that does 14108 * not turn out to be the case, then retries 14109 * will be cleared when the wait queue is 14110 * flushed anyway. 14111 */ 14112 #endif 14113 } 14114 } 14115 } else { 14116 /* 14117 * SD_RETRIES_FAILFAST is clear, which indicates that we 14118 * likely were able to at least establish some level of 14119 * communication with the target and subsequent commands 14120 * and/or retries are likely to get through to the target, 14121 * In this case we want to be aggressive about clearing 14122 * the failfast state. Note that this does not affect 14123 * the "failfast pending" condition. 14124 */ 14125 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14126 } 14127 14128 14129 /* 14130 * Check the specified retry count to see if we can still do 14131 * any retries with this pkt before we should fail it. 14132 */ 14133 switch (retry_check_flag & SD_RETRIES_MASK) { 14134 case SD_RETRIES_VICTIM: 14135 /* 14136 * Check the victim retry count. If exhausted, then fall 14137 * thru & check against the standard retry count. 14138 */ 14139 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 14140 /* Increment count & proceed with the retry */ 14141 xp->xb_victim_retry_count++; 14142 break; 14143 } 14144 /* Victim retries exhausted, fall back to std. retries... */ 14145 /* FALLTHRU */ 14146 14147 case SD_RETRIES_STANDARD: 14148 if (xp->xb_retry_count >= un->un_retry_count) { 14149 /* Retries exhausted, fail the command */ 14150 SD_TRACE(SD_LOG_IO_CORE, un, 14151 "sd_retry_command: retries exhausted!\n"); 14152 /* 14153 * update b_resid for failed SCMD_READ & SCMD_WRITE 14154 * commands with nonzero pkt_resid. 
14155 */ 14156 if ((pktp->pkt_reason == CMD_CMPLT) && 14157 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 14158 (pktp->pkt_resid != 0)) { 14159 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 14160 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 14161 SD_UPDATE_B_RESID(bp, pktp); 14162 } 14163 } 14164 goto fail_command; 14165 } 14166 xp->xb_retry_count++; 14167 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14168 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 14169 break; 14170 14171 case SD_RETRIES_UA: 14172 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 14173 /* Retries exhausted, fail the command */ 14174 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14175 "Unit Attention retries exhausted. " 14176 "Check the target.\n"); 14177 goto fail_command; 14178 } 14179 xp->xb_ua_retry_count++; 14180 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14181 "sd_retry_command: retry count:%d\n", 14182 xp->xb_ua_retry_count); 14183 break; 14184 14185 case SD_RETRIES_BUSY: 14186 if (xp->xb_retry_count >= un->un_busy_retry_count) { 14187 /* Retries exhausted, fail the command */ 14188 SD_TRACE(SD_LOG_IO_CORE, un, 14189 "sd_retry_command: retries exhausted!\n"); 14190 goto fail_command; 14191 } 14192 xp->xb_retry_count++; 14193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14194 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 14195 break; 14196 14197 case SD_RETRIES_NOCHECK: 14198 default: 14199 /* No retry count to check. Just proceed with the retry */ 14200 break; 14201 } 14202 14203 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14204 14205 /* 14206 * If we were given a zero timeout, we must attempt to retry the 14207 * command immediately (ie, without a delay). 14208 */ 14209 if (retry_delay == 0) { 14210 /* 14211 * Check some limiting conditions to see if we can actually 14212 * do the immediate retry. If we cannot, then we must 14213 * fall back to queueing up a delayed retry. 14214 */ 14215 if (un->un_ncmds_in_transport >= un->un_throttle) { 14216 /* 14217 * We are at the throttle limit for the target, 14218 * fall back to delayed retry. 14219 */ 14220 retry_delay = un->un_busy_timeout; 14221 statp = kstat_waitq_enter; 14222 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14223 "sd_retry_command: immed. retry hit " 14224 "throttle!\n"); 14225 } else { 14226 /* 14227 * We're clear to proceed with the immediate retry. 14228 * First call the user-provided function (if any) 14229 */ 14230 if (user_funcp != NULL) { 14231 (*user_funcp)(un, bp, user_arg, 14232 SD_IMMEDIATE_RETRY_ISSUED); 14233 #ifdef __lock_lint 14234 sd_print_incomplete_msg(un, bp, user_arg, 14235 SD_IMMEDIATE_RETRY_ISSUED); 14236 sd_print_cmd_incomplete_msg(un, bp, user_arg, 14237 SD_IMMEDIATE_RETRY_ISSUED); 14238 sd_print_sense_failed_msg(un, bp, user_arg, 14239 SD_IMMEDIATE_RETRY_ISSUED); 14240 #endif 14241 } 14242 14243 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14244 "sd_retry_command: issuing immediate retry\n"); 14245 14246 /* 14247 * Call sd_start_cmds() to transport the command to 14248 * the target. 14249 */ 14250 sd_start_cmds(un, bp); 14251 14252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14253 "sd_retry_command exit\n"); 14254 return; 14255 } 14256 } 14257 14258 /* 14259 * Set up to retry the command after a delay. 
14260 * First call the user-provided function (if any) 14261 */ 14262 if (user_funcp != NULL) { 14263 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 14264 } 14265 14266 sd_set_retry_bp(un, bp, retry_delay, statp); 14267 14268 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14269 return; 14270 14271 fail_command: 14272 14273 if (user_funcp != NULL) { 14274 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14275 } 14276 14277 fail_command_no_log: 14278 14279 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14280 "sd_retry_command: returning failed command\n"); 14281 14282 sd_return_failed_command(un, bp, failure_code); 14283 14284 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14285 } 14286 14287 14288 /* 14289 * Function: sd_set_retry_bp 14290 * 14291 * Description: Set up the given bp for retry. 14292 * 14293 * Arguments: un - ptr to associated softstate 14294 * bp - ptr to buf(9S) for the command 14295 * retry_delay - time interval before issuing retry (may be 0) 14296 * statp - optional pointer to kstat function 14297 * 14298 * Context: May be called under interrupt context 14299 */ 14300 14301 static void 14302 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14303 void (*statp)(kstat_io_t *)) 14304 { 14305 ASSERT(un != NULL); 14306 ASSERT(mutex_owned(SD_MUTEX(un))); 14307 ASSERT(bp != NULL); 14308 14309 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14310 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 14311 14312 /* 14313 * Indicate that the command is being retried. This will not allow any 14314 * other commands on the wait queue to be transported to the target 14315 * until this command has been completed (success or failure). The 14316 * "retry command" is not transported to the target until the given 14317 * time delay expires, unless the user specified a 0 retry_delay. 14318 * 14319 * Note: the timeout(9F) callback routine is what actually calls 14320 * sd_start_cmds() to transport the command, with the exception of a 14321 * zero retry_delay. The only current implementor of a zero retry delay 14322 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14323 */ 14324 if (un->un_retry_bp == NULL) { 14325 ASSERT(un->un_retry_statp == NULL); 14326 un->un_retry_bp = bp; 14327 14328 /* 14329 * If the user has not specified a delay the command should 14330 * be queued and no timeout should be scheduled. 14331 */ 14332 if (retry_delay == 0) { 14333 /* 14334 * Save the kstat pointer that will be used in the 14335 * call to SD_UPDATE_KSTATS() below, so that 14336 * sd_start_cmds() can correctly decrement the waitq 14337 * count when it is time to transport this command. 14338 */ 14339 un->un_retry_statp = statp; 14340 goto done; 14341 } 14342 } 14343 14344 if (un->un_retry_bp == bp) { 14345 /* 14346 * Save the kstat pointer that will be used in the call to 14347 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14348 * correctly decrement the waitq count when it is time to 14349 * transport this command. 14350 */ 14351 un->un_retry_statp = statp; 14352 14353 /* 14354 * Schedule a timeout if: 14355 * 1) The user has specified a delay. 14356 * 2) There is not a START_STOP_UNIT callback pending. 14357 * 14358 * If no delay has been specified, then it is up to the caller 14359 * to ensure that IO processing continues without stalling. 14360 * Effectively, this means that the caller will issue the 14361 * required call to sd_start_cmds(). 
The START_STOP_UNIT 14362 * callback does this after the START STOP UNIT command has 14363 * completed. In either of these cases we should not schedule 14364 * a timeout callback here. Also don't schedule the timeout if 14365 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14366 */ 14367 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14368 (un->un_direct_priority_timeid == NULL)) { 14369 un->un_retry_timeid = 14370 timeout(sd_start_retry_command, un, retry_delay); 14371 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14372 "sd_set_retry_bp: setting timeout: un: 0x%p" 14373 " bp:0x%p un_retry_timeid:0x%p\n", 14374 un, bp, un->un_retry_timeid); 14375 } 14376 } else { 14377 /* 14378 * We only get in here if there is already another command 14379 * waiting to be retried. In this case, we just put the 14380 * given command onto the wait queue, so it can be transported 14381 * after the current retry command has completed. 14382 * 14383 * Also we have to make sure that if the command at the head 14384 * of the wait queue is the un_failfast_bp, that we do not 14385 * put ahead of it any other commands that are to be retried. 14386 */ 14387 if ((un->un_failfast_bp != NULL) && 14388 (un->un_failfast_bp == un->un_waitq_headp)) { 14389 /* 14390 * Enqueue this command AFTER the first command on 14391 * the wait queue (which is also un_failfast_bp). 14392 */ 14393 bp->av_forw = un->un_waitq_headp->av_forw; 14394 un->un_waitq_headp->av_forw = bp; 14395 if (un->un_waitq_headp == un->un_waitq_tailp) { 14396 un->un_waitq_tailp = bp; 14397 } 14398 } else { 14399 /* Enqueue this command at the head of the waitq. */ 14400 bp->av_forw = un->un_waitq_headp; 14401 un->un_waitq_headp = bp; 14402 if (un->un_waitq_tailp == NULL) { 14403 un->un_waitq_tailp = bp; 14404 } 14405 } 14406 14407 if (statp == NULL) { 14408 statp = kstat_waitq_enter; 14409 } 14410 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14411 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14412 } 14413 14414 done: 14415 if (statp != NULL) { 14416 SD_UPDATE_KSTATS(un, statp, bp); 14417 } 14418 14419 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14420 "sd_set_retry_bp: exit un:0x%p\n", un); 14421 } 14422 14423 14424 /* 14425 * Function: sd_start_retry_command 14426 * 14427 * Description: Start the command that has been waiting on the target's 14428 * retry queue. Called from timeout(9F) context after the 14429 * retry delay interval has expired. 14430 * 14431 * Arguments: arg - pointer to associated softstate for the device. 14432 * 14433 * Context: timeout(9F) thread context. May not sleep. 
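 *
 * For reference, sd_set_retry_bp() arms this callback with a call of
 * the form:
 *
 *	un->un_retry_timeid = timeout(sd_start_retry_command, un, retry_delay);
 *
 * so this routine runs without SD_MUTEX held and must re-acquire it
 * before clearing un_retry_timeid and restarting un_retry_bp.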
14434 */ 14435 14436 static void 14437 sd_start_retry_command(void *arg) 14438 { 14439 struct sd_lun *un = arg; 14440 14441 ASSERT(un != NULL); 14442 ASSERT(!mutex_owned(SD_MUTEX(un))); 14443 14444 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14445 "sd_start_retry_command: entry\n"); 14446 14447 mutex_enter(SD_MUTEX(un)); 14448 14449 un->un_retry_timeid = NULL; 14450 14451 if (un->un_retry_bp != NULL) { 14452 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14453 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14454 un, un->un_retry_bp); 14455 sd_start_cmds(un, un->un_retry_bp); 14456 } 14457 14458 mutex_exit(SD_MUTEX(un)); 14459 14460 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14461 "sd_start_retry_command: exit\n"); 14462 } 14463 14464 14465 /* 14466 * Function: sd_start_direct_priority_command 14467 * 14468 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14469 * received TRAN_BUSY when we called scsi_transport() to send it 14470 * to the underlying HBA. This function is called from timeout(9F) 14471 * context after the delay interval has expired. 14472 * 14473 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14474 * 14475 * Context: timeout(9F) thread context. May not sleep. 14476 */ 14477 14478 static void 14479 sd_start_direct_priority_command(void *arg) 14480 { 14481 struct buf *priority_bp = arg; 14482 struct sd_lun *un; 14483 14484 ASSERT(priority_bp != NULL); 14485 un = SD_GET_UN(priority_bp); 14486 ASSERT(un != NULL); 14487 ASSERT(!mutex_owned(SD_MUTEX(un))); 14488 14489 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14490 "sd_start_direct_priority_command: entry\n"); 14491 14492 mutex_enter(SD_MUTEX(un)); 14493 un->un_direct_priority_timeid = NULL; 14494 sd_start_cmds(un, priority_bp); 14495 mutex_exit(SD_MUTEX(un)); 14496 14497 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14498 "sd_start_direct_priority_command: exit\n"); 14499 } 14500 14501 14502 /* 14503 * Function: sd_send_request_sense_command 14504 * 14505 * Description: Sends a REQUEST SENSE command to the target 14506 * 14507 * Context: May be called from interrupt context. 14508 */ 14509 14510 static void 14511 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14512 struct scsi_pkt *pktp) 14513 { 14514 ASSERT(bp != NULL); 14515 ASSERT(un != NULL); 14516 ASSERT(mutex_owned(SD_MUTEX(un))); 14517 14518 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14519 "entry: buf:0x%p\n", bp); 14520 14521 /* 14522 * If we are syncing or dumping, then fail the command to avoid a 14523 * recursive callback into scsi_transport(). Also fail the command 14524 * if we are suspended (legacy behavior). 14525 */ 14526 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14527 (un->un_state == SD_STATE_DUMPING)) { 14528 sd_return_failed_command(un, bp, EIO); 14529 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14530 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14531 return; 14532 } 14533 14534 /* 14535 * Retry the failed command and don't issue the request sense if: 14536 * 1) the sense buf is busy 14537 * 2) we have 1 or more outstanding commands on the target 14538 * (the sense data will be cleared or invalidated any way) 14539 * 14540 * Note: There could be an issue with not checking a retry limit here, 14541 * the problem is determining which retry limit to check. 
14542 */ 14543 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14544 /* Don't retry if the command is flagged as non-retryable */ 14545 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14546 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14547 NULL, NULL, 0, un->un_busy_timeout, 14548 kstat_waitq_enter); 14549 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14550 "sd_send_request_sense_command: " 14551 "at full throttle, retrying exit\n"); 14552 } else { 14553 sd_return_failed_command(un, bp, EIO); 14554 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14555 "sd_send_request_sense_command: " 14556 "at full throttle, non-retryable exit\n"); 14557 } 14558 return; 14559 } 14560 14561 sd_mark_rqs_busy(un, bp); 14562 sd_start_cmds(un, un->un_rqs_bp); 14563 14564 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14565 "sd_send_request_sense_command: exit\n"); 14566 } 14567 14568 14569 /* 14570 * Function: sd_mark_rqs_busy 14571 * 14572 * Description: Indicate that the request sense bp for this instance is 14573 * in use. 14574 * 14575 * Context: May be called under interrupt context 14576 */ 14577 14578 static void 14579 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14580 { 14581 struct sd_xbuf *sense_xp; 14582 14583 ASSERT(un != NULL); 14584 ASSERT(bp != NULL); 14585 ASSERT(mutex_owned(SD_MUTEX(un))); 14586 ASSERT(un->un_sense_isbusy == 0); 14587 14588 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14589 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14590 14591 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14592 ASSERT(sense_xp != NULL); 14593 14594 SD_INFO(SD_LOG_IO, un, 14595 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14596 14597 ASSERT(sense_xp->xb_pktp != NULL); 14598 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14599 == (FLAG_SENSING | FLAG_HEAD)); 14600 14601 un->un_sense_isbusy = 1; 14602 un->un_rqs_bp->b_resid = 0; 14603 sense_xp->xb_pktp->pkt_resid = 0; 14604 sense_xp->xb_pktp->pkt_reason = 0; 14605 14606 /* So we can get back the bp at interrupt time! */ 14607 sense_xp->xb_sense_bp = bp; 14608 14609 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14610 14611 /* 14612 * Mark this buf as awaiting sense data. (This is already set in 14613 * the pkt_flags for the RQS packet.) 14614 */ 14615 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14616 14617 sense_xp->xb_retry_count = 0; 14618 sense_xp->xb_victim_retry_count = 0; 14619 sense_xp->xb_ua_retry_count = 0; 14620 sense_xp->xb_nr_retry_count = 0; 14621 sense_xp->xb_dma_resid = 0; 14622 14623 /* Clean up the fields for auto-request sense */ 14624 sense_xp->xb_sense_status = 0; 14625 sense_xp->xb_sense_state = 0; 14626 sense_xp->xb_sense_resid = 0; 14627 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14628 14629 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14630 } 14631 14632 14633 /* 14634 * Function: sd_mark_rqs_idle 14635 * 14636 * Description: SD_MUTEX must be held continuously through this routine 14637 * to prevent reuse of the rqs struct before the caller can 14638 * complete it's processing. 
14639 * 14640 * Return Code: Pointer to the RQS buf 14641 * 14642 * Context: May be called under interrupt context 14643 */ 14644 14645 static struct buf * 14646 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14647 { 14648 struct buf *bp; 14649 ASSERT(un != NULL); 14650 ASSERT(sense_xp != NULL); 14651 ASSERT(mutex_owned(SD_MUTEX(un))); 14652 ASSERT(un->un_sense_isbusy != 0); 14653 14654 un->un_sense_isbusy = 0; 14655 bp = sense_xp->xb_sense_bp; 14656 sense_xp->xb_sense_bp = NULL; 14657 14658 /* This pkt is no longer interested in getting sense data */ 14659 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14660 14661 return (bp); 14662 } 14663 14664 14665 14666 /* 14667 * Function: sd_alloc_rqs 14668 * 14669 * Description: Set up the unit to receive auto request sense data 14670 * 14671 * Return Code: DDI_SUCCESS or DDI_FAILURE 14672 * 14673 * Context: Called under attach(9E) context 14674 */ 14675 14676 static int 14677 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14678 { 14679 struct sd_xbuf *xp; 14680 14681 ASSERT(un != NULL); 14682 ASSERT(!mutex_owned(SD_MUTEX(un))); 14683 ASSERT(un->un_rqs_bp == NULL); 14684 ASSERT(un->un_rqs_pktp == NULL); 14685 14686 /* 14687 * First allocate the required buf and scsi_pkt structs, then set up 14688 * the CDB in the scsi_pkt for a REQUEST SENSE command. 14689 */ 14690 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14691 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14692 if (un->un_rqs_bp == NULL) { 14693 return (DDI_FAILURE); 14694 } 14695 14696 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14697 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14698 14699 if (un->un_rqs_pktp == NULL) { 14700 sd_free_rqs(un); 14701 return (DDI_FAILURE); 14702 } 14703 14704 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14705 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14706 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14707 14708 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14709 14710 /* Set up the other needed members in the ARQ scsi_pkt. */ 14711 un->un_rqs_pktp->pkt_comp = sdintr; 14712 un->un_rqs_pktp->pkt_time = sd_io_time; 14713 un->un_rqs_pktp->pkt_flags |= 14714 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14715 14716 /* 14717 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14718 * provide any intpkt, destroypkt routines as we take care of 14719 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14720 */ 14721 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14722 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14723 xp->xb_pktp = un->un_rqs_pktp; 14724 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14725 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14726 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14727 14728 /* 14729 * Save the pointer to the request sense private bp so it can 14730 * be retrieved in sdintr. 14731 */ 14732 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14733 ASSERT(un->un_rqs_bp->b_private == xp); 14734 14735 /* 14736 * See if the HBA supports auto-request sense for the specified 14737 * target/lun. If it does, then try to enable it (if not already 14738 * enabled). 14739 * 14740 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14741 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14742 * return success. However, in both of these cases ARQ is always 14743 * enabled and scsi_ifgetcap will always return true. 
The best approach 14744 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14745 * 14746 * The third case is an HBA (adp) that always returns enabled on 14747 * scsi_ifgetcap even when ARQ is not enabled; the best approach 14748 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 14749 * Note: this case is to circumvent the Adaptec bug. (x86 only) 14750 */ 14751 14752 if (un->un_f_is_fibre == TRUE) { 14753 un->un_f_arq_enabled = TRUE; 14754 } else { 14755 #if defined(__i386) || defined(__amd64) 14756 /* 14757 * Circumvent the Adaptec bug; remove this code when 14758 * the bug is fixed. 14759 */ 14760 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14761 #endif 14762 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14763 case 0: 14764 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14765 "sd_alloc_rqs: HBA supports ARQ\n"); 14766 /* 14767 * ARQ is supported by this HBA but currently is not 14768 * enabled. Attempt to enable it, and if successful then 14769 * mark this instance as ARQ enabled. 14770 */ 14771 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14772 == 1) { 14773 /* Successfully enabled ARQ in the HBA */ 14774 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14775 "sd_alloc_rqs: ARQ enabled\n"); 14776 un->un_f_arq_enabled = TRUE; 14777 } else { 14778 /* Could not enable ARQ in the HBA */ 14779 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14780 "sd_alloc_rqs: failed ARQ enable\n"); 14781 un->un_f_arq_enabled = FALSE; 14782 } 14783 break; 14784 case 1: 14785 /* 14786 * ARQ is supported by this HBA and is already enabled. 14787 * Just mark ARQ as enabled for this instance. 14788 */ 14789 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14790 "sd_alloc_rqs: ARQ already enabled\n"); 14791 un->un_f_arq_enabled = TRUE; 14792 break; 14793 default: 14794 /* 14795 * ARQ is not supported by this HBA; disable it for this 14796 * instance. 14797 */ 14798 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14799 "sd_alloc_rqs: HBA does not support ARQ\n"); 14800 un->un_f_arq_enabled = FALSE; 14801 break; 14802 } 14803 } 14804 14805 return (DDI_SUCCESS); 14806 } 14807 14808 14809 /* 14810 * Function: sd_free_rqs 14811 * 14812 * Description: Cleanup for the per-instance RQS command. 14813 * 14814 * Context: Kernel thread context 14815 */ 14816 14817 static void 14818 sd_free_rqs(struct sd_lun *un) 14819 { 14820 ASSERT(un != NULL); 14821 14822 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14823 14824 /* 14825 * If consistent memory is bound to a scsi_pkt, the pkt 14826 * has to be destroyed *before* freeing the consistent memory. 14827 * Don't change the sequence of these operations. 14828 * scsi_destroy_pkt() might otherwise access memory after it 14829 * has been freed by scsi_free_consistent_buf(). 14830 */ 14831 if (un->un_rqs_pktp != NULL) { 14832 scsi_destroy_pkt(un->un_rqs_pktp); 14833 un->un_rqs_pktp = NULL; 14834 } 14835 14836 if (un->un_rqs_bp != NULL) { 14837 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14838 if (xp != NULL) { 14839 kmem_free(xp, sizeof (struct sd_xbuf)); 14840 } 14841 scsi_free_consistent_buf(un->un_rqs_bp); 14842 un->un_rqs_bp = NULL; 14843 } 14844 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14845 } 14846 14847 14848 14849 /* 14850 * Function: sd_reduce_throttle 14851 * 14852 * Description: Reduces the maximum # of outstanding commands on a 14853 * target to the current number of outstanding commands. 14854 * Queues a timeout(9F) callback to restore the limit 14855 * after a specified interval has elapsed.
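 * For example, if un_throttle is currently 8 and only three commands
 * are outstanding when a TRAN_BUSY is returned, the throttle drops to 3;
 * for the TRAN_BUSY case with adaptive throttling, the previous value of
 * 8 is saved in un_busy_throttle (if not already saved) so that
 * sd_restore_throttle() can later restore it.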
14856 * Typically used when we get a TRAN_BUSY return code 14857 * back from scsi_transport(). 14858 * 14859 * Arguments: un - ptr to the sd_lun softstate struct 14860 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14861 * 14862 * Context: May be called from interrupt context 14863 */ 14864 14865 static void 14866 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14867 { 14868 ASSERT(un != NULL); 14869 ASSERT(mutex_owned(SD_MUTEX(un))); 14870 ASSERT(un->un_ncmds_in_transport >= 0); 14871 14872 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14873 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14874 un, un->un_throttle, un->un_ncmds_in_transport); 14875 14876 if (un->un_throttle > 1) { 14877 if (un->un_f_use_adaptive_throttle == TRUE) { 14878 switch (throttle_type) { 14879 case SD_THROTTLE_TRAN_BUSY: 14880 if (un->un_busy_throttle == 0) { 14881 un->un_busy_throttle = un->un_throttle; 14882 } 14883 break; 14884 case SD_THROTTLE_QFULL: 14885 un->un_busy_throttle = 0; 14886 break; 14887 default: 14888 ASSERT(FALSE); 14889 } 14890 14891 if (un->un_ncmds_in_transport > 0) { 14892 un->un_throttle = un->un_ncmds_in_transport; 14893 } 14894 14895 } else { 14896 if (un->un_ncmds_in_transport == 0) { 14897 un->un_throttle = 1; 14898 } else { 14899 un->un_throttle = un->un_ncmds_in_transport; 14900 } 14901 } 14902 } 14903 14904 /* Reschedule the timeout if none is currently active */ 14905 if (un->un_reset_throttle_timeid == NULL) { 14906 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14907 un, SD_THROTTLE_RESET_INTERVAL); 14908 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14909 "sd_reduce_throttle: timeout scheduled!\n"); 14910 } 14911 14912 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14913 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14914 } 14915 14916 14917 14918 /* 14919 * Function: sd_restore_throttle 14920 * 14921 * Description: Callback function for timeout(9F). Resets the current 14922 * value of un->un_throttle to its default. 14923 * 14924 * Arguments: arg - pointer to associated softstate for the device. 14925 * 14926 * Context: May be called from interrupt context 14927 */ 14928 14929 static void 14930 sd_restore_throttle(void *arg) 14931 { 14932 struct sd_lun *un = arg; 14933 14934 ASSERT(un != NULL); 14935 ASSERT(!mutex_owned(SD_MUTEX(un))); 14936 14937 mutex_enter(SD_MUTEX(un)); 14938 14939 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14940 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14941 14942 un->un_reset_throttle_timeid = NULL; 14943 14944 if (un->un_f_use_adaptive_throttle == TRUE) { 14945 /* 14946 * If un_busy_throttle is nonzero, then it contains the 14947 * value that un_throttle was when we got a TRAN_BUSY back 14948 * from scsi_transport(). We want to revert back to this 14949 * value. 14950 * 14951 * In the QFULL case, the throttle limit will incrementally 14952 * increase until it reaches max throttle. 14953 */ 14954 if (un->un_busy_throttle > 0) { 14955 un->un_throttle = un->un_busy_throttle; 14956 un->un_busy_throttle = 0; 14957 } else { 14958 /* 14959 * increase throttle by 10% open gate slowly, schedule 14960 * another restore if saved throttle has not been 14961 * reached 14962 */ 14963 short throttle; 14964 if (sd_qfull_throttle_enable) { 14965 throttle = un->un_throttle + 14966 max((un->un_throttle / 10), 1); 14967 un->un_throttle = 14968 (throttle < un->un_saved_throttle) ? 
throttle : un->un_saved_throttle; 14970 if (un->un_throttle < un->un_saved_throttle) { 14971 un->un_reset_throttle_timeid = 14972 timeout(sd_restore_throttle, 14973 un, 14974 SD_QFULL_THROTTLE_RESET_INTERVAL); 14975 } 14976 } 14977 } 14978 14979 /* 14980 * If un_throttle has fallen below the low-water mark, we 14981 * restore the maximum value here (and allow it to ratchet 14982 * down again if necessary). 14983 */ 14984 if (un->un_throttle < un->un_min_throttle) { 14985 un->un_throttle = un->un_saved_throttle; 14986 } 14987 } else { 14988 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14989 "restoring limit from 0x%x to 0x%x\n", 14990 un->un_throttle, un->un_saved_throttle); 14991 un->un_throttle = un->un_saved_throttle; 14992 } 14993 14994 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14995 "sd_restore_throttle: calling sd_start_cmds!\n"); 14996 14997 sd_start_cmds(un, NULL); 14998 14999 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15000 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 15001 un, un->un_throttle); 15002 15003 mutex_exit(SD_MUTEX(un)); 15004 15005 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15006 } 15007 15008 /* 15009 * Function: sdrunout 15010 * 15011 * Description: Callback routine for scsi_init_pkt when a resource allocation 15012 * fails. 15013 * 15014 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15015 * soft state instance. 15016 * 15017 * Return Code: The scsi_init_pkt routine allows for the callback function to 15018 * return a 0 indicating the callback should be rescheduled or a 1 15019 * indicating not to reschedule. This routine always returns 1 15020 * because the driver always provides a callback function to 15021 * scsi_init_pkt. This results in a callback always being scheduled 15022 * (via the scsi_init_pkt callback implementation) if a resource 15023 * failure occurs. 15024 * 15025 * Context: This callback function may not block or call routines that block. 15026 * 15027 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15028 * request persisting at the head of the list which cannot be 15029 * satisfied even after multiple retries. In the future the driver 15030 * may implement some type of maximum runout count before failing 15031 * an I/O. 15032 */ 15033 15034 static int 15035 sdrunout(caddr_t arg) 15036 { 15037 struct sd_lun *un = (struct sd_lun *)arg; 15038 15039 ASSERT(un != NULL); 15040 ASSERT(!mutex_owned(SD_MUTEX(un))); 15041 15042 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15043 15044 mutex_enter(SD_MUTEX(un)); 15045 sd_start_cmds(un, NULL); 15046 mutex_exit(SD_MUTEX(un)); 15047 /* 15048 * This callback routine always returns 1 (i.e. do not reschedule) 15049 * because we always specify sdrunout as the callback handler for 15050 * scsi_init_pkt inside the call to sd_start_cmds. 15051 */ 15052 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15053 return (1); 15054 } 15055 15056 15057 /* 15058 * Function: sdintr 15059 * 15060 * Description: Completion callback routine for scsi_pkt(9S) structs 15061 * sent to the HBA driver via scsi_transport(9F).
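 * This routine is invoked by the HBA through the scsi_pkt's pkt_comp
 * field; for example, the pre-allocated REQUEST SENSE packet is set up
 * in sd_alloc_rqs() with:
 *
 *	un->un_rqs_pktp->pkt_comp = sdintr;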
15062 * 15063 * Context: Interrupt context 15064 */ 15065 15066 static void 15067 sdintr(struct scsi_pkt *pktp) 15068 { 15069 struct buf *bp; 15070 struct sd_xbuf *xp; 15071 struct sd_lun *un; 15072 size_t actual_len; 15073 15074 ASSERT(pktp != NULL); 15075 bp = (struct buf *)pktp->pkt_private; 15076 ASSERT(bp != NULL); 15077 xp = SD_GET_XBUF(bp); 15078 ASSERT(xp != NULL); 15079 ASSERT(xp->xb_pktp != NULL); 15080 un = SD_GET_UN(bp); 15081 ASSERT(un != NULL); 15082 ASSERT(!mutex_owned(SD_MUTEX(un))); 15083 15084 #ifdef SD_FAULT_INJECTION 15085 15086 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 15087 /* SD FaultInjection */ 15088 sd_faultinjection(pktp); 15089 15090 #endif /* SD_FAULT_INJECTION */ 15091 15092 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 15093 " xp:0x%p, un:0x%p\n", bp, xp, un); 15094 15095 mutex_enter(SD_MUTEX(un)); 15096 15097 /* Reduce the count of the #commands currently in transport */ 15098 un->un_ncmds_in_transport--; 15099 ASSERT(un->un_ncmds_in_transport >= 0); 15100 15101 /* Increment counter to indicate that the callback routine is active */ 15102 un->un_in_callback++; 15103 15104 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15105 15106 #ifdef SDDEBUG 15107 if (bp == un->un_retry_bp) { 15108 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 15109 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 15110 un, un->un_retry_bp, un->un_ncmds_in_transport); 15111 } 15112 #endif 15113 15114 /* 15115 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 15116 * state if needed. 15117 */ 15118 if (pktp->pkt_reason == CMD_DEV_GONE) { 15119 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15120 "Command failed to complete...Device is gone\n"); 15121 if (un->un_mediastate != DKIO_DEV_GONE) { 15122 un->un_mediastate = DKIO_DEV_GONE; 15123 cv_broadcast(&un->un_state_cv); 15124 } 15125 sd_return_failed_command(un, bp, EIO); 15126 goto exit; 15127 } 15128 15129 if (pktp->pkt_state & STATE_XARQ_DONE) { 15130 SD_TRACE(SD_LOG_COMMON, un, 15131 "sdintr: extra sense data received. pkt=%p\n", pktp); 15132 } 15133 15134 /* 15135 * First see if the pkt has auto-request sense data with it.... 15136 * Look at the packet state first so we don't take a performance 15137 * hit looking at the arq enabled flag unless absolutely necessary. 15138 */ 15139 if ((pktp->pkt_state & STATE_ARQ_DONE) && 15140 (un->un_f_arq_enabled == TRUE)) { 15141 /* 15142 * The HBA did an auto request sense for this command so check 15143 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15144 * driver command that should not be retried. 15145 */ 15146 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15147 /* 15148 * Save the relevant sense info into the xp for the 15149 * original cmd. 
15150 */ 15151 struct scsi_arq_status *asp; 15152 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15153 xp->xb_sense_status = 15154 *((uchar_t *)(&(asp->sts_rqpkt_status))); 15155 xp->xb_sense_state = asp->sts_rqpkt_state; 15156 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15157 if (pktp->pkt_state & STATE_XARQ_DONE) { 15158 actual_len = MAX_SENSE_LENGTH - 15159 xp->xb_sense_resid; 15160 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15161 MAX_SENSE_LENGTH); 15162 } else { 15163 if (xp->xb_sense_resid > SENSE_LENGTH) { 15164 actual_len = MAX_SENSE_LENGTH - 15165 xp->xb_sense_resid; 15166 } else { 15167 actual_len = SENSE_LENGTH - 15168 xp->xb_sense_resid; 15169 } 15170 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15171 if ((((struct uscsi_cmd *) 15172 (xp->xb_pktinfo))->uscsi_rqlen) > 15173 actual_len) { 15174 xp->xb_sense_resid = 15175 (((struct uscsi_cmd *) 15176 (xp->xb_pktinfo))-> 15177 uscsi_rqlen) - actual_len; 15178 } else { 15179 xp->xb_sense_resid = 0; 15180 } 15181 } 15182 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15183 SENSE_LENGTH); 15184 } 15185 15186 /* fail the command */ 15187 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15188 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 15189 sd_return_failed_command(un, bp, EIO); 15190 goto exit; 15191 } 15192 15193 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15194 /* 15195 * We want to either retry or fail this command, so free 15196 * the DMA resources here. If we retry the command then 15197 * the DMA resources will be reallocated in sd_start_cmds(). 15198 * Note that when PKT_DMA_PARTIAL is used, this reallocation 15199 * causes the *entire* transfer to start over again from the 15200 * beginning of the request, even for PARTIAL chunks that 15201 * have already transferred successfully. 15202 */ 15203 if ((un->un_f_is_fibre == TRUE) && 15204 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15205 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15206 scsi_dmafree(pktp); 15207 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15208 } 15209 #endif 15210 15211 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15212 "sdintr: arq done, sd_handle_auto_request_sense\n"); 15213 15214 sd_handle_auto_request_sense(un, bp, xp, pktp); 15215 goto exit; 15216 } 15217 15218 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15219 if (pktp->pkt_flags & FLAG_SENSING) { 15220 /* This pktp is from the unit's REQUEST_SENSE command */ 15221 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15222 "sdintr: sd_handle_request_sense\n"); 15223 sd_handle_request_sense(un, bp, xp, pktp); 15224 goto exit; 15225 } 15226 15227 /* 15228 * Check to see if the command successfully completed as requested; 15229 * this is the most common case (and also the hot performance path). 15230 * 15231 * Requirements for successful completion are: 15232 * pkt_reason is CMD_CMPLT and packet status is status good. 15233 * In addition: 15234 * - A residual of zero indicates successful completion no matter what 15235 * the command is. 15236 * - If the residual is not zero and the command is not a read or 15237 * write, then it's still defined as successful completion. In other 15238 * words, if the command is a read or write the residual must be 15239 * zero for successful completion. 15240 * - If the residual is not zero and the command is a read or 15241 * write, and it's a USCSICMD, then it's still defined as 15242 * successful completion. 
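 * As an illustration: a READ or WRITE that completes with a non-zero
 * residual (and was not issued through USCSI) does not meet these
 * requirements and falls through to the not_successful error processing
 * below, while a non-read/write command with a non-zero residual is
 * returned to the caller as successful with b_resid updated.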
15243 */ 15244 if ((pktp->pkt_reason == CMD_CMPLT) && 15245 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15246 15247 /* 15248 * Since this command is returned with a good status, we 15249 * can reset the count for Sonoma failover. 15250 */ 15251 un->un_sonoma_failure_count = 0; 15252 15253 /* 15254 * Return all USCSI commands on good status 15255 */ 15256 if (pktp->pkt_resid == 0) { 15257 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15258 "sdintr: returning command for resid == 0\n"); 15259 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15260 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15261 SD_UPDATE_B_RESID(bp, pktp); 15262 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15263 "sdintr: returning command for resid != 0\n"); 15264 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15265 SD_UPDATE_B_RESID(bp, pktp); 15266 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15267 "sdintr: returning uscsi command\n"); 15268 } else { 15269 goto not_successful; 15270 } 15271 sd_return_command(un, bp); 15272 15273 /* 15274 * Decrement counter to indicate that the callback routine 15275 * is done. 15276 */ 15277 un->un_in_callback--; 15278 ASSERT(un->un_in_callback >= 0); 15279 mutex_exit(SD_MUTEX(un)); 15280 15281 return; 15282 } 15283 15284 not_successful: 15285 15286 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15287 /* 15288 * The following is based upon knowledge of the underlying transport 15289 * and its use of DMA resources. This code should be removed when 15290 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15291 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15292 * and sd_start_cmds(). 15293 * 15294 * Free any DMA resources associated with this command if there 15295 * is a chance it could be retried or enqueued for later retry. 15296 * If we keep the DMA binding then mpxio cannot reissue the 15297 * command on another path whenever a path failure occurs. 15298 * 15299 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15300 * causes the *entire* transfer to start over again from the 15301 * beginning of the request, even for PARTIAL chunks that 15302 * have already transferred successfully. 15303 * 15304 * This is only done for non-uscsi commands (and also skipped for the 15305 * driver's internal RQS command). Also just do this for Fibre Channel 15306 * devices as these are the only ones that support mpxio. 15307 */ 15308 if ((un->un_f_is_fibre == TRUE) && 15309 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15310 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15311 scsi_dmafree(pktp); 15312 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15313 } 15314 #endif 15315 15316 /* 15317 * The command did not successfully complete as requested so check 15318 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15319 * driver command that should not be retried so just return. If 15320 * FLAG_DIAGNOSE is not set the error will be processed below. 15321 */ 15322 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15323 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15324 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15325 /* 15326 * Issue a request sense if a check condition caused the error 15327 * (we handle the auto request sense case above), otherwise 15328 * just fail the command. 
15329 */ 15330 if ((pktp->pkt_reason == CMD_CMPLT) && 15331 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15332 sd_send_request_sense_command(un, bp, pktp); 15333 } else { 15334 sd_return_failed_command(un, bp, EIO); 15335 } 15336 goto exit; 15337 } 15338 15339 /* 15340 * The command did not successfully complete as requested so process 15341 * the error, retry, and/or attempt recovery. 15342 */ 15343 switch (pktp->pkt_reason) { 15344 case CMD_CMPLT: 15345 switch (SD_GET_PKT_STATUS(pktp)) { 15346 case STATUS_GOOD: 15347 /* 15348 * The command completed successfully with a non-zero 15349 * residual 15350 */ 15351 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15352 "sdintr: STATUS_GOOD \n"); 15353 sd_pkt_status_good(un, bp, xp, pktp); 15354 break; 15355 15356 case STATUS_CHECK: 15357 case STATUS_TERMINATED: 15358 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15359 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15360 sd_pkt_status_check_condition(un, bp, xp, pktp); 15361 break; 15362 15363 case STATUS_BUSY: 15364 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15365 "sdintr: STATUS_BUSY\n"); 15366 sd_pkt_status_busy(un, bp, xp, pktp); 15367 break; 15368 15369 case STATUS_RESERVATION_CONFLICT: 15370 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15371 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15372 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15373 break; 15374 15375 case STATUS_QFULL: 15376 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15377 "sdintr: STATUS_QFULL\n"); 15378 sd_pkt_status_qfull(un, bp, xp, pktp); 15379 break; 15380 15381 case STATUS_MET: 15382 case STATUS_INTERMEDIATE: 15383 case STATUS_SCSI2: 15384 case STATUS_INTERMEDIATE_MET: 15385 case STATUS_ACA_ACTIVE: 15386 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15387 "Unexpected SCSI status received: 0x%x\n", 15388 SD_GET_PKT_STATUS(pktp)); 15389 sd_return_failed_command(un, bp, EIO); 15390 break; 15391 15392 default: 15393 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15394 "Invalid SCSI status received: 0x%x\n", 15395 SD_GET_PKT_STATUS(pktp)); 15396 sd_return_failed_command(un, bp, EIO); 15397 break; 15398 15399 } 15400 break; 15401 15402 case CMD_INCOMPLETE: 15403 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15404 "sdintr: CMD_INCOMPLETE\n"); 15405 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15406 break; 15407 case CMD_TRAN_ERR: 15408 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15409 "sdintr: CMD_TRAN_ERR\n"); 15410 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15411 break; 15412 case CMD_RESET: 15413 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15414 "sdintr: CMD_RESET \n"); 15415 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15416 break; 15417 case CMD_ABORTED: 15418 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15419 "sdintr: CMD_ABORTED \n"); 15420 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15421 break; 15422 case CMD_TIMEOUT: 15423 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15424 "sdintr: CMD_TIMEOUT\n"); 15425 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15426 break; 15427 case CMD_UNX_BUS_FREE: 15428 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15429 "sdintr: CMD_UNX_BUS_FREE \n"); 15430 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15431 break; 15432 case CMD_TAG_REJECT: 15433 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15434 "sdintr: CMD_TAG_REJECT\n"); 15435 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15436 break; 15437 default: 15438 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15439 "sdintr: default\n"); 15440 sd_pkt_reason_default(un, bp, xp, pktp); 15441 break; 15442 } 15443 15444 exit: 15445 
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15446 15447 /* Decrement counter to indicate that the callback routine is done. */ 15448 un->un_in_callback--; 15449 ASSERT(un->un_in_callback >= 0); 15450 15451 /* 15452 * At this point, the pkt has been dispatched, i.e., it is either 15453 * being re-tried or has been returned to its caller and should 15454 * not be referenced. 15455 */ 15456 15457 mutex_exit(SD_MUTEX(un)); 15458 } 15459 15460 15461 /* 15462 * Function: sd_print_incomplete_msg 15463 * 15464 * Description: Prints the error message for a CMD_INCOMPLETE error. 15465 * 15466 * Arguments: un - ptr to associated softstate for the device. 15467 * bp - ptr to the buf(9S) for the command. 15468 * arg - message string ptr 15469 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15470 * or SD_NO_RETRY_ISSUED. 15471 * 15472 * Context: May be called under interrupt context 15473 */ 15474 15475 static void 15476 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15477 { 15478 struct scsi_pkt *pktp; 15479 char *msgp; 15480 char *cmdp = arg; 15481 15482 ASSERT(un != NULL); 15483 ASSERT(mutex_owned(SD_MUTEX(un))); 15484 ASSERT(bp != NULL); 15485 ASSERT(arg != NULL); 15486 pktp = SD_GET_PKTP(bp); 15487 ASSERT(pktp != NULL); 15488 15489 switch (code) { 15490 case SD_DELAYED_RETRY_ISSUED: 15491 case SD_IMMEDIATE_RETRY_ISSUED: 15492 msgp = "retrying"; 15493 break; 15494 case SD_NO_RETRY_ISSUED: 15495 default: 15496 msgp = "giving up"; 15497 break; 15498 } 15499 15500 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15501 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15502 "incomplete %s- %s\n", cmdp, msgp); 15503 } 15504 } 15505 15506 15507 15508 /* 15509 * Function: sd_pkt_status_good 15510 * 15511 * Description: Processing for a STATUS_GOOD code in pkt_status. 15512 * 15513 * Context: May be called under interrupt context 15514 */ 15515 15516 static void 15517 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15518 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15519 { 15520 char *cmdp; 15521 15522 ASSERT(un != NULL); 15523 ASSERT(mutex_owned(SD_MUTEX(un))); 15524 ASSERT(bp != NULL); 15525 ASSERT(xp != NULL); 15526 ASSERT(pktp != NULL); 15527 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15528 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15529 ASSERT(pktp->pkt_resid != 0); 15530 15531 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15532 15533 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15534 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15535 case SCMD_READ: 15536 cmdp = "read"; 15537 break; 15538 case SCMD_WRITE: 15539 cmdp = "write"; 15540 break; 15541 default: 15542 SD_UPDATE_B_RESID(bp, pktp); 15543 sd_return_command(un, bp); 15544 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15545 return; 15546 } 15547 15548 /* 15549 * See if we can retry the read/write, preferably immediately. 15550 * If retries are exhausted, then sd_retry_command() will update 15551 * the b_resid count. 15552 */ 15553 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15554 cmdp, EIO, (clock_t)0, NULL); 15555 15556 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15557 } 15558 15559 15560 15561 15562 15563 /* 15564 * Function: sd_handle_request_sense 15565 * 15566 * Description: Processing for non-auto Request Sense command.
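 * This path is used when the driver issued its own REQUEST SENSE
 * (un_rqs_bp) rather than receiving auto request sense data from the
 * HBA; the buf/xbuf/pkt for the original command are recovered from
 * sense_xp->xb_sense_bp, which sd_mark_rqs_busy() saved when the RQS
 * command was issued.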
15567 * 15568 * Arguments: un - ptr to associated softstate 15569 * sense_bp - ptr to buf(9S) for the RQS command 15570 * sense_xp - ptr to the sd_xbuf for the RQS command 15571 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15572 * 15573 * Context: May be called under interrupt context 15574 */ 15575 15576 static void 15577 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15578 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15579 { 15580 struct buf *cmd_bp; /* buf for the original command */ 15581 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15582 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15583 size_t actual_len; /* actual sense data length */ 15584 15585 ASSERT(un != NULL); 15586 ASSERT(mutex_owned(SD_MUTEX(un))); 15587 ASSERT(sense_bp != NULL); 15588 ASSERT(sense_xp != NULL); 15589 ASSERT(sense_pktp != NULL); 15590 15591 /* 15592 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15593 * RQS command and not the original command. 15594 */ 15595 ASSERT(sense_pktp == un->un_rqs_pktp); 15596 ASSERT(sense_bp == un->un_rqs_bp); 15597 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15598 (FLAG_SENSING | FLAG_HEAD)); 15599 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15600 FLAG_SENSING) == FLAG_SENSING); 15601 15602 /* These are the bp, xp, and pktp for the original command */ 15603 cmd_bp = sense_xp->xb_sense_bp; 15604 cmd_xp = SD_GET_XBUF(cmd_bp); 15605 cmd_pktp = SD_GET_PKTP(cmd_bp); 15606 15607 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15608 /* 15609 * The REQUEST SENSE command failed. Release the REQUEST 15610 * SENSE command for re-use, get back the bp for the original 15611 * command, and attempt to re-try the original command if 15612 * FLAG_DIAGNOSE is not set in the original packet. 15613 */ 15614 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15615 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15616 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15617 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15618 NULL, NULL, EIO, (clock_t)0, NULL); 15619 return; 15620 } 15621 } 15622 15623 /* 15624 * Save the relevant sense info into the xp for the original cmd. 15625 * 15626 * Note: if the request sense failed the state info will be zero 15627 * as set in sd_mark_rqs_busy() 15628 */ 15629 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15630 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15631 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15632 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15633 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15634 SENSE_LENGTH)) { 15635 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15636 MAX_SENSE_LENGTH); 15637 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15638 } else { 15639 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15640 SENSE_LENGTH); 15641 if (actual_len < SENSE_LENGTH) { 15642 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15643 } else { 15644 cmd_xp->xb_sense_resid = 0; 15645 } 15646 } 15647 15648 /* 15649 * Free up the RQS command.... 15650 * NOTE: 15651 * Must do this BEFORE calling sd_validate_sense_data! 15652 * sd_validate_sense_data may return the original command in 15653 * which case the pkt will be freed and the flags can no 15654 * longer be touched. 15655 * SD_MUTEX is held through this process until the command 15656 * is dispatched based upon the sense data, so there are 15657 * no race conditions. 
15658 */ 15659 (void) sd_mark_rqs_idle(un, sense_xp); 15660 15661 /* 15662 * For a retryable command see if we have valid sense data, if so then 15663 * turn it over to sd_decode_sense() to figure out the right course of 15664 * action. Just fail a non-retryable command. 15665 */ 15666 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15667 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15668 SD_SENSE_DATA_IS_VALID) { 15669 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15670 } 15671 } else { 15672 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15673 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15674 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15675 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15676 sd_return_failed_command(un, cmd_bp, EIO); 15677 } 15678 } 15679 15680 15681 15682 15683 /* 15684 * Function: sd_handle_auto_request_sense 15685 * 15686 * Description: Processing for auto-request sense information. 15687 * 15688 * Arguments: un - ptr to associated softstate 15689 * bp - ptr to buf(9S) for the command 15690 * xp - ptr to the sd_xbuf for the command 15691 * pktp - ptr to the scsi_pkt(9S) for the command 15692 * 15693 * Context: May be called under interrupt context 15694 */ 15695 15696 static void 15697 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15698 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15699 { 15700 struct scsi_arq_status *asp; 15701 size_t actual_len; 15702 15703 ASSERT(un != NULL); 15704 ASSERT(mutex_owned(SD_MUTEX(un))); 15705 ASSERT(bp != NULL); 15706 ASSERT(xp != NULL); 15707 ASSERT(pktp != NULL); 15708 ASSERT(pktp != un->un_rqs_pktp); 15709 ASSERT(bp != un->un_rqs_bp); 15710 15711 /* 15712 * For auto-request sense, we get a scsi_arq_status back from 15713 * the HBA, with the sense data in the sts_sensedata member. 15714 * The pkt_scbp of the packet points to this scsi_arq_status. 15715 */ 15716 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15717 15718 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15719 /* 15720 * The auto REQUEST SENSE failed; see if we can re-try 15721 * the original command. 15722 */ 15723 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15724 "auto request sense failed (reason=%s)\n", 15725 scsi_rname(asp->sts_rqpkt_reason)); 15726 15727 sd_reset_target(un, pktp); 15728 15729 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15730 NULL, NULL, EIO, (clock_t)0, NULL); 15731 return; 15732 } 15733 15734 /* Save the relevant sense info into the xp for the original cmd. 
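 * (When STATE_XARQ_DONE is set in the returned state, the full
 * MAX_SENSE_LENGTH buffer is copied; otherwise only SENSE_LENGTH bytes
 * are copied, and for USCSI commands xb_sense_resid is recomputed
 * against the caller's uscsi_rqlen.)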
*/ 15735 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15736 xp->xb_sense_state = asp->sts_rqpkt_state; 15737 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15738 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15739 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15740 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15741 MAX_SENSE_LENGTH); 15742 } else { 15743 if (xp->xb_sense_resid > SENSE_LENGTH) { 15744 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15745 } else { 15746 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15747 } 15748 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15749 if ((((struct uscsi_cmd *) 15750 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 15751 xp->xb_sense_resid = (((struct uscsi_cmd *) 15752 (xp->xb_pktinfo))->uscsi_rqlen) - 15753 actual_len; 15754 } else { 15755 xp->xb_sense_resid = 0; 15756 } 15757 } 15758 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15759 } 15760 15761 /* 15762 * See if we have valid sense data, if so then turn it over to 15763 * sd_decode_sense() to figure out the right course of action. 15764 */ 15765 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15766 SD_SENSE_DATA_IS_VALID) { 15767 sd_decode_sense(un, bp, xp, pktp); 15768 } 15769 } 15770 15771 15772 /* 15773 * Function: sd_print_sense_failed_msg 15774 * 15775 * Description: Print log message when RQS has failed. 15776 * 15777 * Arguments: un - ptr to associated softstate 15778 * bp - ptr to buf(9S) for the command 15779 * arg - generic message string ptr 15780 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15781 * or SD_NO_RETRY_ISSUED 15782 * 15783 * Context: May be called from interrupt context 15784 */ 15785 15786 static void 15787 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15788 int code) 15789 { 15790 char *msgp = arg; 15791 15792 ASSERT(un != NULL); 15793 ASSERT(mutex_owned(SD_MUTEX(un))); 15794 ASSERT(bp != NULL); 15795 15796 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15797 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15798 } 15799 } 15800 15801 15802 /* 15803 * Function: sd_validate_sense_data 15804 * 15805 * Description: Check the given sense data for validity. 15806 * If the sense data is not valid, the command will 15807 * be either failed or retried! 15808 * 15809 * Return Code: SD_SENSE_DATA_IS_INVALID 15810 * SD_SENSE_DATA_IS_VALID 15811 * 15812 * Context: May be called from interrupt context 15813 */ 15814 15815 static int 15816 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15817 size_t actual_len) 15818 { 15819 struct scsi_extended_sense *esp; 15820 struct scsi_pkt *pktp; 15821 char *msgp = NULL; 15822 15823 ASSERT(un != NULL); 15824 ASSERT(mutex_owned(SD_MUTEX(un))); 15825 ASSERT(bp != NULL); 15826 ASSERT(bp != un->un_rqs_bp); 15827 ASSERT(xp != NULL); 15828 15829 pktp = SD_GET_PKTP(bp); 15830 ASSERT(pktp != NULL); 15831 15832 /* 15833 * Check the status of the RQS command (auto or manual). 
15834 */ 15835 switch (xp->xb_sense_status & STATUS_MASK) { 15836 case STATUS_GOOD: 15837 break; 15838 15839 case STATUS_RESERVATION_CONFLICT: 15840 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15841 return (SD_SENSE_DATA_IS_INVALID); 15842 15843 case STATUS_BUSY: 15844 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15845 "Busy Status on REQUEST SENSE\n"); 15846 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15847 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 15848 return (SD_SENSE_DATA_IS_INVALID); 15849 15850 case STATUS_QFULL: 15851 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15852 "QFULL Status on REQUEST SENSE\n"); 15853 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15854 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 15855 return (SD_SENSE_DATA_IS_INVALID); 15856 15857 case STATUS_CHECK: 15858 case STATUS_TERMINATED: 15859 msgp = "Check Condition on REQUEST SENSE\n"; 15860 goto sense_failed; 15861 15862 default: 15863 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15864 goto sense_failed; 15865 } 15866 15867 /* 15868 * See if we got the minimum required amount of sense data. 15869 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15870 * or less. 15871 */ 15872 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15873 (actual_len == 0)) { 15874 msgp = "Request Sense couldn't get sense data\n"; 15875 goto sense_failed; 15876 } 15877 15878 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15879 msgp = "Not enough sense information\n"; 15880 goto sense_failed; 15881 } 15882 15883 /* 15884 * We require the extended sense data 15885 */ 15886 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15887 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15888 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15889 static char tmp[8]; 15890 static char buf[148]; 15891 char *p = (char *)(xp->xb_sense_data); 15892 int i; 15893 15894 mutex_enter(&sd_sense_mutex); 15895 (void) strcpy(buf, "undecodable sense information:"); 15896 for (i = 0; i < actual_len; i++) { 15897 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15898 (void) strcpy(&buf[strlen(buf)], tmp); 15899 } 15900 i = strlen(buf); 15901 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15902 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15903 mutex_exit(&sd_sense_mutex); 15904 } 15905 /* Note: Legacy behavior, fail the command with no retry */ 15906 sd_return_failed_command(un, bp, EIO); 15907 return (SD_SENSE_DATA_IS_INVALID); 15908 } 15909 15910 /* 15911 * Check that es_code is valid (es_class concatenated with es_code 15912 * make up the "response code" field. es_class will always be 7, so 15913 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 15914 * format. 15915 */ 15916 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15917 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15918 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15919 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15920 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15921 goto sense_failed; 15922 } 15923 15924 return (SD_SENSE_DATA_IS_VALID); 15925 15926 sense_failed: 15927 /* 15928 * If the request sense failed (for whatever reason), attempt 15929 * to retry the original command. 15930 */ 15931 #if defined(__i386) || defined(__amd64) 15932 /* 15933 * SD_RETRY_DELAY is conditionally compile (#if fibre) in 15934 * sddef.h for Sparc platform, and x86 uses 1 binary 15935 * for both SCSI/FC. 
The SD_RETRY_DELAY value needs to be adjusted here 15937 * when SD_RETRY_DELAY changes in sddef.h. 15938 */ 15939 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15940 sd_print_sense_failed_msg, msgp, EIO, 15941 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15942 #else 15943 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15944 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15945 #endif 15946 15947 return (SD_SENSE_DATA_IS_INVALID); 15948 } 15949 15950 15951 15952 /* 15953 * Function: sd_decode_sense 15954 * 15955 * Description: Take recovery action(s) when SCSI Sense Data is received. 15956 * 15957 * Context: Interrupt context. 15958 */ 15959 15960 static void 15961 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15962 struct scsi_pkt *pktp) 15963 { 15964 uint8_t sense_key; 15965 15966 ASSERT(un != NULL); 15967 ASSERT(mutex_owned(SD_MUTEX(un))); 15968 ASSERT(bp != NULL); 15969 ASSERT(bp != un->un_rqs_bp); 15970 ASSERT(xp != NULL); 15971 ASSERT(pktp != NULL); 15972 15973 sense_key = scsi_sense_key(xp->xb_sense_data); 15974 15975 switch (sense_key) { 15976 case KEY_NO_SENSE: 15977 sd_sense_key_no_sense(un, bp, xp, pktp); 15978 break; 15979 case KEY_RECOVERABLE_ERROR: 15980 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15981 bp, xp, pktp); 15982 break; 15983 case KEY_NOT_READY: 15984 sd_sense_key_not_ready(un, xp->xb_sense_data, 15985 bp, xp, pktp); 15986 break; 15987 case KEY_MEDIUM_ERROR: 15988 case KEY_HARDWARE_ERROR: 15989 sd_sense_key_medium_or_hardware_error(un, 15990 xp->xb_sense_data, bp, xp, pktp); 15991 break; 15992 case KEY_ILLEGAL_REQUEST: 15993 sd_sense_key_illegal_request(un, bp, xp, pktp); 15994 break; 15995 case KEY_UNIT_ATTENTION: 15996 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15997 bp, xp, pktp); 15998 break; 15999 case KEY_WRITE_PROTECT: 16000 case KEY_VOLUME_OVERFLOW: 16001 case KEY_MISCOMPARE: 16002 sd_sense_key_fail_command(un, bp, xp, pktp); 16003 break; 16004 case KEY_BLANK_CHECK: 16005 sd_sense_key_blank_check(un, bp, xp, pktp); 16006 break; 16007 case KEY_ABORTED_COMMAND: 16008 sd_sense_key_aborted_command(un, bp, xp, pktp); 16009 break; 16010 case KEY_VENDOR_UNIQUE: 16011 case KEY_COPY_ABORTED: 16012 case KEY_EQUAL: 16013 case KEY_RESERVED: 16014 default: 16015 sd_sense_key_default(un, xp->xb_sense_data, 16016 bp, xp, pktp); 16017 break; 16018 } 16019 } 16020 16021 16022 /* 16023 * Function: sd_dump_memory 16024 * 16025 * Description: Debug logging routine to print the contents of a user provided 16026 * buffer. The output of the buffer is broken up into 256 byte 16027 * segments due to a size constraint of the scsi_log 16028 * implementation.
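 * For example, a call of the form
 *	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
 *	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
 * produces a console line such as
 *	Failed CDB: 0x28 0x00 ...
 * (byte values illustrative), with longer buffers split across multiple
 * such lines.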
16029 * 16030 * Arguments: un - ptr to softstate 16031 * comp - component mask 16032 * title - "title" string to preceed data when printed 16033 * data - ptr to data block to be printed 16034 * len - size of data block to be printed 16035 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 16036 * 16037 * Context: May be called from interrupt context 16038 */ 16039 16040 #define SD_DUMP_MEMORY_BUF_SIZE 256 16041 16042 static char *sd_dump_format_string[] = { 16043 " 0x%02x", 16044 " %c" 16045 }; 16046 16047 static void 16048 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 16049 int len, int fmt) 16050 { 16051 int i, j; 16052 int avail_count; 16053 int start_offset; 16054 int end_offset; 16055 size_t entry_len; 16056 char *bufp; 16057 char *local_buf; 16058 char *format_string; 16059 16060 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 16061 16062 /* 16063 * In the debug version of the driver, this function is called from a 16064 * number of places which are NOPs in the release driver. 16065 * The debug driver therefore has additional methods of filtering 16066 * debug output. 16067 */ 16068 #ifdef SDDEBUG 16069 /* 16070 * In the debug version of the driver we can reduce the amount of debug 16071 * messages by setting sd_error_level to something other than 16072 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 16073 * sd_component_mask. 16074 */ 16075 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 16076 (sd_error_level != SCSI_ERR_ALL)) { 16077 return; 16078 } 16079 if (((sd_component_mask & comp) == 0) || 16080 (sd_error_level != SCSI_ERR_ALL)) { 16081 return; 16082 } 16083 #else 16084 if (sd_error_level != SCSI_ERR_ALL) { 16085 return; 16086 } 16087 #endif 16088 16089 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 16090 bufp = local_buf; 16091 /* 16092 * Available length is the length of local_buf[], minus the 16093 * length of the title string, minus one for the ":", minus 16094 * one for the newline, minus one for the NULL terminator. 16095 * This gives the #bytes available for holding the printed 16096 * values from the given data buffer. 16097 */ 16098 if (fmt == SD_LOG_HEX) { 16099 format_string = sd_dump_format_string[0]; 16100 } else /* SD_LOG_CHAR */ { 16101 format_string = sd_dump_format_string[1]; 16102 } 16103 /* 16104 * Available count is the number of elements from the given 16105 * data buffer that we can fit into the available length. 16106 * This is based upon the size of the format string used. 16107 * Make one entry and find it's size. 16108 */ 16109 (void) sprintf(bufp, format_string, data[0]); 16110 entry_len = strlen(bufp); 16111 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 16112 16113 j = 0; 16114 while (j < len) { 16115 bufp = local_buf; 16116 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 16117 start_offset = j; 16118 16119 end_offset = start_offset + avail_count; 16120 16121 (void) sprintf(bufp, "%s:", title); 16122 bufp += strlen(bufp); 16123 for (i = start_offset; ((i < end_offset) && (j < len)); 16124 i++, j++) { 16125 (void) sprintf(bufp, format_string, data[i]); 16126 bufp += entry_len; 16127 } 16128 (void) sprintf(bufp, "\n"); 16129 16130 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 16131 } 16132 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 16133 } 16134 16135 /* 16136 * Function: sd_print_sense_msg 16137 * 16138 * Description: Log a message based upon the given sense data. 
16139 * 16140 * Arguments: un - ptr to associated softstate 16141 * bp - ptr to buf(9S) for the command 16142 * arg - ptr to associate sd_sense_info struct 16143 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16144 * or SD_NO_RETRY_ISSUED 16145 * 16146 * Context: May be called from interrupt context 16147 */ 16148 16149 static void 16150 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16151 { 16152 struct sd_xbuf *xp; 16153 struct scsi_pkt *pktp; 16154 uint8_t *sensep; 16155 daddr_t request_blkno; 16156 diskaddr_t err_blkno; 16157 int severity; 16158 int pfa_flag; 16159 extern struct scsi_key_strings scsi_cmds[]; 16160 16161 ASSERT(un != NULL); 16162 ASSERT(mutex_owned(SD_MUTEX(un))); 16163 ASSERT(bp != NULL); 16164 xp = SD_GET_XBUF(bp); 16165 ASSERT(xp != NULL); 16166 pktp = SD_GET_PKTP(bp); 16167 ASSERT(pktp != NULL); 16168 ASSERT(arg != NULL); 16169 16170 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 16171 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 16172 16173 if ((code == SD_DELAYED_RETRY_ISSUED) || 16174 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 16175 severity = SCSI_ERR_RETRYABLE; 16176 } 16177 16178 /* Use absolute block number for the request block number */ 16179 request_blkno = xp->xb_blkno; 16180 16181 /* 16182 * Now try to get the error block number from the sense data 16183 */ 16184 sensep = xp->xb_sense_data; 16185 16186 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 16187 (uint64_t *)&err_blkno)) { 16188 /* 16189 * We retrieved the error block number from the information 16190 * portion of the sense data. 16191 * 16192 * For USCSI commands we are better off using the error 16193 * block no. as the requested block no. (This is the best 16194 * we can estimate.) 16195 */ 16196 if ((SD_IS_BUFIO(xp) == FALSE) && 16197 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 16198 request_blkno = err_blkno; 16199 } 16200 } else { 16201 /* 16202 * Without the es_valid bit set (for fixed format) or an 16203 * information descriptor (for descriptor format) we cannot 16204 * be certain of the error blkno, so just use the 16205 * request_blkno. 16206 */ 16207 err_blkno = (diskaddr_t)request_blkno; 16208 } 16209 16210 /* 16211 * The following will log the buffer contents for the release driver 16212 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 16213 * level is set to verbose. 
16214 */ 16215 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 16216 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16217 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 16218 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 16219 16220 if (pfa_flag == FALSE) { 16221 /* This is normally only set for USCSI */ 16222 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 16223 return; 16224 } 16225 16226 if ((SD_IS_BUFIO(xp) == TRUE) && 16227 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 16228 (severity < sd_error_level))) { 16229 return; 16230 } 16231 } 16232 16233 /* 16234 * Check for Sonoma Failover and keep a count of how many failed I/O's 16235 */ 16236 if ((SD_IS_LSI(un)) && 16237 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 16238 (scsi_sense_asc(sensep) == 0x94) && 16239 (scsi_sense_ascq(sensep) == 0x01)) { 16240 un->un_sonoma_failure_count++; 16241 if (un->un_sonoma_failure_count > 1) { 16242 return; 16243 } 16244 } 16245 16246 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 16247 request_blkno, err_blkno, scsi_cmds, 16248 (struct scsi_extended_sense *)sensep, 16249 un->un_additional_codes, NULL); 16250 } 16251 16252 /* 16253 * Function: sd_sense_key_no_sense 16254 * 16255 * Description: Recovery action when sense data was not received. 16256 * 16257 * Context: May be called from interrupt context 16258 */ 16259 16260 static void 16261 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 16262 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16263 { 16264 struct sd_sense_info si; 16265 16266 ASSERT(un != NULL); 16267 ASSERT(mutex_owned(SD_MUTEX(un))); 16268 ASSERT(bp != NULL); 16269 ASSERT(xp != NULL); 16270 ASSERT(pktp != NULL); 16271 16272 si.ssi_severity = SCSI_ERR_FATAL; 16273 si.ssi_pfa_flag = FALSE; 16274 16275 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16276 16277 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16278 &si, EIO, (clock_t)0, NULL); 16279 } 16280 16281 16282 /* 16283 * Function: sd_sense_key_recoverable_error 16284 * 16285 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16286 * 16287 * Context: May be called from interrupt context 16288 */ 16289 16290 static void 16291 sd_sense_key_recoverable_error(struct sd_lun *un, 16292 uint8_t *sense_datap, 16293 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16294 { 16295 struct sd_sense_info si; 16296 uint8_t asc = scsi_sense_asc(sense_datap); 16297 16298 ASSERT(un != NULL); 16299 ASSERT(mutex_owned(SD_MUTEX(un))); 16300 ASSERT(bp != NULL); 16301 ASSERT(xp != NULL); 16302 ASSERT(pktp != NULL); 16303 16304 /* 16305 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16306 */ 16307 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16308 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16309 si.ssi_severity = SCSI_ERR_INFO; 16310 si.ssi_pfa_flag = TRUE; 16311 } else { 16312 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16313 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16314 si.ssi_severity = SCSI_ERR_RECOVERED; 16315 si.ssi_pfa_flag = FALSE; 16316 } 16317 16318 if (pktp->pkt_resid == 0) { 16319 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16320 sd_return_command(un, bp); 16321 return; 16322 } 16323 16324 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16325 &si, EIO, (clock_t)0, NULL); 16326 } 16327 16328 16329 16330 16331 /* 16332 * Function: sd_sense_key_not_ready 16333 * 16334 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
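 *
 *		In summary, the additional sense codes handled below are:
 *		    0x04 (LOGICAL UNIT NOT READY)	- retry, in many cases
 *							  scheduling a START
 *							  STOP UNIT first
 *		    0x05 (LU DOES NOT RESPOND TO	- log a warning and
 *			  SELECTION)			  retry
 *		    0x3A (MEDIUM NOT PRESENT)		- fail the command
 *		    all others				- log and retry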
16335 * 16336 * Context: May be called from interrupt context 16337 */ 16338 16339 static void 16340 sd_sense_key_not_ready(struct sd_lun *un, 16341 uint8_t *sense_datap, 16342 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16343 { 16344 struct sd_sense_info si; 16345 uint8_t asc = scsi_sense_asc(sense_datap); 16346 uint8_t ascq = scsi_sense_ascq(sense_datap); 16347 16348 ASSERT(un != NULL); 16349 ASSERT(mutex_owned(SD_MUTEX(un))); 16350 ASSERT(bp != NULL); 16351 ASSERT(xp != NULL); 16352 ASSERT(pktp != NULL); 16353 16354 si.ssi_severity = SCSI_ERR_FATAL; 16355 si.ssi_pfa_flag = FALSE; 16356 16357 /* 16358 * Update error stats after first NOT READY error. Disks may have 16359 * been powered down and may need to be restarted. For CDROMs, 16360 * report NOT READY errors only if media is present. 16361 */ 16362 if ((ISCD(un) && (asc == 0x3A)) || 16363 (xp->xb_nr_retry_count > 0)) { 16364 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16365 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 16366 } 16367 16368 /* 16369 * Just fail if the "not ready" retry limit has been reached. 16370 */ 16371 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 16372 /* Special check for error message printing for removables. */ 16373 if (un->un_f_has_removable_media && (asc == 0x04) && 16374 (ascq >= 0x04)) { 16375 si.ssi_severity = SCSI_ERR_ALL; 16376 } 16377 goto fail_command; 16378 } 16379 16380 /* 16381 * Check the ASC and ASCQ in the sense data as needed, to determine 16382 * what to do. 16383 */ 16384 switch (asc) { 16385 case 0x04: /* LOGICAL UNIT NOT READY */ 16386 /* 16387 * disk drives that don't spin up result in a very long delay 16388 * in format without warning messages. We will log a message 16389 * if the error level is set to verbose. 16390 */ 16391 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16392 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16393 "logical unit not ready, resetting disk\n"); 16394 } 16395 16396 /* 16397 * There are different requirements for CDROMs and disks for 16398 * the number of retries. If a CD-ROM is giving this, it is 16399 * probably reading TOC and is in the process of getting 16400 * ready, so we should keep on trying for a long time to make 16401 * sure that all types of media are taken in account (for 16402 * some media the drive takes a long time to read TOC). For 16403 * disks we do not want to retry this too many times as this 16404 * can cause a long hang in format when the drive refuses to 16405 * spin up (a very common failure). 16406 */ 16407 switch (ascq) { 16408 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 16409 /* 16410 * Disk drives frequently refuse to spin up which 16411 * results in a very long hang in format without 16412 * warning messages. 16413 * 16414 * Note: This code preserves the legacy behavior of 16415 * comparing xb_nr_retry_count against zero for fibre 16416 * channel targets instead of comparing against the 16417 * un_reset_retry_count value. The reason for this 16418 * discrepancy has been so utterly lost beneath the 16419 * Sands of Time that even Indiana Jones could not 16420 * find it. 
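 *			 In practice this means a fibre channel target is
 *			 reset after the first NOT READY retry, whereas a
 *			 parallel SCSI target is only reset once
 *			 xb_nr_retry_count exceeds un_reset_retry_count
 *			 (assuming SD_LOGMASK_DIAG is not set).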
16421 */ 16422 if (un->un_f_is_fibre == TRUE) { 16423 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16424 (xp->xb_nr_retry_count > 0)) && 16425 (un->un_startstop_timeid == NULL)) { 16426 scsi_log(SD_DEVINFO(un), sd_label, 16427 CE_WARN, "logical unit not ready, " 16428 "resetting disk\n"); 16429 sd_reset_target(un, pktp); 16430 } 16431 } else { 16432 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16433 (xp->xb_nr_retry_count > 16434 un->un_reset_retry_count)) && 16435 (un->un_startstop_timeid == NULL)) { 16436 scsi_log(SD_DEVINFO(un), sd_label, 16437 CE_WARN, "logical unit not ready, " 16438 "resetting disk\n"); 16439 sd_reset_target(un, pktp); 16440 } 16441 } 16442 break; 16443 16444 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16445 /* 16446 * If the target is in the process of becoming 16447 * ready, just proceed with the retry. This can 16448 * happen with CD-ROMs that take a long time to 16449 * read TOC after a power cycle or reset. 16450 */ 16451 goto do_retry; 16452 16453 case 0x02: /* LUN NOT READY, INITITIALIZING CMD REQUIRED */ 16454 break; 16455 16456 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16457 /* 16458 * Retries cannot help here so just fail right away. 16459 */ 16460 goto fail_command; 16461 16462 case 0x88: 16463 /* 16464 * Vendor-unique code for T3/T4: it indicates a 16465 * path problem in a mutipathed config, but as far as 16466 * the target driver is concerned it equates to a fatal 16467 * error, so we should just fail the command right away 16468 * (without printing anything to the console). If this 16469 * is not a T3/T4, fall thru to the default recovery 16470 * action. 16471 * T3/T4 is FC only, don't need to check is_fibre 16472 */ 16473 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16474 sd_return_failed_command(un, bp, EIO); 16475 return; 16476 } 16477 /* FALLTHRU */ 16478 16479 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16480 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16481 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16482 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16483 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16484 default: /* Possible future codes in SCSI spec? */ 16485 /* 16486 * For removable-media devices, do not retry if 16487 * ASCQ > 2 as these result mostly from USCSI commands 16488 * on MMC devices issued to check status of an 16489 * operation initiated in immediate mode. Also for 16490 * ASCQ >= 4 do not print console messages as these 16491 * mainly represent a user-initiated operation 16492 * instead of a system failure. 16493 */ 16494 if (un->un_f_has_removable_media) { 16495 si.ssi_severity = SCSI_ERR_ALL; 16496 goto fail_command; 16497 } 16498 break; 16499 } 16500 16501 /* 16502 * As part of our recovery attempt for the NOT READY 16503 * condition, we issue a START STOP UNIT command. However 16504 * we want to wait for a short delay before attempting this 16505 * as there may still be more commands coming back from the 16506 * target with the check condition. To do this we use 16507 * timeout(9F) to call sd_start_stop_unit_callback() after 16508 * the delay interval expires. (sd_start_stop_unit_callback() 16509 * dispatches sd_start_stop_unit_task(), which will issue 16510 * the actual START STOP UNIT command. The delay interval 16511 * is one-half of the delay that we will use to retry the 16512 * command that generated the NOT READY condition. 
16513 * 16514 * Note that we could just dispatch sd_start_stop_unit_task() 16515 * from here and allow it to sleep for the delay interval, 16516 * but then we would be tying up the taskq thread 16517 * uncesessarily for the duration of the delay. 16518 * 16519 * Do not issue the START STOP UNIT if the current command 16520 * is already a START STOP UNIT. 16521 */ 16522 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 16523 break; 16524 } 16525 16526 /* 16527 * Do not schedule the timeout if one is already pending. 16528 */ 16529 if (un->un_startstop_timeid != NULL) { 16530 SD_INFO(SD_LOG_ERROR, un, 16531 "sd_sense_key_not_ready: restart already issued to" 16532 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 16533 ddi_get_instance(SD_DEVINFO(un))); 16534 break; 16535 } 16536 16537 /* 16538 * Schedule the START STOP UNIT command, then queue the command 16539 * for a retry. 16540 * 16541 * Note: A timeout is not scheduled for this retry because we 16542 * want the retry to be serial with the START_STOP_UNIT. The 16543 * retry will be started when the START_STOP_UNIT is completed 16544 * in sd_start_stop_unit_task. 16545 */ 16546 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 16547 un, un->un_busy_timeout / 2); 16548 xp->xb_nr_retry_count++; 16549 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 16550 return; 16551 16552 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16553 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16554 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16555 "unit does not respond to selection\n"); 16556 } 16557 break; 16558 16559 case 0x3A: /* MEDIUM NOT PRESENT */ 16560 if (sd_error_level >= SCSI_ERR_FATAL) { 16561 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16562 "Caddy not inserted in drive\n"); 16563 } 16564 16565 sr_ejected(un); 16566 un->un_mediastate = DKIO_EJECTED; 16567 /* The state has changed, inform the media watch routines */ 16568 cv_broadcast(&un->un_state_cv); 16569 /* Just fail if no media is present in the drive. */ 16570 goto fail_command; 16571 16572 default: 16573 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16574 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16575 "Unit not Ready. Additional sense code 0x%x\n", 16576 asc); 16577 } 16578 break; 16579 } 16580 16581 do_retry: 16582 16583 /* 16584 * Retry the command, as some targets may report NOT READY for 16585 * several seconds after being reset. 16586 */ 16587 xp->xb_nr_retry_count++; 16588 si.ssi_severity = SCSI_ERR_RETRYABLE; 16589 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16590 &si, EIO, un->un_busy_timeout, NULL); 16591 16592 return; 16593 16594 fail_command: 16595 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16596 sd_return_failed_command(un, bp, EIO); 16597 } 16598 16599 16600 16601 /* 16602 * Function: sd_sense_key_medium_or_hardware_error 16603 * 16604 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16605 * sense key. 
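 *
 *		Once xb_retry_count reaches un_reset_retry_count this routine
 *		attempts a LUN reset and, failing that, a target reset,
 *		provided un_f_allow_bus_device_reset is set.  The reset is
 *		skipped for LSI arrays reporting ASC 0x84 ("LUN Dead").  In
 *		either case the command is then queued for a standard retry.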
16606 * 16607 * Context: May be called from interrupt context 16608 */ 16609 16610 static void 16611 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16612 uint8_t *sense_datap, 16613 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16614 { 16615 struct sd_sense_info si; 16616 uint8_t sense_key = scsi_sense_key(sense_datap); 16617 uint8_t asc = scsi_sense_asc(sense_datap); 16618 16619 ASSERT(un != NULL); 16620 ASSERT(mutex_owned(SD_MUTEX(un))); 16621 ASSERT(bp != NULL); 16622 ASSERT(xp != NULL); 16623 ASSERT(pktp != NULL); 16624 16625 si.ssi_severity = SCSI_ERR_FATAL; 16626 si.ssi_pfa_flag = FALSE; 16627 16628 if (sense_key == KEY_MEDIUM_ERROR) { 16629 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16630 } 16631 16632 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16633 16634 if ((un->un_reset_retry_count != 0) && 16635 (xp->xb_retry_count == un->un_reset_retry_count)) { 16636 mutex_exit(SD_MUTEX(un)); 16637 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16638 if (un->un_f_allow_bus_device_reset == TRUE) { 16639 16640 boolean_t try_resetting_target = B_TRUE; 16641 16642 /* 16643 * We need to be able to handle specific ASC when we are 16644 * handling a KEY_HARDWARE_ERROR. In particular 16645 * taking the default action of resetting the target may 16646 * not be the appropriate way to attempt recovery. 16647 * Resetting a target because of a single LUN failure 16648 * victimizes all LUNs on that target. 16649 * 16650 * This is true for the LSI arrays, if an LSI 16651 * array controller returns an ASC of 0x84 (LUN Dead) we 16652 * should trust it. 16653 */ 16654 16655 if (sense_key == KEY_HARDWARE_ERROR) { 16656 switch (asc) { 16657 case 0x84: 16658 if (SD_IS_LSI(un)) { 16659 try_resetting_target = B_FALSE; 16660 } 16661 break; 16662 default: 16663 break; 16664 } 16665 } 16666 16667 if (try_resetting_target == B_TRUE) { 16668 int reset_retval = 0; 16669 if (un->un_f_lun_reset_enabled == TRUE) { 16670 SD_TRACE(SD_LOG_IO_CORE, un, 16671 "sd_sense_key_medium_or_hardware_" 16672 "error: issuing RESET_LUN\n"); 16673 reset_retval = 16674 scsi_reset(SD_ADDRESS(un), 16675 RESET_LUN); 16676 } 16677 if (reset_retval == 0) { 16678 SD_TRACE(SD_LOG_IO_CORE, un, 16679 "sd_sense_key_medium_or_hardware_" 16680 "error: issuing RESET_TARGET\n"); 16681 (void) scsi_reset(SD_ADDRESS(un), 16682 RESET_TARGET); 16683 } 16684 } 16685 } 16686 mutex_enter(SD_MUTEX(un)); 16687 } 16688 16689 /* 16690 * This really ought to be a fatal error, but we will retry anyway 16691 * as some drives report this as a spurious error. 16692 */ 16693 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16694 &si, EIO, (clock_t)0, NULL); 16695 } 16696 16697 16698 16699 /* 16700 * Function: sd_sense_key_illegal_request 16701 * 16702 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
16703 * 16704 * Context: May be called from interrupt context 16705 */ 16706 16707 static void 16708 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16709 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16710 { 16711 struct sd_sense_info si; 16712 16713 ASSERT(un != NULL); 16714 ASSERT(mutex_owned(SD_MUTEX(un))); 16715 ASSERT(bp != NULL); 16716 ASSERT(xp != NULL); 16717 ASSERT(pktp != NULL); 16718 16719 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16720 16721 si.ssi_severity = SCSI_ERR_INFO; 16722 si.ssi_pfa_flag = FALSE; 16723 16724 /* Pointless to retry if the target thinks it's an illegal request */ 16725 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16726 sd_return_failed_command(un, bp, EIO); 16727 } 16728 16729 16730 16731 16732 /* 16733 * Function: sd_sense_key_unit_attention 16734 * 16735 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16736 * 16737 * Context: May be called from interrupt context 16738 */ 16739 16740 static void 16741 sd_sense_key_unit_attention(struct sd_lun *un, 16742 uint8_t *sense_datap, 16743 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16744 { 16745 /* 16746 * For UNIT ATTENTION we allow retries for one minute. Devices 16747 * like Sonoma can return UNIT ATTENTION close to a minute 16748 * under certain conditions. 16749 */ 16750 int retry_check_flag = SD_RETRIES_UA; 16751 boolean_t kstat_updated = B_FALSE; 16752 struct sd_sense_info si; 16753 uint8_t asc = scsi_sense_asc(sense_datap); 16754 uint8_t ascq = scsi_sense_ascq(sense_datap); 16755 16756 ASSERT(un != NULL); 16757 ASSERT(mutex_owned(SD_MUTEX(un))); 16758 ASSERT(bp != NULL); 16759 ASSERT(xp != NULL); 16760 ASSERT(pktp != NULL); 16761 16762 si.ssi_severity = SCSI_ERR_INFO; 16763 si.ssi_pfa_flag = FALSE; 16764 16765 16766 switch (asc) { 16767 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16768 if (sd_report_pfa != 0) { 16769 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16770 si.ssi_pfa_flag = TRUE; 16771 retry_check_flag = SD_RETRIES_STANDARD; 16772 goto do_retry; 16773 } 16774 16775 break; 16776 16777 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16778 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16779 un->un_resvd_status |= 16780 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16781 } 16782 #ifdef _LP64 16783 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16784 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16785 un, KM_NOSLEEP) == 0) { 16786 /* 16787 * If we can't dispatch the task we'll just 16788 * live without descriptor sense. We can 16789 * try again on the next "unit attention" 16790 */ 16791 SD_ERROR(SD_LOG_ERROR, un, 16792 "sd_sense_key_unit_attention: " 16793 "Could not dispatch " 16794 "sd_reenable_dsense_task\n"); 16795 } 16796 } 16797 #endif /* _LP64 */ 16798 /* FALLTHRU */ 16799 16800 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16801 if (!un->un_f_has_removable_media) { 16802 break; 16803 } 16804 16805 /* 16806 * When we get a unit attention from a removable-media device, 16807 * it may be in a state that will take a long time to recover 16808 * (e.g., from a reset). Since we are executing in interrupt 16809 * context here, we cannot wait around for the device to come 16810 * back. So hand this command off to sd_media_change_task() 16811 * for deferred processing under taskq thread context. (Note 16812 * that the command still may be failed if a problem is 16813 * encountered at a later time.) 
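 *		 The buf for the original command is recovered in
 *		 sd_media_change_task() via pktp->pkt_private, which is why
 *		 the packet (rather than the buf) is handed to
 *		 taskq_dispatch() below.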
16814 */ 16815 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16816 KM_NOSLEEP) == 0) { 16817 /* 16818 * Cannot dispatch the request so fail the command. 16819 */ 16820 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16821 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16822 si.ssi_severity = SCSI_ERR_FATAL; 16823 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16824 sd_return_failed_command(un, bp, EIO); 16825 } 16826 16827 /* 16828 * If failed to dispatch sd_media_change_task(), we already 16829 * updated kstat. If succeed to dispatch sd_media_change_task(), 16830 * we should update kstat later if it encounters an error. So, 16831 * we update kstat_updated flag here. 16832 */ 16833 kstat_updated = B_TRUE; 16834 16835 /* 16836 * Either the command has been successfully dispatched to a 16837 * task Q for retrying, or the dispatch failed. In either case 16838 * do NOT retry again by calling sd_retry_command. This sets up 16839 * two retries of the same command and when one completes and 16840 * frees the resources the other will access freed memory, 16841 * a bad thing. 16842 */ 16843 return; 16844 16845 default: 16846 break; 16847 } 16848 16849 /* 16850 * ASC ASCQ 16851 * 2A 09 Capacity data has changed 16852 * 2A 01 Mode parameters changed 16853 * 3F 0E Reported luns data has changed 16854 * Arrays that support logical unit expansion should report 16855 * capacity changes(2Ah/09). Mode parameters changed and 16856 * reported luns data has changed are the approximation. 16857 */ 16858 if (((asc == 0x2a) && (ascq == 0x09)) || 16859 ((asc == 0x2a) && (ascq == 0x01)) || 16860 ((asc == 0x3f) && (ascq == 0x0e))) { 16861 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 16862 KM_NOSLEEP) == 0) { 16863 SD_ERROR(SD_LOG_ERROR, un, 16864 "sd_sense_key_unit_attention: " 16865 "Could not dispatch sd_target_change_task\n"); 16866 } 16867 } 16868 16869 /* 16870 * Update kstat if we haven't done that. 16871 */ 16872 if (!kstat_updated) { 16873 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16874 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16875 } 16876 16877 do_retry: 16878 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16879 EIO, SD_UA_RETRY_DELAY, NULL); 16880 } 16881 16882 16883 16884 /* 16885 * Function: sd_sense_key_fail_command 16886 * 16887 * Description: Use to fail a command when we don't like the sense key that 16888 * was returned. 16889 * 16890 * Context: May be called from interrupt context 16891 */ 16892 16893 static void 16894 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16895 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16896 { 16897 struct sd_sense_info si; 16898 16899 ASSERT(un != NULL); 16900 ASSERT(mutex_owned(SD_MUTEX(un))); 16901 ASSERT(bp != NULL); 16902 ASSERT(xp != NULL); 16903 ASSERT(pktp != NULL); 16904 16905 si.ssi_severity = SCSI_ERR_FATAL; 16906 si.ssi_pfa_flag = FALSE; 16907 16908 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16909 sd_return_failed_command(un, bp, EIO); 16910 } 16911 16912 16913 16914 /* 16915 * Function: sd_sense_key_blank_check 16916 * 16917 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16918 * Has no monetary connotation. 
16919 * 16920 * Context: May be called from interrupt context 16921 */ 16922 16923 static void 16924 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16925 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16926 { 16927 struct sd_sense_info si; 16928 16929 ASSERT(un != NULL); 16930 ASSERT(mutex_owned(SD_MUTEX(un))); 16931 ASSERT(bp != NULL); 16932 ASSERT(xp != NULL); 16933 ASSERT(pktp != NULL); 16934 16935 /* 16936 * Blank check is not fatal for removable devices, therefore 16937 * it does not require a console message. 16938 */ 16939 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16940 SCSI_ERR_FATAL; 16941 si.ssi_pfa_flag = FALSE; 16942 16943 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16944 sd_return_failed_command(un, bp, EIO); 16945 } 16946 16947 16948 16949 16950 /* 16951 * Function: sd_sense_key_aborted_command 16952 * 16953 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 16954 * 16955 * Context: May be called from interrupt context 16956 */ 16957 16958 static void 16959 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16960 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16961 { 16962 struct sd_sense_info si; 16963 16964 ASSERT(un != NULL); 16965 ASSERT(mutex_owned(SD_MUTEX(un))); 16966 ASSERT(bp != NULL); 16967 ASSERT(xp != NULL); 16968 ASSERT(pktp != NULL); 16969 16970 si.ssi_severity = SCSI_ERR_FATAL; 16971 si.ssi_pfa_flag = FALSE; 16972 16973 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16974 16975 /* 16976 * This really ought to be a fatal error, but we will retry anyway 16977 * as some drives report this as a spurious error. 16978 */ 16979 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16980 &si, EIO, drv_usectohz(100000), NULL); 16981 } 16982 16983 16984 16985 /* 16986 * Function: sd_sense_key_default 16987 * 16988 * Description: Default recovery action for several SCSI sense keys (basically 16989 * attempts a retry). 16990 * 16991 * Context: May be called from interrupt context 16992 */ 16993 16994 static void 16995 sd_sense_key_default(struct sd_lun *un, 16996 uint8_t *sense_datap, 16997 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16998 { 16999 struct sd_sense_info si; 17000 uint8_t sense_key = scsi_sense_key(sense_datap); 17001 17002 ASSERT(un != NULL); 17003 ASSERT(mutex_owned(SD_MUTEX(un))); 17004 ASSERT(bp != NULL); 17005 ASSERT(xp != NULL); 17006 ASSERT(pktp != NULL); 17007 17008 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17009 17010 /* 17011 * Undecoded sense key. Attempt retries and hope that will fix 17012 * the problem. Otherwise, we're dead. 17013 */ 17014 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17015 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17016 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17017 } 17018 17019 si.ssi_severity = SCSI_ERR_FATAL; 17020 si.ssi_pfa_flag = FALSE; 17021 17022 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17023 &si, EIO, (clock_t)0, NULL); 17024 } 17025 17026 17027 17028 /* 17029 * Function: sd_print_retry_msg 17030 * 17031 * Description: Print a message indicating the retry action being taken. 17032 * 17033 * Arguments: un - ptr to associated softstate 17034 * bp - ptr to buf(9S) for the command 17035 * arg - not used. 
17036 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17037 * or SD_NO_RETRY_ISSUED 17038 * 17039 * Context: May be called from interrupt context 17040 */ 17041 /* ARGSUSED */ 17042 static void 17043 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 17044 { 17045 struct sd_xbuf *xp; 17046 struct scsi_pkt *pktp; 17047 char *reasonp; 17048 char *msgp; 17049 17050 ASSERT(un != NULL); 17051 ASSERT(mutex_owned(SD_MUTEX(un))); 17052 ASSERT(bp != NULL); 17053 pktp = SD_GET_PKTP(bp); 17054 ASSERT(pktp != NULL); 17055 xp = SD_GET_XBUF(bp); 17056 ASSERT(xp != NULL); 17057 17058 ASSERT(!mutex_owned(&un->un_pm_mutex)); 17059 mutex_enter(&un->un_pm_mutex); 17060 if ((un->un_state == SD_STATE_SUSPENDED) || 17061 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 17062 (pktp->pkt_flags & FLAG_SILENT)) { 17063 mutex_exit(&un->un_pm_mutex); 17064 goto update_pkt_reason; 17065 } 17066 mutex_exit(&un->un_pm_mutex); 17067 17068 /* 17069 * Suppress messages if they are all the same pkt_reason; with 17070 * TQ, many (up to 256) are returned with the same pkt_reason. 17071 * If we are in panic, then suppress the retry messages. 17072 */ 17073 switch (flag) { 17074 case SD_NO_RETRY_ISSUED: 17075 msgp = "giving up"; 17076 break; 17077 case SD_IMMEDIATE_RETRY_ISSUED: 17078 case SD_DELAYED_RETRY_ISSUED: 17079 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 17080 ((pktp->pkt_reason == un->un_last_pkt_reason) && 17081 (sd_error_level != SCSI_ERR_ALL))) { 17082 return; 17083 } 17084 msgp = "retrying command"; 17085 break; 17086 default: 17087 goto update_pkt_reason; 17088 } 17089 17090 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 17091 scsi_rname(pktp->pkt_reason)); 17092 17093 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17094 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 17095 17096 update_pkt_reason: 17097 /* 17098 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 17099 * This is to prevent multiple console messages for the same failure 17100 * condition. Note that un->un_last_pkt_reason is NOT restored if & 17101 * when the command is retried successfully because there still may be 17102 * more commands coming back with the same value of pktp->pkt_reason. 17103 */ 17104 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 17105 un->un_last_pkt_reason = pktp->pkt_reason; 17106 } 17107 } 17108 17109 17110 /* 17111 * Function: sd_print_cmd_incomplete_msg 17112 * 17113 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 17114 * 17115 * Arguments: un - ptr to associated softstate 17116 * bp - ptr to buf(9S) for the command 17117 * arg - passed to sd_print_retry_msg() 17118 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17119 * or SD_NO_RETRY_ISSUED 17120 * 17121 * Context: May be called from interrupt context 17122 */ 17123 17124 static void 17125 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 17126 int code) 17127 { 17128 dev_info_t *dip; 17129 17130 ASSERT(un != NULL); 17131 ASSERT(mutex_owned(SD_MUTEX(un))); 17132 ASSERT(bp != NULL); 17133 17134 switch (code) { 17135 case SD_NO_RETRY_ISSUED: 17136 /* Command was failed. Someone turned off this target? 
*/ 17137 if (un->un_state != SD_STATE_OFFLINE) { 17138 /* 17139 * Suppress message if we are detaching and 17140 * device has been disconnected 17141 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 17142 * private interface and not part of the DDI 17143 */ 17144 dip = un->un_sd->sd_dev; 17145 if (!(DEVI_IS_DETACHING(dip) && 17146 DEVI_IS_DEVICE_REMOVED(dip))) { 17147 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17148 "disk not responding to selection\n"); 17149 } 17150 New_state(un, SD_STATE_OFFLINE); 17151 } 17152 break; 17153 17154 case SD_DELAYED_RETRY_ISSUED: 17155 case SD_IMMEDIATE_RETRY_ISSUED: 17156 default: 17157 /* Command was successfully queued for retry */ 17158 sd_print_retry_msg(un, bp, arg, code); 17159 break; 17160 } 17161 } 17162 17163 17164 /* 17165 * Function: sd_pkt_reason_cmd_incomplete 17166 * 17167 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 17168 * 17169 * Context: May be called from interrupt context 17170 */ 17171 17172 static void 17173 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 17174 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17175 { 17176 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 17177 17178 ASSERT(un != NULL); 17179 ASSERT(mutex_owned(SD_MUTEX(un))); 17180 ASSERT(bp != NULL); 17181 ASSERT(xp != NULL); 17182 ASSERT(pktp != NULL); 17183 17184 /* Do not do a reset if selection did not complete */ 17185 /* Note: Should this not just check the bit? */ 17186 if (pktp->pkt_state != STATE_GOT_BUS) { 17187 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17188 sd_reset_target(un, pktp); 17189 } 17190 17191 /* 17192 * If the target was not successfully selected, then set 17193 * SD_RETRIES_FAILFAST to indicate that we lost communication 17194 * with the target, and further retries and/or commands are 17195 * likely to take a long time. 17196 */ 17197 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 17198 flag |= SD_RETRIES_FAILFAST; 17199 } 17200 17201 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17202 17203 sd_retry_command(un, bp, flag, 17204 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17205 } 17206 17207 17208 17209 /* 17210 * Function: sd_pkt_reason_cmd_tran_err 17211 * 17212 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 17213 * 17214 * Context: May be called from interrupt context 17215 */ 17216 17217 static void 17218 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 17219 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17220 { 17221 ASSERT(un != NULL); 17222 ASSERT(mutex_owned(SD_MUTEX(un))); 17223 ASSERT(bp != NULL); 17224 ASSERT(xp != NULL); 17225 ASSERT(pktp != NULL); 17226 17227 /* 17228 * Do not reset if we got a parity error, or if 17229 * selection did not complete. 17230 */ 17231 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17232 /* Note: Should this not just check the bit for pkt_state? */ 17233 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 17234 (pktp->pkt_state != STATE_GOT_BUS)) { 17235 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17236 sd_reset_target(un, pktp); 17237 } 17238 17239 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17240 17241 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17242 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17243 } 17244 17245 17246 17247 /* 17248 * Function: sd_pkt_reason_cmd_reset 17249 * 17250 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
17251 * 17252 * Context: May be called from interrupt context 17253 */ 17254 17255 static void 17256 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 17257 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17258 { 17259 ASSERT(un != NULL); 17260 ASSERT(mutex_owned(SD_MUTEX(un))); 17261 ASSERT(bp != NULL); 17262 ASSERT(xp != NULL); 17263 ASSERT(pktp != NULL); 17264 17265 /* The target may still be running the command, so try to reset. */ 17266 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17267 sd_reset_target(un, pktp); 17268 17269 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17270 17271 /* 17272 * If pkt_reason is CMD_RESET chances are that this pkt got 17273 * reset because another target on this bus caused it. The target 17274 * that caused it should get CMD_TIMEOUT with pkt_statistics 17275 * of STAT_TIMEOUT/STAT_DEV_RESET. 17276 */ 17277 17278 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17279 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17280 } 17281 17282 17283 17284 17285 /* 17286 * Function: sd_pkt_reason_cmd_aborted 17287 * 17288 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17289 * 17290 * Context: May be called from interrupt context 17291 */ 17292 17293 static void 17294 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17295 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17296 { 17297 ASSERT(un != NULL); 17298 ASSERT(mutex_owned(SD_MUTEX(un))); 17299 ASSERT(bp != NULL); 17300 ASSERT(xp != NULL); 17301 ASSERT(pktp != NULL); 17302 17303 /* The target may still be running the command, so try to reset. */ 17304 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17305 sd_reset_target(un, pktp); 17306 17307 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17308 17309 /* 17310 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17311 * aborted because another target on this bus caused it. The target 17312 * that caused it should get CMD_TIMEOUT with pkt_statistics 17313 * of STAT_TIMEOUT/STAT_DEV_RESET. 17314 */ 17315 17316 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17317 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17318 } 17319 17320 17321 17322 /* 17323 * Function: sd_pkt_reason_cmd_timeout 17324 * 17325 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17326 * 17327 * Context: May be called from interrupt context 17328 */ 17329 17330 static void 17331 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17332 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17333 { 17334 ASSERT(un != NULL); 17335 ASSERT(mutex_owned(SD_MUTEX(un))); 17336 ASSERT(bp != NULL); 17337 ASSERT(xp != NULL); 17338 ASSERT(pktp != NULL); 17339 17340 17341 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17342 sd_reset_target(un, pktp); 17343 17344 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17345 17346 /* 17347 * A command timeout indicates that we could not establish 17348 * communication with the target, so set SD_RETRIES_FAILFAST 17349 * as further retries/commands are likely to take a long time. 17350 */ 17351 sd_retry_command(un, bp, 17352 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17353 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17354 } 17355 17356 17357 17358 /* 17359 * Function: sd_pkt_reason_cmd_unx_bus_free 17360 * 17361 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
17362 * 17363 * Context: May be called from interrupt context 17364 */ 17365 17366 static void 17367 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17368 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17369 { 17370 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17371 17372 ASSERT(un != NULL); 17373 ASSERT(mutex_owned(SD_MUTEX(un))); 17374 ASSERT(bp != NULL); 17375 ASSERT(xp != NULL); 17376 ASSERT(pktp != NULL); 17377 17378 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17379 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17380 17381 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17382 sd_print_retry_msg : NULL; 17383 17384 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17385 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17386 } 17387 17388 17389 /* 17390 * Function: sd_pkt_reason_cmd_tag_reject 17391 * 17392 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 17393 * 17394 * Context: May be called from interrupt context 17395 */ 17396 17397 static void 17398 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17399 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17400 { 17401 ASSERT(un != NULL); 17402 ASSERT(mutex_owned(SD_MUTEX(un))); 17403 ASSERT(bp != NULL); 17404 ASSERT(xp != NULL); 17405 ASSERT(pktp != NULL); 17406 17407 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17408 pktp->pkt_flags = 0; 17409 un->un_tagflags = 0; 17410 if (un->un_f_opt_queueing == TRUE) { 17411 un->un_throttle = min(un->un_throttle, 3); 17412 } else { 17413 un->un_throttle = 1; 17414 } 17415 mutex_exit(SD_MUTEX(un)); 17416 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17417 mutex_enter(SD_MUTEX(un)); 17418 17419 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17420 17421 /* Legacy behavior not to check retry counts here. */ 17422 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17423 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17424 } 17425 17426 17427 /* 17428 * Function: sd_pkt_reason_default 17429 * 17430 * Description: Default recovery actions for SCSA pkt_reason values that 17431 * do not have more explicit recovery actions. 17432 * 17433 * Context: May be called from interrupt context 17434 */ 17435 17436 static void 17437 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17438 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17439 { 17440 ASSERT(un != NULL); 17441 ASSERT(mutex_owned(SD_MUTEX(un))); 17442 ASSERT(bp != NULL); 17443 ASSERT(xp != NULL); 17444 ASSERT(pktp != NULL); 17445 17446 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17447 sd_reset_target(un, pktp); 17448 17449 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17450 17451 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17452 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17453 } 17454 17455 17456 17457 /* 17458 * Function: sd_pkt_status_check_condition 17459 * 17460 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
17461 * 17462 * Context: May be called from interrupt context 17463 */ 17464 17465 static void 17466 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17467 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17468 { 17469 ASSERT(un != NULL); 17470 ASSERT(mutex_owned(SD_MUTEX(un))); 17471 ASSERT(bp != NULL); 17472 ASSERT(xp != NULL); 17473 ASSERT(pktp != NULL); 17474 17475 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17476 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17477 17478 /* 17479 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17480 * command will be retried after the request sense). Otherwise, retry 17481 * the command. Note: we are issuing the request sense even though the 17482 * retry limit may have been reached for the failed command. 17483 */ 17484 if (un->un_f_arq_enabled == FALSE) { 17485 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17486 "no ARQ, sending request sense command\n"); 17487 sd_send_request_sense_command(un, bp, pktp); 17488 } else { 17489 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17490 "ARQ,retrying request sense command\n"); 17491 #if defined(__i386) || defined(__amd64) 17492 /* 17493 * The SD_RETRY_DELAY value need to be adjusted here 17494 * when SD_RETRY_DELAY change in sddef.h 17495 */ 17496 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17497 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 17498 NULL); 17499 #else 17500 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 17501 EIO, SD_RETRY_DELAY, NULL); 17502 #endif 17503 } 17504 17505 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 17506 } 17507 17508 17509 /* 17510 * Function: sd_pkt_status_busy 17511 * 17512 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 17513 * 17514 * Context: May be called from interrupt context 17515 */ 17516 17517 static void 17518 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17519 struct scsi_pkt *pktp) 17520 { 17521 ASSERT(un != NULL); 17522 ASSERT(mutex_owned(SD_MUTEX(un))); 17523 ASSERT(bp != NULL); 17524 ASSERT(xp != NULL); 17525 ASSERT(pktp != NULL); 17526 17527 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17528 "sd_pkt_status_busy: entry\n"); 17529 17530 /* If retries are exhausted, just fail the command. */ 17531 if (xp->xb_retry_count >= un->un_busy_retry_count) { 17532 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17533 "device busy too long\n"); 17534 sd_return_failed_command(un, bp, EIO); 17535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17536 "sd_pkt_status_busy: exit\n"); 17537 return; 17538 } 17539 xp->xb_retry_count++; 17540 17541 /* 17542 * Try to reset the target. However, we do not want to perform 17543 * more than one reset if the device continues to fail. The reset 17544 * will be performed when the retry count reaches the reset 17545 * threshold. This threshold should be set such that at least 17546 * one retry is issued before the reset is performed. 17547 */ 17548 if (xp->xb_retry_count == 17549 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 17550 int rval = 0; 17551 mutex_exit(SD_MUTEX(un)); 17552 if (un->un_f_allow_bus_device_reset == TRUE) { 17553 /* 17554 * First try to reset the LUN; if we cannot then 17555 * try to reset the target. 
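 *			 If both of those fail, fall back to a RESET_ALL
 *			 before giving up and failing the command.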
17556 */ 17557 if (un->un_f_lun_reset_enabled == TRUE) { 17558 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17559 "sd_pkt_status_busy: RESET_LUN\n"); 17560 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17561 } 17562 if (rval == 0) { 17563 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17564 "sd_pkt_status_busy: RESET_TARGET\n"); 17565 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17566 } 17567 } 17568 if (rval == 0) { 17569 /* 17570 * If the RESET_LUN and/or RESET_TARGET failed, 17571 * try RESET_ALL 17572 */ 17573 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17574 "sd_pkt_status_busy: RESET_ALL\n"); 17575 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17576 } 17577 mutex_enter(SD_MUTEX(un)); 17578 if (rval == 0) { 17579 /* 17580 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17581 * At this point we give up & fail the command. 17582 */ 17583 sd_return_failed_command(un, bp, EIO); 17584 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17585 "sd_pkt_status_busy: exit (failed cmd)\n"); 17586 return; 17587 } 17588 } 17589 17590 /* 17591 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17592 * we have already checked the retry counts above. 17593 */ 17594 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17595 EIO, un->un_busy_timeout, NULL); 17596 17597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17598 "sd_pkt_status_busy: exit\n"); 17599 } 17600 17601 17602 /* 17603 * Function: sd_pkt_status_reservation_conflict 17604 * 17605 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17606 * command status. 17607 * 17608 * Context: May be called from interrupt context 17609 */ 17610 17611 static void 17612 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17613 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17614 { 17615 ASSERT(un != NULL); 17616 ASSERT(mutex_owned(SD_MUTEX(un))); 17617 ASSERT(bp != NULL); 17618 ASSERT(xp != NULL); 17619 ASSERT(pktp != NULL); 17620 17621 /* 17622 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17623 * conflict could be due to various reasons like incorrect keys, not 17624 * registered or not reserved etc. So, we return EACCES to the caller. 17625 */ 17626 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17627 int cmd = SD_GET_PKT_OPCODE(pktp); 17628 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17629 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17630 sd_return_failed_command(un, bp, EACCES); 17631 return; 17632 } 17633 } 17634 17635 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17636 17637 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17638 if (sd_failfast_enable != 0) { 17639 /* By definition, we must panic here.... */ 17640 sd_panic_for_res_conflict(un); 17641 /*NOTREACHED*/ 17642 } 17643 SD_ERROR(SD_LOG_IO, un, 17644 "sd_handle_resv_conflict: Disk Reserved\n"); 17645 sd_return_failed_command(un, bp, EACCES); 17646 return; 17647 } 17648 17649 /* 17650 * 1147670: retry only if sd_retry_on_reservation_conflict 17651 * property is set (default is 1). Retries will not succeed 17652 * on a disk reserved by another initiator. HA systems 17653 * may reset this via sd.conf to avoid these retries. 17654 * 17655 * Note: The legacy return code for this failure is EIO, however EACCES 17656 * seems more appropriate for a reservation conflict. 
17657 */ 17658 if (sd_retry_on_reservation_conflict == 0) { 17659 SD_ERROR(SD_LOG_IO, un, 17660 "sd_handle_resv_conflict: Device Reserved\n"); 17661 sd_return_failed_command(un, bp, EIO); 17662 return; 17663 } 17664 17665 /* 17666 * Retry the command if we can. 17667 * 17668 * Note: The legacy return code for this failure is EIO, however EACCES 17669 * seems more appropriate for a reservation conflict. 17670 */ 17671 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17672 (clock_t)2, NULL); 17673 } 17674 17675 17676 17677 /* 17678 * Function: sd_pkt_status_qfull 17679 * 17680 * Description: Handle a QUEUE FULL condition from the target. This can 17681 * occur if the HBA does not handle the queue full condition. 17682 * (Basically this means third-party HBAs as Sun HBAs will 17683 * handle the queue full condition.) Note that if there are 17684 * some commands already in the transport, then the queue full 17685 * has occurred because the queue for this nexus is actually 17686 * full. If there are no commands in the transport, then the 17687 * queue full is resulting from some other initiator or lun 17688 * consuming all the resources at the target. 17689 * 17690 * Context: May be called from interrupt context 17691 */ 17692 17693 static void 17694 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17695 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17696 { 17697 ASSERT(un != NULL); 17698 ASSERT(mutex_owned(SD_MUTEX(un))); 17699 ASSERT(bp != NULL); 17700 ASSERT(xp != NULL); 17701 ASSERT(pktp != NULL); 17702 17703 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17704 "sd_pkt_status_qfull: entry\n"); 17705 17706 /* 17707 * Just lower the QFULL throttle and retry the command. Note that 17708 * we do not limit the number of retries here. 17709 */ 17710 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17711 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17712 SD_RESTART_TIMEOUT, NULL); 17713 17714 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17715 "sd_pkt_status_qfull: exit\n"); 17716 } 17717 17718 17719 /* 17720 * Function: sd_reset_target 17721 * 17722 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17723 * RESET_TARGET, or RESET_ALL. 17724 * 17725 * Context: May be called under interrupt context. 17726 */ 17727 17728 static void 17729 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17730 { 17731 int rval = 0; 17732 17733 ASSERT(un != NULL); 17734 ASSERT(mutex_owned(SD_MUTEX(un))); 17735 ASSERT(pktp != NULL); 17736 17737 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17738 17739 /* 17740 * No need to reset if the transport layer has already done so. 
17741 */ 17742 if ((pktp->pkt_statistics & 17743 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17744 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17745 "sd_reset_target: no reset\n"); 17746 return; 17747 } 17748 17749 mutex_exit(SD_MUTEX(un)); 17750 17751 if (un->un_f_allow_bus_device_reset == TRUE) { 17752 if (un->un_f_lun_reset_enabled == TRUE) { 17753 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17754 "sd_reset_target: RESET_LUN\n"); 17755 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17756 } 17757 if (rval == 0) { 17758 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17759 "sd_reset_target: RESET_TARGET\n"); 17760 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17761 } 17762 } 17763 17764 if (rval == 0) { 17765 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17766 "sd_reset_target: RESET_ALL\n"); 17767 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17768 } 17769 17770 mutex_enter(SD_MUTEX(un)); 17771 17772 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17773 } 17774 17775 /* 17776 * Function: sd_target_change_task 17777 * 17778 * Description: Handle dynamic target change 17779 * 17780 * Context: Executes in a taskq() thread context 17781 */ 17782 static void 17783 sd_target_change_task(void *arg) 17784 { 17785 struct sd_lun *un = arg; 17786 uint64_t capacity; 17787 diskaddr_t label_cap; 17788 uint_t lbasize; 17789 17790 ASSERT(un != NULL); 17791 ASSERT(!mutex_owned(SD_MUTEX(un))); 17792 17793 if ((un->un_f_blockcount_is_valid == FALSE) || 17794 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 17795 return; 17796 } 17797 17798 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 17799 &lbasize, SD_PATH_DIRECT) != 0) { 17800 SD_ERROR(SD_LOG_ERROR, un, 17801 "sd_target_change_task: fail to read capacity\n"); 17802 return; 17803 } 17804 17805 mutex_enter(SD_MUTEX(un)); 17806 if (capacity <= un->un_blockcount) { 17807 mutex_exit(SD_MUTEX(un)); 17808 return; 17809 } 17810 17811 sd_update_block_info(un, lbasize, capacity); 17812 mutex_exit(SD_MUTEX(un)); 17813 17814 /* 17815 * If lun is EFI labeled and lun capacity is greater than the 17816 * capacity contained in the label, log a sys event. 17817 */ 17818 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 17819 (void*)SD_PATH_DIRECT) == 0) { 17820 mutex_enter(SD_MUTEX(un)); 17821 if (un->un_f_blockcount_is_valid && 17822 un->un_blockcount > label_cap) { 17823 mutex_exit(SD_MUTEX(un)); 17824 sd_log_lun_expansion_event(un, KM_SLEEP); 17825 } else { 17826 mutex_exit(SD_MUTEX(un)); 17827 } 17828 } 17829 } 17830 17831 /* 17832 * Function: sd_log_lun_expansion_event 17833 * 17834 * Description: Log lun expansion sys event 17835 * 17836 * Context: Never called from interrupt context 17837 */ 17838 static void 17839 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 17840 { 17841 int err; 17842 char *path; 17843 nvlist_t *dle_attr_list; 17844 17845 /* Allocate and build sysevent attribute list */ 17846 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 17847 if (err != 0) { 17848 SD_ERROR(SD_LOG_ERROR, un, 17849 "sd_log_lun_expansion_event: fail to allocate space\n"); 17850 return; 17851 } 17852 17853 path = kmem_alloc(MAXPATHLEN, km_flag); 17854 if (path == NULL) { 17855 nvlist_free(dle_attr_list); 17856 SD_ERROR(SD_LOG_ERROR, un, 17857 "sd_log_lun_expansion_event: fail to allocate space\n"); 17858 return; 17859 } 17860 /* 17861 * Add path attribute to identify the lun. 17862 * We are using minor node 'a' as the sysevent attribute. 
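 *	 The resulting attribute is the /devices path of the minor node,
 *	 for example (the path shown is illustrative only):
 *
 *	     /devices/pci@0,0/pci1000,30a0@3/sd@1,0:a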
17863 */ 17864 (void) snprintf(path, MAXPATHLEN, "/devices"); 17865 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 17866 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 17867 ":a"); 17868 17869 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 17870 if (err != 0) { 17871 nvlist_free(dle_attr_list); 17872 kmem_free(path, MAXPATHLEN); 17873 SD_ERROR(SD_LOG_ERROR, un, 17874 "sd_log_lun_expansion_event: fail to add attribute\n"); 17875 return; 17876 } 17877 17878 /* Log dynamic lun expansion sysevent */ 17879 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 17880 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 17881 if (err != DDI_SUCCESS) { 17882 SD_ERROR(SD_LOG_ERROR, un, 17883 "sd_log_lun_expansion_event: fail to log sysevent\n"); 17884 } 17885 17886 nvlist_free(dle_attr_list); 17887 kmem_free(path, MAXPATHLEN); 17888 } 17889 17890 /* 17891 * Function: sd_media_change_task 17892 * 17893 * Description: Recovery action for CDROM to become available. 17894 * 17895 * Context: Executes in a taskq() thread context 17896 */ 17897 17898 static void 17899 sd_media_change_task(void *arg) 17900 { 17901 struct scsi_pkt *pktp = arg; 17902 struct sd_lun *un; 17903 struct buf *bp; 17904 struct sd_xbuf *xp; 17905 int err = 0; 17906 int retry_count = 0; 17907 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17908 struct sd_sense_info si; 17909 17910 ASSERT(pktp != NULL); 17911 bp = (struct buf *)pktp->pkt_private; 17912 ASSERT(bp != NULL); 17913 xp = SD_GET_XBUF(bp); 17914 ASSERT(xp != NULL); 17915 un = SD_GET_UN(bp); 17916 ASSERT(un != NULL); 17917 ASSERT(!mutex_owned(SD_MUTEX(un))); 17918 ASSERT(un->un_f_monitor_media_state); 17919 17920 si.ssi_severity = SCSI_ERR_INFO; 17921 si.ssi_pfa_flag = FALSE; 17922 17923 /* 17924 * When a reset is issued on a CDROM, it takes a long time to 17925 * recover. First few attempts to read capacity and other things 17926 * related to handling unit attention fail (with a ASC 0x4 and 17927 * ASCQ 0x1). In that case we want to do enough retries and we want 17928 * to limit the retries in other cases of genuine failures like 17929 * no media in drive. 17930 */ 17931 while (retry_count++ < retry_limit) { 17932 if ((err = sd_handle_mchange(un)) == 0) { 17933 break; 17934 } 17935 if (err == EAGAIN) { 17936 retry_limit = SD_UNIT_ATTENTION_RETRY; 17937 } 17938 /* Sleep for 0.5 sec. & try again */ 17939 delay(drv_usectohz(500000)); 17940 } 17941 17942 /* 17943 * Dispatch (retry or fail) the original command here, 17944 * along with appropriate console messages.... 17945 * 17946 * Must grab the mutex before calling sd_retry_command, 17947 * sd_print_sense_msg and sd_return_failed_command. 17948 */ 17949 mutex_enter(SD_MUTEX(un)); 17950 if (err != SD_CMD_SUCCESS) { 17951 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17952 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17953 si.ssi_severity = SCSI_ERR_FATAL; 17954 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17955 sd_return_failed_command(un, bp, EIO); 17956 } else { 17957 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17958 &si, EIO, (clock_t)0, NULL); 17959 } 17960 mutex_exit(SD_MUTEX(un)); 17961 } 17962 17963 17964 17965 /* 17966 * Function: sd_handle_mchange 17967 * 17968 * Description: Perform geometry validation & other recovery when CDROM 17969 * has been removed from drive. 
17970 * 17971 * Return Code: 0 for success 17972 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17973 * sd_send_scsi_READ_CAPACITY() 17974 * 17975 * Context: Executes in a taskq() thread context 17976 */ 17977 17978 static int 17979 sd_handle_mchange(struct sd_lun *un) 17980 { 17981 uint64_t capacity; 17982 uint32_t lbasize; 17983 int rval; 17984 17985 ASSERT(!mutex_owned(SD_MUTEX(un))); 17986 ASSERT(un->un_f_monitor_media_state); 17987 17988 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17989 SD_PATH_DIRECT_PRIORITY)) != 0) { 17990 return (rval); 17991 } 17992 17993 mutex_enter(SD_MUTEX(un)); 17994 sd_update_block_info(un, lbasize, capacity); 17995 17996 if (un->un_errstats != NULL) { 17997 struct sd_errstats *stp = 17998 (struct sd_errstats *)un->un_errstats->ks_data; 17999 stp->sd_capacity.value.ui64 = (uint64_t) 18000 ((uint64_t)un->un_blockcount * 18001 (uint64_t)un->un_tgt_blocksize); 18002 } 18003 18004 18005 /* 18006 * Check if the media in the device is writable or not 18007 */ 18008 if (ISCD(un)) 18009 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 18010 18011 /* 18012 * Note: Maybe let the strategy/partitioning chain worry about getting 18013 * valid geometry. 18014 */ 18015 mutex_exit(SD_MUTEX(un)); 18016 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 18017 18018 18019 if (cmlb_validate(un->un_cmlbhandle, 0, 18020 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 18021 return (EIO); 18022 } else { 18023 if (un->un_f_pkstats_enabled) { 18024 sd_set_pstats(un); 18025 SD_TRACE(SD_LOG_IO_PARTITION, un, 18026 "sd_handle_mchange: un:0x%p pstats created and " 18027 "set\n", un); 18028 } 18029 } 18030 18031 18032 /* 18033 * Try to lock the door 18034 */ 18035 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 18036 SD_PATH_DIRECT_PRIORITY)); 18037 } 18038 18039 18040 /* 18041 * Function: sd_send_scsi_DOORLOCK 18042 * 18043 * Description: Issue the scsi DOOR LOCK command 18044 * 18045 * Arguments: un - pointer to driver soft state (unit) structure for 18046 * this target. 18047 * flag - SD_REMOVAL_ALLOW 18048 * SD_REMOVAL_PREVENT 18049 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18050 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18051 * to use the USCSI "direct" chain and bypass the normal 18052 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18053 * command is issued as part of an error recovery action. 18054 * 18055 * Return Code: 0 - Success 18056 * errno return code from sd_send_scsi_cmd() 18057 * 18058 * Context: Can sleep. 18059 */ 18060 18061 static int 18062 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 18063 { 18064 union scsi_cdb cdb; 18065 struct uscsi_cmd ucmd_buf; 18066 struct scsi_extended_sense sense_buf; 18067 int status; 18068 18069 ASSERT(un != NULL); 18070 ASSERT(!mutex_owned(SD_MUTEX(un))); 18071 18072 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 18073 18074 /* already determined doorlock is not supported, fake success */ 18075 if (un->un_f_doorlock_supported == FALSE) { 18076 return (0); 18077 } 18078 18079 /* 18080 * If we are ejecting and see an SD_REMOVAL_PREVENT 18081 * ignore the command so we can complete the eject 18082 * operation. 
18083 */ 18084 if (flag == SD_REMOVAL_PREVENT) { 18085 mutex_enter(SD_MUTEX(un)); 18086 if (un->un_f_ejecting == TRUE) { 18087 mutex_exit(SD_MUTEX(un)); 18088 return (EAGAIN); 18089 } 18090 mutex_exit(SD_MUTEX(un)); 18091 } 18092 18093 bzero(&cdb, sizeof (cdb)); 18094 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18095 18096 cdb.scc_cmd = SCMD_DOORLOCK; 18097 cdb.cdb_opaque[4] = (uchar_t)flag; 18098 18099 ucmd_buf.uscsi_cdb = (char *)&cdb; 18100 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18101 ucmd_buf.uscsi_bufaddr = NULL; 18102 ucmd_buf.uscsi_buflen = 0; 18103 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18104 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18105 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18106 ucmd_buf.uscsi_timeout = 15; 18107 18108 SD_TRACE(SD_LOG_IO, un, 18109 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 18110 18111 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18112 UIO_SYSSPACE, path_flag); 18113 18114 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 18115 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18116 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 18117 /* fake success and skip subsequent doorlock commands */ 18118 un->un_f_doorlock_supported = FALSE; 18119 return (0); 18120 } 18121 18122 return (status); 18123 } 18124 18125 /* 18126 * Function: sd_send_scsi_READ_CAPACITY 18127 * 18128 * Description: This routine uses the scsi READ CAPACITY command to determine 18129 * the device capacity in number of blocks and the device native 18130 * block size. If this function returns a failure, then the 18131 * values in *capp and *lbap are undefined. If the capacity 18132 * returned is 0xffffffff then the lun is too large for a 18133 * normal READ CAPACITY command and the results of a 18134 * READ CAPACITY 16 will be used instead. 18135 * 18136 * Arguments: un - ptr to soft state struct for the target 18137 * capp - ptr to unsigned 64-bit variable to receive the 18138 * capacity value from the command. 18139 * lbap - ptr to unsigned 32-bit varaible to receive the 18140 * block size value from the command 18141 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18142 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18143 * to use the USCSI "direct" chain and bypass the normal 18144 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18145 * command is issued as part of an error recovery action. 18146 * 18147 * Return Code: 0 - Success 18148 * EIO - IO error 18149 * EACCES - Reservation conflict detected 18150 * EAGAIN - Device is becoming ready 18151 * errno return code from sd_send_scsi_cmd() 18152 * 18153 * Context: Can sleep. Blocks until command completes. 18154 */ 18155 18156 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 18157 18158 static int 18159 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 18160 int path_flag) 18161 { 18162 struct scsi_extended_sense sense_buf; 18163 struct uscsi_cmd ucmd_buf; 18164 union scsi_cdb cdb; 18165 uint32_t *capacity_buf; 18166 uint64_t capacity; 18167 uint32_t lbasize; 18168 int status; 18169 18170 ASSERT(un != NULL); 18171 ASSERT(!mutex_owned(SD_MUTEX(un))); 18172 ASSERT(capp != NULL); 18173 ASSERT(lbap != NULL); 18174 18175 SD_TRACE(SD_LOG_IO, un, 18176 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 18177 18178 /* 18179 * First send a READ_CAPACITY command to the target. 18180 * (This command is mandatory under SCSI-2.) 18181 * 18182 * Set up the CDB for the READ_CAPACITY command. 
The Partial 18183 * Medium Indicator bit is cleared. The address field must be 18184 * zero if the PMI bit is zero. 18185 */ 18186 bzero(&cdb, sizeof (cdb)); 18187 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18188 18189 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 18190 18191 cdb.scc_cmd = SCMD_READ_CAPACITY; 18192 18193 ucmd_buf.uscsi_cdb = (char *)&cdb; 18194 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18195 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 18196 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 18197 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18198 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18199 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18200 ucmd_buf.uscsi_timeout = 60; 18201 18202 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18203 UIO_SYSSPACE, path_flag); 18204 18205 switch (status) { 18206 case 0: 18207 /* Return failure if we did not get valid capacity data. */ 18208 if (ucmd_buf.uscsi_resid != 0) { 18209 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18210 return (EIO); 18211 } 18212 18213 /* 18214 * Read capacity and block size from the READ CAPACITY 10 data. 18215 * This data may be adjusted later due to device specific 18216 * issues. 18217 * 18218 * According to the SCSI spec, the READ CAPACITY 10 18219 * command returns the following: 18220 * 18221 * bytes 0-3: Maximum logical block address available. 18222 * (MSB in byte:0 & LSB in byte:3) 18223 * 18224 * bytes 4-7: Block length in bytes 18225 * (MSB in byte:4 & LSB in byte:7) 18226 * 18227 */ 18228 capacity = BE_32(capacity_buf[0]); 18229 lbasize = BE_32(capacity_buf[1]); 18230 18231 /* 18232 * Done with capacity_buf 18233 */ 18234 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18235 18236 /* 18237 * if the reported capacity is set to all 0xf's, then 18238 * this disk is too large and requires SBC-2 commands. 18239 * Reissue the request using READ CAPACITY 16. 18240 */ 18241 if (capacity == 0xffffffff) { 18242 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18243 &lbasize, path_flag); 18244 if (status != 0) { 18245 return (status); 18246 } 18247 } 18248 break; /* Success! */ 18249 case EIO: 18250 switch (ucmd_buf.uscsi_status) { 18251 case STATUS_RESERVATION_CONFLICT: 18252 status = EACCES; 18253 break; 18254 case STATUS_CHECK: 18255 /* 18256 * Check condition; look for ASC/ASCQ of 0x04/0x01 18257 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18258 */ 18259 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18260 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18261 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18262 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18263 return (EAGAIN); 18264 } 18265 break; 18266 default: 18267 break; 18268 } 18269 /* FALLTHRU */ 18270 default: 18271 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18272 return (status); 18273 } 18274 18275 /* 18276 * Some ATAPI CD-ROM drives report inaccurate LBA size values 18277 * (2352 and 0 are common) so for these devices always force the value 18278 * to 2048 as required by the ATAPI specs. 18279 */ 18280 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 18281 lbasize = 2048; 18282 } 18283 18284 /* 18285 * Get the maximum LBA value from the READ CAPACITY data. 18286 * Here we assume that the Partial Medium Indicator (PMI) bit 18287 * was cleared when issuing the command. This means that the LBA 18288 * returned from the device is the LBA of the last logical block 18289 * on the logical unit. The actual logical block count will be 18290 * this value plus one. 
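	 *
	 * As a purely illustrative example, a device reporting a maximum
	 * LBA of 0x3ffff has 0x40000 logical blocks; with a 2048-byte
	 * lbasize and a 512-byte un_sys_blocksize the scaling below yields
	 *
	 *	(0x3ffff + 1) * (2048 / 512) = 0x100000 system blocks.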
18291 * 18292 * Currently the capacity is saved in terms of un->un_sys_blocksize, 18293 * so scale the capacity value to reflect this. 18294 */ 18295 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 18296 18297 /* 18298 * Copy the values from the READ CAPACITY command into the space 18299 * provided by the caller. 18300 */ 18301 *capp = capacity; 18302 *lbap = lbasize; 18303 18304 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 18305 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 18306 18307 /* 18308 * Both the lbasize and capacity from the device must be nonzero, 18309 * otherwise we assume that the values are not valid and return 18310 * failure to the caller. (4203735) 18311 */ 18312 if ((capacity == 0) || (lbasize == 0)) { 18313 return (EIO); 18314 } 18315 18316 return (0); 18317 } 18318 18319 /* 18320 * Function: sd_send_scsi_READ_CAPACITY_16 18321 * 18322 * Description: This routine uses the scsi READ CAPACITY 16 command to 18323 * determine the device capacity in number of blocks and the 18324 * device native block size. If this function returns a failure, 18325 * then the values in *capp and *lbap are undefined. 18326 * This routine should always be called by 18327 * sd_send_scsi_READ_CAPACITY which will appy any device 18328 * specific adjustments to capacity and lbasize. 18329 * 18330 * Arguments: un - ptr to soft state struct for the target 18331 * capp - ptr to unsigned 64-bit variable to receive the 18332 * capacity value from the command. 18333 * lbap - ptr to unsigned 32-bit varaible to receive the 18334 * block size value from the command 18335 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18336 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18337 * to use the USCSI "direct" chain and bypass the normal 18338 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 18339 * this command is issued as part of an error recovery 18340 * action. 18341 * 18342 * Return Code: 0 - Success 18343 * EIO - IO error 18344 * EACCES - Reservation conflict detected 18345 * EAGAIN - Device is becoming ready 18346 * errno return code from sd_send_scsi_cmd() 18347 * 18348 * Context: Can sleep. Blocks until command completes. 18349 */ 18350 18351 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 18352 18353 static int 18354 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 18355 uint32_t *lbap, int path_flag) 18356 { 18357 struct scsi_extended_sense sense_buf; 18358 struct uscsi_cmd ucmd_buf; 18359 union scsi_cdb cdb; 18360 uint64_t *capacity16_buf; 18361 uint64_t capacity; 18362 uint32_t lbasize; 18363 int status; 18364 18365 ASSERT(un != NULL); 18366 ASSERT(!mutex_owned(SD_MUTEX(un))); 18367 ASSERT(capp != NULL); 18368 ASSERT(lbap != NULL); 18369 18370 SD_TRACE(SD_LOG_IO, un, 18371 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 18372 18373 /* 18374 * First send a READ_CAPACITY_16 command to the target. 18375 * 18376 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 18377 * Medium Indicator bit is cleared. The address field must be 18378 * zero if the PMI bit is zero. 
18379 */ 18380 bzero(&cdb, sizeof (cdb)); 18381 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18382 18383 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 18384 18385 ucmd_buf.uscsi_cdb = (char *)&cdb; 18386 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 18387 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 18388 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 18389 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18390 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18391 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18392 ucmd_buf.uscsi_timeout = 60; 18393 18394 /* 18395 * Read Capacity (16) is a Service Action In command. One 18396 * command byte (0x9E) is overloaded for multiple operations, 18397 * with the second CDB byte specifying the desired operation 18398 */ 18399 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 18400 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 18401 18402 /* 18403 * Fill in allocation length field 18404 */ 18405 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 18406 18407 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18408 UIO_SYSSPACE, path_flag); 18409 18410 switch (status) { 18411 case 0: 18412 /* Return failure if we did not get valid capacity data. */ 18413 if (ucmd_buf.uscsi_resid > 20) { 18414 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18415 return (EIO); 18416 } 18417 18418 /* 18419 * Read capacity and block size from the READ CAPACITY 10 data. 18420 * This data may be adjusted later due to device specific 18421 * issues. 18422 * 18423 * According to the SCSI spec, the READ CAPACITY 10 18424 * command returns the following: 18425 * 18426 * bytes 0-7: Maximum logical block address available. 18427 * (MSB in byte:0 & LSB in byte:7) 18428 * 18429 * bytes 8-11: Block length in bytes 18430 * (MSB in byte:8 & LSB in byte:11) 18431 * 18432 */ 18433 capacity = BE_64(capacity16_buf[0]); 18434 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 18435 18436 /* 18437 * Done with capacity16_buf 18438 */ 18439 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18440 18441 /* 18442 * if the reported capacity is set to all 0xf's, then 18443 * this disk is too large. This could only happen with 18444 * a device that supports LBAs larger than 64 bits which 18445 * are not defined by any current T10 standards. 18446 */ 18447 if (capacity == 0xffffffffffffffff) { 18448 return (EIO); 18449 } 18450 break; /* Success! */ 18451 case EIO: 18452 switch (ucmd_buf.uscsi_status) { 18453 case STATUS_RESERVATION_CONFLICT: 18454 status = EACCES; 18455 break; 18456 case STATUS_CHECK: 18457 /* 18458 * Check condition; look for ASC/ASCQ of 0x04/0x01 18459 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18460 */ 18461 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18462 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18463 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18464 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18465 return (EAGAIN); 18466 } 18467 break; 18468 default: 18469 break; 18470 } 18471 /* FALLTHRU */ 18472 default: 18473 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18474 return (status); 18475 } 18476 18477 *capp = capacity; 18478 *lbap = lbasize; 18479 18480 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 18481 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 18482 18483 return (0); 18484 } 18485 18486 18487 /* 18488 * Function: sd_send_scsi_START_STOP_UNIT 18489 * 18490 * Description: Issue a scsi START STOP UNIT command to the target. 
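 *
 *		For example, the spin-up recovery path in
 *		sd_start_stop_unit_task() (below) issues:
 *
 *		    (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
 *			SD_PATH_DIRECT_PRIORITY);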
18491 * 18492 * Arguments: un - pointer to driver soft state (unit) structure for 18493 * this target. 18494 * flag - SD_TARGET_START 18495 * SD_TARGET_STOP 18496 * SD_TARGET_EJECT 18497 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18498 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18499 * to use the USCSI "direct" chain and bypass the normal 18500 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18501 * command is issued as part of an error recovery action. 18502 * 18503 * Return Code: 0 - Success 18504 * EIO - IO error 18505 * EACCES - Reservation conflict detected 18506 * ENXIO - Not Ready, medium not present 18507 * errno return code from sd_send_scsi_cmd() 18508 * 18509 * Context: Can sleep. 18510 */ 18511 18512 static int 18513 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18514 { 18515 struct scsi_extended_sense sense_buf; 18516 union scsi_cdb cdb; 18517 struct uscsi_cmd ucmd_buf; 18518 int status; 18519 18520 ASSERT(un != NULL); 18521 ASSERT(!mutex_owned(SD_MUTEX(un))); 18522 18523 SD_TRACE(SD_LOG_IO, un, 18524 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18525 18526 if (un->un_f_check_start_stop && 18527 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18528 (un->un_f_start_stop_supported != TRUE)) { 18529 return (0); 18530 } 18531 18532 /* 18533 * If we are performing an eject operation and 18534 * we receive any command other than SD_TARGET_EJECT 18535 * we should immediately return. 18536 */ 18537 if (flag != SD_TARGET_EJECT) { 18538 mutex_enter(SD_MUTEX(un)); 18539 if (un->un_f_ejecting == TRUE) { 18540 mutex_exit(SD_MUTEX(un)); 18541 return (EAGAIN); 18542 } 18543 mutex_exit(SD_MUTEX(un)); 18544 } 18545 18546 bzero(&cdb, sizeof (cdb)); 18547 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18548 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18549 18550 cdb.scc_cmd = SCMD_START_STOP; 18551 cdb.cdb_opaque[4] = (uchar_t)flag; 18552 18553 ucmd_buf.uscsi_cdb = (char *)&cdb; 18554 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18555 ucmd_buf.uscsi_bufaddr = NULL; 18556 ucmd_buf.uscsi_buflen = 0; 18557 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18558 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18559 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18560 ucmd_buf.uscsi_timeout = 200; 18561 18562 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18563 UIO_SYSSPACE, path_flag); 18564 18565 switch (status) { 18566 case 0: 18567 break; /* Success! */ 18568 case EIO: 18569 switch (ucmd_buf.uscsi_status) { 18570 case STATUS_RESERVATION_CONFLICT: 18571 status = EACCES; 18572 break; 18573 case STATUS_CHECK: 18574 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18575 switch (scsi_sense_key( 18576 (uint8_t *)&sense_buf)) { 18577 case KEY_ILLEGAL_REQUEST: 18578 status = ENOTSUP; 18579 break; 18580 case KEY_NOT_READY: 18581 if (scsi_sense_asc( 18582 (uint8_t *)&sense_buf) 18583 == 0x3A) { 18584 status = ENXIO; 18585 } 18586 break; 18587 default: 18588 break; 18589 } 18590 } 18591 break; 18592 default: 18593 break; 18594 } 18595 break; 18596 default: 18597 break; 18598 } 18599 18600 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18601 18602 return (status); 18603 } 18604 18605 18606 /* 18607 * Function: sd_start_stop_unit_callback 18608 * 18609 * Description: timeout(9F) callback to begin recovery process for a 18610 * device that has spun down. 18611 * 18612 * Arguments: arg - pointer to associated softstate struct. 
18613 * 18614 * Context: Executes in a timeout(9F) thread context 18615 */ 18616 18617 static void 18618 sd_start_stop_unit_callback(void *arg) 18619 { 18620 struct sd_lun *un = arg; 18621 ASSERT(un != NULL); 18622 ASSERT(!mutex_owned(SD_MUTEX(un))); 18623 18624 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18625 18626 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18627 } 18628 18629 18630 /* 18631 * Function: sd_start_stop_unit_task 18632 * 18633 * Description: Recovery procedure when a drive is spun down. 18634 * 18635 * Arguments: arg - pointer to associated softstate struct. 18636 * 18637 * Context: Executes in a taskq() thread context 18638 */ 18639 18640 static void 18641 sd_start_stop_unit_task(void *arg) 18642 { 18643 struct sd_lun *un = arg; 18644 18645 ASSERT(un != NULL); 18646 ASSERT(!mutex_owned(SD_MUTEX(un))); 18647 18648 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18649 18650 /* 18651 * Some unformatted drives report not ready error, no need to 18652 * restart if format has been initiated. 18653 */ 18654 mutex_enter(SD_MUTEX(un)); 18655 if (un->un_f_format_in_progress == TRUE) { 18656 mutex_exit(SD_MUTEX(un)); 18657 return; 18658 } 18659 mutex_exit(SD_MUTEX(un)); 18660 18661 /* 18662 * When a START STOP command is issued from here, it is part of a 18663 * failure recovery operation and must be issued before any other 18664 * commands, including any pending retries. Thus it must be sent 18665 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18666 * succeeds or not, we will start I/O after the attempt. 18667 */ 18668 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18669 SD_PATH_DIRECT_PRIORITY); 18670 18671 /* 18672 * The above call blocks until the START_STOP_UNIT command completes. 18673 * Now that it has completed, we must re-try the original IO that 18674 * received the NOT READY condition in the first place. There are 18675 * three possible conditions here: 18676 * 18677 * (1) The original IO is on un_retry_bp. 18678 * (2) The original IO is on the regular wait queue, and un_retry_bp 18679 * is NULL. 18680 * (3) The original IO is on the regular wait queue, and un_retry_bp 18681 * points to some other, unrelated bp. 18682 * 18683 * For each case, we must call sd_start_cmds() with un_retry_bp 18684 * as the argument. If un_retry_bp is NULL, this will initiate 18685 * processing of the regular wait queue. If un_retry_bp is not NULL, 18686 * then this will process the bp on un_retry_bp. That may or may not 18687 * be the original IO, but that does not matter: the important thing 18688 * is to keep the IO processing going at this point. 18689 * 18690 * Note: This is a very specific error recovery sequence associated 18691 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18692 * serialize the I/O with completion of the spin-up. 18693 */ 18694 mutex_enter(SD_MUTEX(un)); 18695 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18696 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18697 un, un->un_retry_bp); 18698 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18699 sd_start_cmds(un, un->un_retry_bp); 18700 mutex_exit(SD_MUTEX(un)); 18701 18702 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18703 } 18704 18705 18706 /* 18707 * Function: sd_send_scsi_INQUIRY 18708 * 18709 * Description: Issue the scsi INQUIRY command. 
18710 * 18711 * Arguments: un 18712 * bufaddr 18713 * buflen 18714 * evpd 18715 * page_code 18716 * page_length 18717 * 18718 * Return Code: 0 - Success 18719 * errno return code from sd_send_scsi_cmd() 18720 * 18721 * Context: Can sleep. Does not return until command is completed. 18722 */ 18723 18724 static int 18725 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 18726 uchar_t evpd, uchar_t page_code, size_t *residp) 18727 { 18728 union scsi_cdb cdb; 18729 struct uscsi_cmd ucmd_buf; 18730 int status; 18731 18732 ASSERT(un != NULL); 18733 ASSERT(!mutex_owned(SD_MUTEX(un))); 18734 ASSERT(bufaddr != NULL); 18735 18736 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 18737 18738 bzero(&cdb, sizeof (cdb)); 18739 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18740 bzero(bufaddr, buflen); 18741 18742 cdb.scc_cmd = SCMD_INQUIRY; 18743 cdb.cdb_opaque[1] = evpd; 18744 cdb.cdb_opaque[2] = page_code; 18745 FORMG0COUNT(&cdb, buflen); 18746 18747 ucmd_buf.uscsi_cdb = (char *)&cdb; 18748 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18749 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18750 ucmd_buf.uscsi_buflen = buflen; 18751 ucmd_buf.uscsi_rqbuf = NULL; 18752 ucmd_buf.uscsi_rqlen = 0; 18753 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 18754 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 18755 18756 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18757 UIO_SYSSPACE, SD_PATH_DIRECT); 18758 18759 if ((status == 0) && (residp != NULL)) { 18760 *residp = ucmd_buf.uscsi_resid; 18761 } 18762 18763 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 18764 18765 return (status); 18766 } 18767 18768 18769 /* 18770 * Function: sd_send_scsi_TEST_UNIT_READY 18771 * 18772 * Description: Issue the scsi TEST UNIT READY command. 18773 * This routine can be told to set the flag USCSI_DIAGNOSE to 18774 * prevent retrying failed commands. Use this when the intent 18775 * is either to check for device readiness, to clear a Unit 18776 * Attention, or to clear any outstanding sense data. 18777 * However under specific conditions the expected behavior 18778 * is for retries to bring a device ready, so use the flag 18779 * with caution. 18780 * 18781 * Arguments: un 18782 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18783 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18784 * 0: dont check for media present, do retries on cmd. 18785 * 18786 * Return Code: 0 - Success 18787 * EIO - IO error 18788 * EACCES - Reservation conflict detected 18789 * ENXIO - Not Ready, medium not present 18790 * errno return code from sd_send_scsi_cmd() 18791 * 18792 * Context: Can sleep. Does not return until command is completed. 18793 */ 18794 18795 static int 18796 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18797 { 18798 struct scsi_extended_sense sense_buf; 18799 union scsi_cdb cdb; 18800 struct uscsi_cmd ucmd_buf; 18801 int status; 18802 18803 ASSERT(un != NULL); 18804 ASSERT(!mutex_owned(SD_MUTEX(un))); 18805 18806 SD_TRACE(SD_LOG_IO, un, 18807 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18808 18809 /* 18810 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18811 * timeouts when they receive a TUR and the queue is not empty. Check 18812 * the configuration flag set during attach (indicating the drive has 18813 * this firmware bug) and un_ncmds_in_transport before issuing the 18814 * TUR. If there are 18815 * pending commands return success, this is a bit arbitrary but is ok 18816 * for non-removables (i.e. 
the eliteI disks) and non-clustering 18817 * configurations. 18818 */ 18819 if (un->un_f_cfg_tur_check == TRUE) { 18820 mutex_enter(SD_MUTEX(un)); 18821 if (un->un_ncmds_in_transport != 0) { 18822 mutex_exit(SD_MUTEX(un)); 18823 return (0); 18824 } 18825 mutex_exit(SD_MUTEX(un)); 18826 } 18827 18828 bzero(&cdb, sizeof (cdb)); 18829 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18830 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18831 18832 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18833 18834 ucmd_buf.uscsi_cdb = (char *)&cdb; 18835 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18836 ucmd_buf.uscsi_bufaddr = NULL; 18837 ucmd_buf.uscsi_buflen = 0; 18838 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18839 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18840 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18841 18842 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18843 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18844 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18845 } 18846 ucmd_buf.uscsi_timeout = 60; 18847 18848 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18849 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18850 SD_PATH_STANDARD)); 18851 18852 switch (status) { 18853 case 0: 18854 break; /* Success! */ 18855 case EIO: 18856 switch (ucmd_buf.uscsi_status) { 18857 case STATUS_RESERVATION_CONFLICT: 18858 status = EACCES; 18859 break; 18860 case STATUS_CHECK: 18861 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18862 break; 18863 } 18864 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18865 (scsi_sense_key((uint8_t *)&sense_buf) == 18866 KEY_NOT_READY) && 18867 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18868 status = ENXIO; 18869 } 18870 break; 18871 default: 18872 break; 18873 } 18874 break; 18875 default: 18876 break; 18877 } 18878 18879 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18880 18881 return (status); 18882 } 18883 18884 18885 /* 18886 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18887 * 18888 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18889 * 18890 * Arguments: un 18891 * 18892 * Return Code: 0 - Success 18893 * EACCES 18894 * ENOTSUP 18895 * errno return code from sd_send_scsi_cmd() 18896 * 18897 * Context: Can sleep. Does not return until command is completed. 
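 *
 *		Minimal sketch of a READ KEYS request (the buffer size here
 *		is an arbitrary example; callers size it for the expected
 *		number of keys, or may pass NULL/0 to have this routine
 *		allocate a default MHIOC_RESV_KEY_SIZE buffer):
 *
 *		    uint16_t	len = 4 * MHIOC_RESV_KEY_SIZE;
 *		    uchar_t	*keys = kmem_zalloc(len, KM_SLEEP);
 *
 *		    rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un,
 *			SD_READ_KEYS, len, keys);
 *		    ...
 *		    kmem_free(keys, len);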
18898 */ 18899 18900 static int 18901 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18902 uint16_t data_len, uchar_t *data_bufp) 18903 { 18904 struct scsi_extended_sense sense_buf; 18905 union scsi_cdb cdb; 18906 struct uscsi_cmd ucmd_buf; 18907 int status; 18908 int no_caller_buf = FALSE; 18909 18910 ASSERT(un != NULL); 18911 ASSERT(!mutex_owned(SD_MUTEX(un))); 18912 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18913 18914 SD_TRACE(SD_LOG_IO, un, 18915 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18916 18917 bzero(&cdb, sizeof (cdb)); 18918 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18919 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18920 if (data_bufp == NULL) { 18921 /* Allocate a default buf if the caller did not give one */ 18922 ASSERT(data_len == 0); 18923 data_len = MHIOC_RESV_KEY_SIZE; 18924 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18925 no_caller_buf = TRUE; 18926 } 18927 18928 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18929 cdb.cdb_opaque[1] = usr_cmd; 18930 FORMG1COUNT(&cdb, data_len); 18931 18932 ucmd_buf.uscsi_cdb = (char *)&cdb; 18933 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18934 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18935 ucmd_buf.uscsi_buflen = data_len; 18936 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18937 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18938 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18939 ucmd_buf.uscsi_timeout = 60; 18940 18941 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18942 UIO_SYSSPACE, SD_PATH_STANDARD); 18943 18944 switch (status) { 18945 case 0: 18946 break; /* Success! */ 18947 case EIO: 18948 switch (ucmd_buf.uscsi_status) { 18949 case STATUS_RESERVATION_CONFLICT: 18950 status = EACCES; 18951 break; 18952 case STATUS_CHECK: 18953 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18954 (scsi_sense_key((uint8_t *)&sense_buf) == 18955 KEY_ILLEGAL_REQUEST)) { 18956 status = ENOTSUP; 18957 } 18958 break; 18959 default: 18960 break; 18961 } 18962 break; 18963 default: 18964 break; 18965 } 18966 18967 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18968 18969 if (no_caller_buf == TRUE) { 18970 kmem_free(data_bufp, data_len); 18971 } 18972 18973 return (status); 18974 } 18975 18976 18977 /* 18978 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18979 * 18980 * Description: This routine is the driver entry point for handling CD-ROM 18981 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 18982 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 18983 * device. 18984 * 18985 * Arguments: un - Pointer to soft state struct for the target. 18986 * usr_cmd SCSI-3 reservation facility command (one of 18987 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18988 * SD_SCSI3_PREEMPTANDABORT) 18989 * usr_bufp - user provided pointer register, reserve descriptor or 18990 * preempt and abort structure (mhioc_register_t, 18991 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18992 * 18993 * Return Code: 0 - Success 18994 * EACCES 18995 * ENOTSUP 18996 * errno return code from sd_send_scsi_cmd() 18997 * 18998 * Context: Can sleep. Does not return until command is completed. 
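 *
 *		Illustrative only -- registering a key ("mykey" is a
 *		hypothetical MHIOC_RESV_KEY_SIZE-byte key; the field names
 *		match the SD_SCSI3_REGISTER case below):
 *
 *		    mhioc_register_t	reg;
 *
 *		    bzero(&reg, sizeof (reg));
 *		    bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *		    rval = sd_send_scsi_PERSISTENT_RESERVE_OUT(un,
 *			SD_SCSI3_REGISTER, (uchar_t *)&reg);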
18999 */ 19000 19001 static int 19002 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 19003 uchar_t *usr_bufp) 19004 { 19005 struct scsi_extended_sense sense_buf; 19006 union scsi_cdb cdb; 19007 struct uscsi_cmd ucmd_buf; 19008 int status; 19009 uchar_t data_len = sizeof (sd_prout_t); 19010 sd_prout_t *prp; 19011 19012 ASSERT(un != NULL); 19013 ASSERT(!mutex_owned(SD_MUTEX(un))); 19014 ASSERT(data_len == 24); /* required by scsi spec */ 19015 19016 SD_TRACE(SD_LOG_IO, un, 19017 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 19018 19019 if (usr_bufp == NULL) { 19020 return (EINVAL); 19021 } 19022 19023 bzero(&cdb, sizeof (cdb)); 19024 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19025 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19026 prp = kmem_zalloc(data_len, KM_SLEEP); 19027 19028 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 19029 cdb.cdb_opaque[1] = usr_cmd; 19030 FORMG1COUNT(&cdb, data_len); 19031 19032 ucmd_buf.uscsi_cdb = (char *)&cdb; 19033 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19034 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 19035 ucmd_buf.uscsi_buflen = data_len; 19036 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19037 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19038 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19039 ucmd_buf.uscsi_timeout = 60; 19040 19041 switch (usr_cmd) { 19042 case SD_SCSI3_REGISTER: { 19043 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 19044 19045 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19046 bcopy(ptr->newkey.key, prp->service_key, 19047 MHIOC_RESV_KEY_SIZE); 19048 prp->aptpl = ptr->aptpl; 19049 break; 19050 } 19051 case SD_SCSI3_RESERVE: 19052 case SD_SCSI3_RELEASE: { 19053 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 19054 19055 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19056 prp->scope_address = BE_32(ptr->scope_specific_addr); 19057 cdb.cdb_opaque[2] = ptr->type; 19058 break; 19059 } 19060 case SD_SCSI3_PREEMPTANDABORT: { 19061 mhioc_preemptandabort_t *ptr = 19062 (mhioc_preemptandabort_t *)usr_bufp; 19063 19064 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19065 bcopy(ptr->victim_key.key, prp->service_key, 19066 MHIOC_RESV_KEY_SIZE); 19067 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 19068 cdb.cdb_opaque[2] = ptr->resvdesc.type; 19069 ucmd_buf.uscsi_flags |= USCSI_HEAD; 19070 break; 19071 } 19072 case SD_SCSI3_REGISTERANDIGNOREKEY: 19073 { 19074 mhioc_registerandignorekey_t *ptr; 19075 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 19076 bcopy(ptr->newkey.key, 19077 prp->service_key, MHIOC_RESV_KEY_SIZE); 19078 prp->aptpl = ptr->aptpl; 19079 break; 19080 } 19081 default: 19082 ASSERT(FALSE); 19083 break; 19084 } 19085 19086 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19087 UIO_SYSSPACE, SD_PATH_STANDARD); 19088 19089 switch (status) { 19090 case 0: 19091 break; /* Success! 
*/ 19092 case EIO: 19093 switch (ucmd_buf.uscsi_status) { 19094 case STATUS_RESERVATION_CONFLICT: 19095 status = EACCES; 19096 break; 19097 case STATUS_CHECK: 19098 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19099 (scsi_sense_key((uint8_t *)&sense_buf) == 19100 KEY_ILLEGAL_REQUEST)) { 19101 status = ENOTSUP; 19102 } 19103 break; 19104 default: 19105 break; 19106 } 19107 break; 19108 default: 19109 break; 19110 } 19111 19112 kmem_free(prp, data_len); 19113 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 19114 return (status); 19115 } 19116 19117 19118 /* 19119 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 19120 * 19121 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 19122 * 19123 * Arguments: un - pointer to the target's soft state struct 19124 * dkc - pointer to the callback structure 19125 * 19126 * Return Code: 0 - success 19127 * errno-type error code 19128 * 19129 * Context: kernel thread context only. 19130 * 19131 * _______________________________________________________________ 19132 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 19133 * |FLUSH_VOLATILE| | operation | 19134 * |______________|______________|_________________________________| 19135 * | 0 | NULL | Synchronous flush on both | 19136 * | | | volatile and non-volatile cache | 19137 * |______________|______________|_________________________________| 19138 * | 1 | NULL | Synchronous flush on volatile | 19139 * | | | cache; disk drivers may suppress| 19140 * | | | flush if disk table indicates | 19141 * | | | non-volatile cache | 19142 * |______________|______________|_________________________________| 19143 * | 0 | !NULL | Asynchronous flush on both | 19144 * | | | volatile and non-volatile cache;| 19145 * |______________|______________|_________________________________| 19146 * | 1 | !NULL | Asynchronous flush on volatile | 19147 * | | | cache; disk drivers may suppress| 19148 * | | | flush if disk table indicates | 19149 * | | | non-volatile cache | 19150 * |______________|______________|_________________________________| 19151 * 19152 */ 19153 19154 static int 19155 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 19156 { 19157 struct sd_uscsi_info *uip; 19158 struct uscsi_cmd *uscmd; 19159 union scsi_cdb *cdb; 19160 struct buf *bp; 19161 int rval = 0; 19162 int is_async; 19163 19164 SD_TRACE(SD_LOG_IO, un, 19165 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 19166 19167 ASSERT(un != NULL); 19168 ASSERT(!mutex_owned(SD_MUTEX(un))); 19169 19170 if (dkc == NULL || dkc->dkc_callback == NULL) { 19171 is_async = FALSE; 19172 } else { 19173 is_async = TRUE; 19174 } 19175 19176 mutex_enter(SD_MUTEX(un)); 19177 /* check whether cache flush should be suppressed */ 19178 if (un->un_f_suppress_cache_flush == TRUE) { 19179 mutex_exit(SD_MUTEX(un)); 19180 /* 19181 * suppress the cache flush if the device is told to do 19182 * so by sd.conf or disk table 19183 */ 19184 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 19185 skip the cache flush since suppress_cache_flush is %d!\n", 19186 un->un_f_suppress_cache_flush); 19187 19188 if (is_async == TRUE) { 19189 /* invoke callback for asynchronous flush */ 19190 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 19191 } 19192 return (rval); 19193 } 19194 mutex_exit(SD_MUTEX(un)); 19195 19196 /* 19197 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 19198 * set properly 19199 */ 19200 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 19201 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 19202 19203 
mutex_enter(SD_MUTEX(un)); 19204 if (dkc != NULL && un->un_f_sync_nv_supported && 19205 (dkc->dkc_flag & FLUSH_VOLATILE)) { 19206 /* 19207 * if the device supports SYNC_NV bit, turn on 19208 * the SYNC_NV bit to only flush volatile cache 19209 */ 19210 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 19211 } 19212 mutex_exit(SD_MUTEX(un)); 19213 19214 /* 19215 * First get some memory for the uscsi_cmd struct and cdb 19216 * and initialize for SYNCHRONIZE_CACHE cmd. 19217 */ 19218 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 19219 uscmd->uscsi_cdblen = CDB_GROUP1; 19220 uscmd->uscsi_cdb = (caddr_t)cdb; 19221 uscmd->uscsi_bufaddr = NULL; 19222 uscmd->uscsi_buflen = 0; 19223 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 19224 uscmd->uscsi_rqlen = SENSE_LENGTH; 19225 uscmd->uscsi_rqresid = SENSE_LENGTH; 19226 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19227 uscmd->uscsi_timeout = sd_io_time; 19228 19229 /* 19230 * Allocate an sd_uscsi_info struct and fill it with the info 19231 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 19232 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 19233 * since we allocate the buf here in this function, we do not 19234 * need to preserve the prior contents of b_private. 19235 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 19236 */ 19237 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 19238 uip->ui_flags = SD_PATH_DIRECT; 19239 uip->ui_cmdp = uscmd; 19240 19241 bp = getrbuf(KM_SLEEP); 19242 bp->b_private = uip; 19243 19244 /* 19245 * Setup buffer to carry uscsi request. 19246 */ 19247 bp->b_flags = B_BUSY; 19248 bp->b_bcount = 0; 19249 bp->b_blkno = 0; 19250 19251 if (is_async == TRUE) { 19252 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 19253 uip->ui_dkc = *dkc; 19254 } 19255 19256 bp->b_edev = SD_GET_DEV(un); 19257 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 19258 19259 /* 19260 * Unset un_f_sync_cache_required flag 19261 */ 19262 mutex_enter(SD_MUTEX(un)); 19263 un->un_f_sync_cache_required = FALSE; 19264 mutex_exit(SD_MUTEX(un)); 19265 19266 (void) sd_uscsi_strategy(bp); 19267 19268 /* 19269 * If synchronous request, wait for completion 19270 * If async just return and let b_iodone callback 19271 * cleanup. 19272 * NOTE: On return, u_ncmds_in_driver will be decremented, 19273 * but it was also incremented in sd_uscsi_strategy(), so 19274 * we should be ok. 19275 */ 19276 if (is_async == FALSE) { 19277 (void) biowait(bp); 19278 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 19279 } 19280 19281 return (rval); 19282 } 19283 19284 19285 static int 19286 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 19287 { 19288 struct sd_uscsi_info *uip; 19289 struct uscsi_cmd *uscmd; 19290 uint8_t *sense_buf; 19291 struct sd_lun *un; 19292 int status; 19293 union scsi_cdb *cdb; 19294 19295 uip = (struct sd_uscsi_info *)(bp->b_private); 19296 ASSERT(uip != NULL); 19297 19298 uscmd = uip->ui_cmdp; 19299 ASSERT(uscmd != NULL); 19300 19301 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 19302 ASSERT(sense_buf != NULL); 19303 19304 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 19305 ASSERT(un != NULL); 19306 19307 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 19308 19309 status = geterror(bp); 19310 switch (status) { 19311 case 0: 19312 break; /* Success! 
*/ 19313 case EIO: 19314 switch (uscmd->uscsi_status) { 19315 case STATUS_RESERVATION_CONFLICT: 19316 /* Ignore reservation conflict */ 19317 status = 0; 19318 goto done; 19319 19320 case STATUS_CHECK: 19321 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 19322 (scsi_sense_key(sense_buf) == 19323 KEY_ILLEGAL_REQUEST)) { 19324 /* Ignore Illegal Request error */ 19325 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 19326 mutex_enter(SD_MUTEX(un)); 19327 un->un_f_sync_nv_supported = FALSE; 19328 mutex_exit(SD_MUTEX(un)); 19329 status = 0; 19330 SD_TRACE(SD_LOG_IO, un, 19331 "un_f_sync_nv_supported \ 19332 is set to false.\n"); 19333 goto done; 19334 } 19335 19336 mutex_enter(SD_MUTEX(un)); 19337 un->un_f_sync_cache_supported = FALSE; 19338 mutex_exit(SD_MUTEX(un)); 19339 SD_TRACE(SD_LOG_IO, un, 19340 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 19341 un_f_sync_cache_supported set to false \ 19342 with asc = %x, ascq = %x\n", 19343 scsi_sense_asc(sense_buf), 19344 scsi_sense_ascq(sense_buf)); 19345 status = ENOTSUP; 19346 goto done; 19347 } 19348 break; 19349 default: 19350 break; 19351 } 19352 /* FALLTHRU */ 19353 default: 19354 /* 19355 * Turn on the un_f_sync_cache_required flag 19356 * since the SYNC CACHE command failed 19357 */ 19358 mutex_enter(SD_MUTEX(un)); 19359 un->un_f_sync_cache_required = TRUE; 19360 mutex_exit(SD_MUTEX(un)); 19361 19362 /* 19363 * Don't log an error message if this device 19364 * has removable media. 19365 */ 19366 if (!un->un_f_has_removable_media) { 19367 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19368 "SYNCHRONIZE CACHE command failed (%d)\n", status); 19369 } 19370 break; 19371 } 19372 19373 done: 19374 if (uip->ui_dkc.dkc_callback != NULL) { 19375 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 19376 } 19377 19378 ASSERT((bp->b_flags & B_REMAPPED) == 0); 19379 freerbuf(bp); 19380 kmem_free(uip, sizeof (struct sd_uscsi_info)); 19381 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 19382 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 19383 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 19384 19385 return (status); 19386 } 19387 19388 19389 /* 19390 * Function: sd_send_scsi_GET_CONFIGURATION 19391 * 19392 * Description: Issues the get configuration command to the device. 19393 * Called from sd_check_for_writable_cd & sd_get_media_info 19394 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 19395 * Arguments: un 19396 * ucmdbuf 19397 * rqbuf 19398 * rqbuflen 19399 * bufaddr 19400 * buflen 19401 * path_flag 19402 * 19403 * Return Code: 0 - Success 19404 * errno return code from sd_send_scsi_cmd() 19405 * 19406 * Context: Can sleep. Does not return until command is completed. 19407 * 19408 */ 19409 19410 static int 19411 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 19412 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 19413 int path_flag) 19414 { 19415 char cdb[CDB_GROUP1]; 19416 int status; 19417 19418 ASSERT(un != NULL); 19419 ASSERT(!mutex_owned(SD_MUTEX(un))); 19420 ASSERT(bufaddr != NULL); 19421 ASSERT(ucmdbuf != NULL); 19422 ASSERT(rqbuf != NULL); 19423 19424 SD_TRACE(SD_LOG_IO, un, 19425 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 19426 19427 bzero(cdb, sizeof (cdb)); 19428 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19429 bzero(rqbuf, rqbuflen); 19430 bzero(bufaddr, buflen); 19431 19432 /* 19433 * Set up cdb field for the get configuration command. 
19434 */ 19435 cdb[0] = SCMD_GET_CONFIGURATION; 19436 cdb[1] = 0x02; /* Requested Type */ 19437 cdb[8] = SD_PROFILE_HEADER_LEN; 19438 ucmdbuf->uscsi_cdb = cdb; 19439 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19440 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19441 ucmdbuf->uscsi_buflen = buflen; 19442 ucmdbuf->uscsi_timeout = sd_io_time; 19443 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19444 ucmdbuf->uscsi_rqlen = rqbuflen; 19445 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19446 19447 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19448 UIO_SYSSPACE, path_flag); 19449 19450 switch (status) { 19451 case 0: 19452 break; /* Success! */ 19453 case EIO: 19454 switch (ucmdbuf->uscsi_status) { 19455 case STATUS_RESERVATION_CONFLICT: 19456 status = EACCES; 19457 break; 19458 default: 19459 break; 19460 } 19461 break; 19462 default: 19463 break; 19464 } 19465 19466 if (status == 0) { 19467 SD_DUMP_MEMORY(un, SD_LOG_IO, 19468 "sd_send_scsi_GET_CONFIGURATION: data", 19469 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19470 } 19471 19472 SD_TRACE(SD_LOG_IO, un, 19473 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19474 19475 return (status); 19476 } 19477 19478 /* 19479 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19480 * 19481 * Description: Issues the get configuration command to the device to 19482 * retrieve a specific feature. Called from 19483 * sd_check_for_writable_cd & sd_set_mmc_caps. 19484 * Arguments: un 19485 * ucmdbuf 19486 * rqbuf 19487 * rqbuflen 19488 * bufaddr 19489 * buflen 19490 * feature 19491 * 19492 * Return Code: 0 - Success 19493 * errno return code from sd_send_scsi_cmd() 19494 * 19495 * Context: Can sleep. Does not return until command is completed. 19496 * 19497 */ 19498 static int 19499 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19500 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19501 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19502 { 19503 char cdb[CDB_GROUP1]; 19504 int status; 19505 19506 ASSERT(un != NULL); 19507 ASSERT(!mutex_owned(SD_MUTEX(un))); 19508 ASSERT(bufaddr != NULL); 19509 ASSERT(ucmdbuf != NULL); 19510 ASSERT(rqbuf != NULL); 19511 19512 SD_TRACE(SD_LOG_IO, un, 19513 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19514 19515 bzero(cdb, sizeof (cdb)); 19516 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19517 bzero(rqbuf, rqbuflen); 19518 bzero(bufaddr, buflen); 19519 19520 /* 19521 * Set up cdb field for the get configuration command. 19522 */ 19523 cdb[0] = SCMD_GET_CONFIGURATION; 19524 cdb[1] = 0x02; /* Requested Type */ 19525 cdb[3] = feature; 19526 cdb[8] = buflen; 19527 ucmdbuf->uscsi_cdb = cdb; 19528 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19529 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19530 ucmdbuf->uscsi_buflen = buflen; 19531 ucmdbuf->uscsi_timeout = sd_io_time; 19532 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19533 ucmdbuf->uscsi_rqlen = rqbuflen; 19534 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19535 19536 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19537 UIO_SYSSPACE, path_flag); 19538 19539 switch (status) { 19540 case 0: 19541 break; /* Success! 
*/ 19542 case EIO: 19543 switch (ucmdbuf->uscsi_status) { 19544 case STATUS_RESERVATION_CONFLICT: 19545 status = EACCES; 19546 break; 19547 default: 19548 break; 19549 } 19550 break; 19551 default: 19552 break; 19553 } 19554 19555 if (status == 0) { 19556 SD_DUMP_MEMORY(un, SD_LOG_IO, 19557 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19558 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19559 } 19560 19561 SD_TRACE(SD_LOG_IO, un, 19562 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19563 19564 return (status); 19565 } 19566 19567 19568 /* 19569 * Function: sd_send_scsi_MODE_SENSE 19570 * 19571 * Description: Utility function for issuing a scsi MODE SENSE command. 19572 * Note: This routine uses a consistent implementation for Group0, 19573 * Group1, and Group2 commands across all platforms. ATAPI devices 19574 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 19575 * 19576 * Arguments: un - pointer to the softstate struct for the target. 19577 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 19578 * CDB_GROUP[1|2] (10 byte). 19579 * bufaddr - buffer for page data retrieved from the target. 19580 * buflen - size of page to be retrieved. 19581 * page_code - page code of data to be retrieved from the target. 19582 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19583 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19584 * to use the USCSI "direct" chain and bypass the normal 19585 * command waitq. 19586 * 19587 * Return Code: 0 - Success 19588 * errno return code from sd_send_scsi_cmd() 19589 * 19590 * Context: Can sleep. Does not return until command is completed. 19591 */ 19592 19593 static int 19594 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19595 size_t buflen, uchar_t page_code, int path_flag) 19596 { 19597 struct scsi_extended_sense sense_buf; 19598 union scsi_cdb cdb; 19599 struct uscsi_cmd ucmd_buf; 19600 int status; 19601 int headlen; 19602 19603 ASSERT(un != NULL); 19604 ASSERT(!mutex_owned(SD_MUTEX(un))); 19605 ASSERT(bufaddr != NULL); 19606 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19607 (cdbsize == CDB_GROUP2)); 19608 19609 SD_TRACE(SD_LOG_IO, un, 19610 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19611 19612 bzero(&cdb, sizeof (cdb)); 19613 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19614 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19615 bzero(bufaddr, buflen); 19616 19617 if (cdbsize == CDB_GROUP0) { 19618 cdb.scc_cmd = SCMD_MODE_SENSE; 19619 cdb.cdb_opaque[2] = page_code; 19620 FORMG0COUNT(&cdb, buflen); 19621 headlen = MODE_HEADER_LENGTH; 19622 } else { 19623 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 19624 cdb.cdb_opaque[2] = page_code; 19625 FORMG1COUNT(&cdb, buflen); 19626 headlen = MODE_HEADER_LENGTH_GRP2; 19627 } 19628 19629 ASSERT(headlen <= buflen); 19630 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19631 19632 ucmd_buf.uscsi_cdb = (char *)&cdb; 19633 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19634 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19635 ucmd_buf.uscsi_buflen = buflen; 19636 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19637 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19638 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19639 ucmd_buf.uscsi_timeout = 60; 19640 19641 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19642 UIO_SYSSPACE, path_flag); 19643 19644 switch (status) { 19645 case 0: 19646 /* 19647 * sr_check_wp() uses 0x3f page code and check the header of 19648 * mode page to determine if target device is 
write-protected. 19649 * But some USB devices return 0 bytes for 0x3f page code. For 19650 * this case, make sure that mode page header is returned at 19651 * least. 19652 */ 19653 if (buflen - ucmd_buf.uscsi_resid < headlen) 19654 status = EIO; 19655 break; /* Success! */ 19656 case EIO: 19657 switch (ucmd_buf.uscsi_status) { 19658 case STATUS_RESERVATION_CONFLICT: 19659 status = EACCES; 19660 break; 19661 default: 19662 break; 19663 } 19664 break; 19665 default: 19666 break; 19667 } 19668 19669 if (status == 0) { 19670 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 19671 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19672 } 19673 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 19674 19675 return (status); 19676 } 19677 19678 19679 /* 19680 * Function: sd_send_scsi_MODE_SELECT 19681 * 19682 * Description: Utility function for issuing a scsi MODE SELECT command. 19683 * Note: This routine uses a consistent implementation for Group0, 19684 * Group1, and Group2 commands across all platforms. ATAPI devices 19685 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 19686 * 19687 * Arguments: un - pointer to the softstate struct for the target. 19688 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 19689 * CDB_GROUP[1|2] (10 byte). 19690 * bufaddr - buffer containing the page data to be sent to the target. 19691 * buflen - size of the page data to be sent. 19692 * save_page - boolean to determine if SP bit should be set. 19693 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19694 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19695 * to use the USCSI "direct" chain and bypass the normal 19696 * command waitq. 19697 * 19698 * Return Code: 0 - Success 19699 * errno return code from sd_send_scsi_cmd() 19700 * 19701 * Context: Can sleep. Does not return until command is completed.
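 *
 *		A minimal, purely illustrative round trip (the page code 0x08
 *		and the buffer size are hypothetical; real callers also adjust
 *		the mode header before writing a page back):
 *
 *		    uchar_t	md[64];
 *
 *		    if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, md,
 *			sizeof (md), 0x08, SD_PATH_DIRECT) == 0) {
 *			    ... modify the page data in md ...
 *			    (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP1,
 *				md, sizeof (md), SD_SAVE_PAGE,
 *				SD_PATH_DIRECT);
 *		    }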
19702 */ 19703 19704 static int 19705 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19706 size_t buflen, uchar_t save_page, int path_flag) 19707 { 19708 struct scsi_extended_sense sense_buf; 19709 union scsi_cdb cdb; 19710 struct uscsi_cmd ucmd_buf; 19711 int status; 19712 19713 ASSERT(un != NULL); 19714 ASSERT(!mutex_owned(SD_MUTEX(un))); 19715 ASSERT(bufaddr != NULL); 19716 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19717 (cdbsize == CDB_GROUP2)); 19718 19719 SD_TRACE(SD_LOG_IO, un, 19720 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 19721 19722 bzero(&cdb, sizeof (cdb)); 19723 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19724 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19725 19726 /* Set the PF bit for many third party drives */ 19727 cdb.cdb_opaque[1] = 0x10; 19728 19729 /* Set the savepage(SP) bit if given */ 19730 if (save_page == SD_SAVE_PAGE) { 19731 cdb.cdb_opaque[1] |= 0x01; 19732 } 19733 19734 if (cdbsize == CDB_GROUP0) { 19735 cdb.scc_cmd = SCMD_MODE_SELECT; 19736 FORMG0COUNT(&cdb, buflen); 19737 } else { 19738 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 19739 FORMG1COUNT(&cdb, buflen); 19740 } 19741 19742 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19743 19744 ucmd_buf.uscsi_cdb = (char *)&cdb; 19745 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19746 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19747 ucmd_buf.uscsi_buflen = buflen; 19748 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19749 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19750 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19751 ucmd_buf.uscsi_timeout = 60; 19752 19753 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19754 UIO_SYSSPACE, path_flag); 19755 19756 switch (status) { 19757 case 0: 19758 break; /* Success! */ 19759 case EIO: 19760 switch (ucmd_buf.uscsi_status) { 19761 case STATUS_RESERVATION_CONFLICT: 19762 status = EACCES; 19763 break; 19764 default: 19765 break; 19766 } 19767 break; 19768 default: 19769 break; 19770 } 19771 19772 if (status == 0) { 19773 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 19774 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19775 } 19776 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 19777 19778 return (status); 19779 } 19780 19781 19782 /* 19783 * Function: sd_send_scsi_RDWR 19784 * 19785 * Description: Issue a scsi READ or WRITE command with the given parameters. 19786 * 19787 * Arguments: un: Pointer to the sd_lun struct for the target. 19788 * cmd: SCMD_READ or SCMD_WRITE 19789 * bufaddr: Address of caller's buffer to receive the RDWR data 19790 * buflen: Length of caller's buffer receive the RDWR data. 19791 * start_block: Block number for the start of the RDWR operation. 19792 * (Assumes target-native block size.) 19793 * residp: Pointer to variable to receive the redisual of the 19794 * RDWR operation (may be NULL of no residual requested). 19795 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19796 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19797 * to use the USCSI "direct" chain and bypass the normal 19798 * command waitq. 19799 * 19800 * Return Code: 0 - Success 19801 * errno return code from sd_send_scsi_cmd() 19802 * 19803 * Context: Can sleep. Does not return until command is completed. 
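 *
 *		Sketch only -- reading a single target block into a local
 *		buffer (assumes un_tgt_blocksize is valid, which the routine
 *		itself checks below):
 *
 *		    char	*buf = kmem_zalloc(un->un_tgt_blocksize,
 *			KM_SLEEP);
 *
 *		    rval = sd_send_scsi_RDWR(un, SCMD_READ, buf,
 *			un->un_tgt_blocksize, (daddr_t)0, SD_PATH_DIRECT);
 *		    ...
 *		    kmem_free(buf, un->un_tgt_blocksize);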
19804 */ 19805 19806 static int 19807 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19808 size_t buflen, daddr_t start_block, int path_flag) 19809 { 19810 struct scsi_extended_sense sense_buf; 19811 union scsi_cdb cdb; 19812 struct uscsi_cmd ucmd_buf; 19813 uint32_t block_count; 19814 int status; 19815 int cdbsize; 19816 uchar_t flag; 19817 19818 ASSERT(un != NULL); 19819 ASSERT(!mutex_owned(SD_MUTEX(un))); 19820 ASSERT(bufaddr != NULL); 19821 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19822 19823 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19824 19825 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19826 return (EINVAL); 19827 } 19828 19829 mutex_enter(SD_MUTEX(un)); 19830 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19831 mutex_exit(SD_MUTEX(un)); 19832 19833 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19834 19835 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19836 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19837 bufaddr, buflen, start_block, block_count); 19838 19839 bzero(&cdb, sizeof (cdb)); 19840 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19841 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19842 19843 /* Compute CDB size to use */ 19844 if (start_block > 0xffffffff) 19845 cdbsize = CDB_GROUP4; 19846 else if ((start_block & 0xFFE00000) || 19847 (un->un_f_cfg_is_atapi == TRUE)) 19848 cdbsize = CDB_GROUP1; 19849 else 19850 cdbsize = CDB_GROUP0; 19851 19852 switch (cdbsize) { 19853 case CDB_GROUP0: /* 6-byte CDBs */ 19854 cdb.scc_cmd = cmd; 19855 FORMG0ADDR(&cdb, start_block); 19856 FORMG0COUNT(&cdb, block_count); 19857 break; 19858 case CDB_GROUP1: /* 10-byte CDBs */ 19859 cdb.scc_cmd = cmd | SCMD_GROUP1; 19860 FORMG1ADDR(&cdb, start_block); 19861 FORMG1COUNT(&cdb, block_count); 19862 break; 19863 case CDB_GROUP4: /* 16-byte CDBs */ 19864 cdb.scc_cmd = cmd | SCMD_GROUP4; 19865 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19866 FORMG4COUNT(&cdb, block_count); 19867 break; 19868 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19869 default: 19870 /* All others reserved */ 19871 return (EINVAL); 19872 } 19873 19874 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19875 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19876 19877 ucmd_buf.uscsi_cdb = (char *)&cdb; 19878 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19879 ucmd_buf.uscsi_bufaddr = bufaddr; 19880 ucmd_buf.uscsi_buflen = buflen; 19881 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19882 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19883 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19884 ucmd_buf.uscsi_timeout = 60; 19885 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19886 UIO_SYSSPACE, path_flag); 19887 switch (status) { 19888 case 0: 19889 break; /* Success! */ 19890 case EIO: 19891 switch (ucmd_buf.uscsi_status) { 19892 case STATUS_RESERVATION_CONFLICT: 19893 status = EACCES; 19894 break; 19895 default: 19896 break; 19897 } 19898 break; 19899 default: 19900 break; 19901 } 19902 19903 if (status == 0) { 19904 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19905 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19906 } 19907 19908 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19909 19910 return (status); 19911 } 19912 19913 19914 /* 19915 * Function: sd_send_scsi_LOG_SENSE 19916 * 19917 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19918 * 19919 * Arguments: un: Pointer to the sd_lun struct for the target. 
19920 * 19921 * Return Code: 0 - Success 19922 * errno return code from sd_send_scsi_cmd() 19923 * 19924 * Context: Can sleep. Does not return until command is completed. 19925 */ 19926 19927 static int 19928 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19929 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19930 int path_flag) 19931 19932 { 19933 struct scsi_extended_sense sense_buf; 19934 union scsi_cdb cdb; 19935 struct uscsi_cmd ucmd_buf; 19936 int status; 19937 19938 ASSERT(un != NULL); 19939 ASSERT(!mutex_owned(SD_MUTEX(un))); 19940 19941 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19942 19943 bzero(&cdb, sizeof (cdb)); 19944 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19945 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19946 19947 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19948 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19949 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19950 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19951 FORMG1COUNT(&cdb, buflen); 19952 19953 ucmd_buf.uscsi_cdb = (char *)&cdb; 19954 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19955 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19956 ucmd_buf.uscsi_buflen = buflen; 19957 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19958 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19959 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19960 ucmd_buf.uscsi_timeout = 60; 19961 19962 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19963 UIO_SYSSPACE, path_flag); 19964 19965 switch (status) { 19966 case 0: 19967 break; 19968 case EIO: 19969 switch (ucmd_buf.uscsi_status) { 19970 case STATUS_RESERVATION_CONFLICT: 19971 status = EACCES; 19972 break; 19973 case STATUS_CHECK: 19974 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19975 (scsi_sense_key((uint8_t *)&sense_buf) == 19976 KEY_ILLEGAL_REQUEST) && 19977 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19978 /* 19979 * ASC 0x24: INVALID FIELD IN CDB 19980 */ 19981 switch (page_code) { 19982 case START_STOP_CYCLE_PAGE: 19983 /* 19984 * The start stop cycle counter is 19985 * implemented as page 0x31 in earlier 19986 * generation disks. In new generation 19987 * disks the start stop cycle counter is 19988 * implemented as page 0xE. To properly 19989 * handle this case if an attempt for 19990 * log page 0xE is made and fails we 19991 * will try again using page 0x31. 19992 * 19993 * Network storage BU committed to 19994 * maintain the page 0x31 for this 19995 * purpose and will not have any other 19996 * page implemented with page code 0x31 19997 * until all disks transition to the 19998 * standard page. 
19999 */ 20000 mutex_enter(SD_MUTEX(un)); 20001 un->un_start_stop_cycle_page = 20002 START_STOP_CYCLE_VU_PAGE; 20003 cdb.cdb_opaque[2] = 20004 (char)(page_control << 6) | 20005 un->un_start_stop_cycle_page; 20006 mutex_exit(SD_MUTEX(un)); 20007 status = sd_send_scsi_cmd( 20008 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 20009 UIO_SYSSPACE, path_flag); 20010 20011 break; 20012 case TEMPERATURE_PAGE: 20013 status = ENOTTY; 20014 break; 20015 default: 20016 break; 20017 } 20018 } 20019 break; 20020 default: 20021 break; 20022 } 20023 break; 20024 default: 20025 break; 20026 } 20027 20028 if (status == 0) { 20029 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 20030 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20031 } 20032 20033 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 20034 20035 return (status); 20036 } 20037 20038 20039 /* 20040 * Function: sdioctl 20041 * 20042 * Description: Driver's ioctl(9e) entry point function. 20043 * 20044 * Arguments: dev - device number 20045 * cmd - ioctl operation to be performed 20046 * arg - user argument, contains data to be set or reference 20047 * parameter for get 20048 * flag - bit flag, indicating open settings, 32/64 bit type 20049 * cred_p - user credential pointer 20050 * rval_p - calling process return value (OPT) 20051 * 20052 * Return Code: EINVAL 20053 * ENOTTY 20054 * ENXIO 20055 * EIO 20056 * EFAULT 20057 * ENOTSUP 20058 * EPERM 20059 * 20060 * Context: Called from the device switch at normal priority. 20061 */ 20062 20063 static int 20064 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 20065 { 20066 struct sd_lun *un = NULL; 20067 int err = 0; 20068 int i = 0; 20069 cred_t *cr; 20070 int tmprval = EINVAL; 20071 int is_valid; 20072 20073 /* 20074 * All device accesses go thru sdstrategy where we check on suspend 20075 * status 20076 */ 20077 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20078 return (ENXIO); 20079 } 20080 20081 ASSERT(!mutex_owned(SD_MUTEX(un))); 20082 20083 20084 is_valid = SD_IS_VALID_LABEL(un); 20085 20086 /* 20087 * Moved this wait from sd_uscsi_strategy to here for 20088 * reasons of deadlock prevention. Internal driver commands, 20089 * specifically those to change a devices power level, result 20090 * in a call to sd_uscsi_strategy. 20091 */ 20092 mutex_enter(SD_MUTEX(un)); 20093 while ((un->un_state == SD_STATE_SUSPENDED) || 20094 (un->un_state == SD_STATE_PM_CHANGING)) { 20095 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 20096 } 20097 /* 20098 * Twiddling the counter here protects commands from now 20099 * through to the top of sd_uscsi_strategy. Without the 20100 * counter inc. a power down, for example, could get in 20101 * after the above check for state is made and before 20102 * execution gets to the top of sd_uscsi_strategy. 20103 * That would cause problems. 
20104 */ 20105 un->un_ncmds_in_driver++; 20106 20107 if (!is_valid && 20108 (flag & (FNDELAY | FNONBLOCK))) { 20109 switch (cmd) { 20110 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 20111 case DKIOCGVTOC: 20112 case DKIOCGEXTVTOC: 20113 case DKIOCGAPART: 20114 case DKIOCPARTINFO: 20115 case DKIOCEXTPARTINFO: 20116 case DKIOCSGEOM: 20117 case DKIOCSAPART: 20118 case DKIOCGETEFI: 20119 case DKIOCPARTITION: 20120 case DKIOCSVTOC: 20121 case DKIOCSEXTVTOC: 20122 case DKIOCSETEFI: 20123 case DKIOCGMBOOT: 20124 case DKIOCSMBOOT: 20125 case DKIOCG_PHYGEOM: 20126 case DKIOCG_VIRTGEOM: 20127 /* let cmlb handle it */ 20128 goto skip_ready_valid; 20129 20130 case CDROMPAUSE: 20131 case CDROMRESUME: 20132 case CDROMPLAYMSF: 20133 case CDROMPLAYTRKIND: 20134 case CDROMREADTOCHDR: 20135 case CDROMREADTOCENTRY: 20136 case CDROMSTOP: 20137 case CDROMSTART: 20138 case CDROMVOLCTRL: 20139 case CDROMSUBCHNL: 20140 case CDROMREADMODE2: 20141 case CDROMREADMODE1: 20142 case CDROMREADOFFSET: 20143 case CDROMSBLKMODE: 20144 case CDROMGBLKMODE: 20145 case CDROMGDRVSPEED: 20146 case CDROMSDRVSPEED: 20147 case CDROMCDDA: 20148 case CDROMCDXA: 20149 case CDROMSUBCODE: 20150 if (!ISCD(un)) { 20151 un->un_ncmds_in_driver--; 20152 ASSERT(un->un_ncmds_in_driver >= 0); 20153 mutex_exit(SD_MUTEX(un)); 20154 return (ENOTTY); 20155 } 20156 break; 20157 case FDEJECT: 20158 case DKIOCEJECT: 20159 case CDROMEJECT: 20160 if (!un->un_f_eject_media_supported) { 20161 un->un_ncmds_in_driver--; 20162 ASSERT(un->un_ncmds_in_driver >= 0); 20163 mutex_exit(SD_MUTEX(un)); 20164 return (ENOTTY); 20165 } 20166 break; 20167 case DKIOCFLUSHWRITECACHE: 20168 mutex_exit(SD_MUTEX(un)); 20169 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20170 if (err != 0) { 20171 mutex_enter(SD_MUTEX(un)); 20172 un->un_ncmds_in_driver--; 20173 ASSERT(un->un_ncmds_in_driver >= 0); 20174 mutex_exit(SD_MUTEX(un)); 20175 return (EIO); 20176 } 20177 mutex_enter(SD_MUTEX(un)); 20178 /* FALLTHROUGH */ 20179 case DKIOCREMOVABLE: 20180 case DKIOCHOTPLUGGABLE: 20181 case DKIOCINFO: 20182 case DKIOCGMEDIAINFO: 20183 case MHIOCENFAILFAST: 20184 case MHIOCSTATUS: 20185 case MHIOCTKOWN: 20186 case MHIOCRELEASE: 20187 case MHIOCGRP_INKEYS: 20188 case MHIOCGRP_INRESV: 20189 case MHIOCGRP_REGISTER: 20190 case MHIOCGRP_RESERVE: 20191 case MHIOCGRP_PREEMPTANDABORT: 20192 case MHIOCGRP_REGISTERANDIGNOREKEY: 20193 case CDROMCLOSETRAY: 20194 case USCSICMD: 20195 goto skip_ready_valid; 20196 default: 20197 break; 20198 } 20199 20200 mutex_exit(SD_MUTEX(un)); 20201 err = sd_ready_and_valid(un, SDPART(dev)); 20202 mutex_enter(SD_MUTEX(un)); 20203 20204 if (err != SD_READY_VALID) { 20205 switch (cmd) { 20206 case DKIOCSTATE: 20207 case CDROMGDRVSPEED: 20208 case CDROMSDRVSPEED: 20209 case FDEJECT: /* for eject command */ 20210 case DKIOCEJECT: 20211 case CDROMEJECT: 20212 case DKIOCREMOVABLE: 20213 case DKIOCHOTPLUGGABLE: 20214 break; 20215 default: 20216 if (un->un_f_has_removable_media) { 20217 err = ENXIO; 20218 } else { 20219 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 20220 if (err == SD_RESERVED_BY_OTHERS) { 20221 err = EACCES; 20222 } else { 20223 err = EIO; 20224 } 20225 } 20226 un->un_ncmds_in_driver--; 20227 ASSERT(un->un_ncmds_in_driver >= 0); 20228 mutex_exit(SD_MUTEX(un)); 20229 return (err); 20230 } 20231 } 20232 } 20233 20234 skip_ready_valid: 20235 mutex_exit(SD_MUTEX(un)); 20236 20237 switch (cmd) { 20238 case DKIOCINFO: 20239 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20240 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20241 break; 20242 20243 case DKIOCGMEDIAINFO: 
20244 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20245 err = sd_get_media_info(dev, (caddr_t)arg, flag); 20246 break; 20247 20248 case DKIOCGGEOM: 20249 case DKIOCGVTOC: 20250 case DKIOCGEXTVTOC: 20251 case DKIOCGAPART: 20252 case DKIOCPARTINFO: 20253 case DKIOCEXTPARTINFO: 20254 case DKIOCSGEOM: 20255 case DKIOCSAPART: 20256 case DKIOCGETEFI: 20257 case DKIOCPARTITION: 20258 case DKIOCSVTOC: 20259 case DKIOCSEXTVTOC: 20260 case DKIOCSETEFI: 20261 case DKIOCGMBOOT: 20262 case DKIOCSMBOOT: 20263 case DKIOCG_PHYGEOM: 20264 case DKIOCG_VIRTGEOM: 20265 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 20266 20267 /* TUR should spin up */ 20268 20269 if (un->un_f_has_removable_media) 20270 err = sd_send_scsi_TEST_UNIT_READY(un, 20271 SD_CHECK_FOR_MEDIA); 20272 else 20273 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20274 20275 if (err != 0) 20276 break; 20277 20278 err = cmlb_ioctl(un->un_cmlbhandle, dev, 20279 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 20280 20281 if ((err == 0) && 20282 ((cmd == DKIOCSETEFI) || 20283 (un->un_f_pkstats_enabled) && 20284 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 20285 20286 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 20287 (void *)SD_PATH_DIRECT); 20288 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 20289 sd_set_pstats(un); 20290 SD_TRACE(SD_LOG_IO_PARTITION, un, 20291 "sd_ioctl: un:0x%p pstats created and " 20292 "set\n", un); 20293 } 20294 } 20295 20296 if ((cmd == DKIOCSVTOC) || 20297 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 20298 20299 mutex_enter(SD_MUTEX(un)); 20300 if (un->un_f_devid_supported && 20301 (un->un_f_opt_fab_devid == TRUE)) { 20302 if (un->un_devid == NULL) { 20303 sd_register_devid(un, SD_DEVINFO(un), 20304 SD_TARGET_IS_UNRESERVED); 20305 } else { 20306 /* 20307 * The device id for this disk 20308 * has been fabricated. The 20309 * device id must be preserved 20310 * by writing it back out to 20311 * disk. 20312 */ 20313 if (sd_write_deviceid(un) != 0) { 20314 ddi_devid_free(un->un_devid); 20315 un->un_devid = NULL; 20316 } 20317 } 20318 } 20319 mutex_exit(SD_MUTEX(un)); 20320 } 20321 20322 break; 20323 20324 case DKIOCLOCK: 20325 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20326 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20327 SD_PATH_STANDARD); 20328 break; 20329 20330 case DKIOCUNLOCK: 20331 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20332 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20333 SD_PATH_STANDARD); 20334 break; 20335 20336 case DKIOCSTATE: { 20337 enum dkio_state state; 20338 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20339 20340 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20341 err = EFAULT; 20342 } else { 20343 err = sd_check_media(dev, state); 20344 if (err == 0) { 20345 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20346 sizeof (int), flag) != 0) 20347 err = EFAULT; 20348 } 20349 } 20350 break; 20351 } 20352 20353 case DKIOCREMOVABLE: 20354 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20355 i = un->un_f_has_removable_media ? 1 : 0; 20356 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20357 err = EFAULT; 20358 } else { 20359 err = 0; 20360 } 20361 break; 20362 20363 case DKIOCHOTPLUGGABLE: 20364 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 20365 i = un->un_f_is_hotpluggable ? 
1 : 0; 20366 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20367 err = EFAULT; 20368 } else { 20369 err = 0; 20370 } 20371 break; 20372 20373 case DKIOCGTEMPERATURE: 20374 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 20375 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20376 break; 20377 20378 case MHIOCENFAILFAST: 20379 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20380 if ((err = drv_priv(cred_p)) == 0) { 20381 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20382 } 20383 break; 20384 20385 case MHIOCTKOWN: 20386 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20387 if ((err = drv_priv(cred_p)) == 0) { 20388 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20389 } 20390 break; 20391 20392 case MHIOCRELEASE: 20393 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20394 if ((err = drv_priv(cred_p)) == 0) { 20395 err = sd_mhdioc_release(dev); 20396 } 20397 break; 20398 20399 case MHIOCSTATUS: 20400 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20401 if ((err = drv_priv(cred_p)) == 0) { 20402 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20403 case 0: 20404 err = 0; 20405 break; 20406 case EACCES: 20407 *rval_p = 1; 20408 err = 0; 20409 break; 20410 default: 20411 err = EIO; 20412 break; 20413 } 20414 } 20415 break; 20416 20417 case MHIOCQRESERVE: 20418 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20419 if ((err = drv_priv(cred_p)) == 0) { 20420 err = sd_reserve_release(dev, SD_RESERVE); 20421 } 20422 break; 20423 20424 case MHIOCREREGISTERDEVID: 20425 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20426 if (drv_priv(cred_p) == EPERM) { 20427 err = EPERM; 20428 } else if (!un->un_f_devid_supported) { 20429 err = ENOTTY; 20430 } else { 20431 err = sd_mhdioc_register_devid(dev); 20432 } 20433 break; 20434 20435 case MHIOCGRP_INKEYS: 20436 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20437 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20438 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20439 err = ENOTSUP; 20440 } else { 20441 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20442 flag); 20443 } 20444 } 20445 break; 20446 20447 case MHIOCGRP_INRESV: 20448 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20449 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20450 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20451 err = ENOTSUP; 20452 } else { 20453 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20454 } 20455 } 20456 break; 20457 20458 case MHIOCGRP_REGISTER: 20459 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20460 if ((err = drv_priv(cred_p)) != EPERM) { 20461 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20462 err = ENOTSUP; 20463 } else if (arg != NULL) { 20464 mhioc_register_t reg; 20465 if (ddi_copyin((void *)arg, ®, 20466 sizeof (mhioc_register_t), flag) != 0) { 20467 err = EFAULT; 20468 } else { 20469 err = 20470 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20471 un, SD_SCSI3_REGISTER, 20472 (uchar_t *)®); 20473 } 20474 } 20475 } 20476 break; 20477 20478 case MHIOCGRP_RESERVE: 20479 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20480 if ((err = drv_priv(cred_p)) != EPERM) { 20481 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20482 err = ENOTSUP; 20483 } else if (arg != NULL) { 20484 mhioc_resv_desc_t resv_desc; 20485 if (ddi_copyin((void *)arg, &resv_desc, 20486 sizeof (mhioc_resv_desc_t), flag) != 0) { 20487 err = EFAULT; 20488 } else { 20489 err = 20490 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20491 un, SD_SCSI3_RESERVE, 20492 (uchar_t *)&resv_desc); 20493 } 20494 } 20495 } 20496 break; 20497 20498 
case MHIOCGRP_PREEMPTANDABORT: 20499 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20500 if ((err = drv_priv(cred_p)) != EPERM) { 20501 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20502 err = ENOTSUP; 20503 } else if (arg != NULL) { 20504 mhioc_preemptandabort_t preempt_abort; 20505 if (ddi_copyin((void *)arg, &preempt_abort, 20506 sizeof (mhioc_preemptandabort_t), 20507 flag) != 0) { 20508 err = EFAULT; 20509 } else { 20510 err = 20511 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20512 un, SD_SCSI3_PREEMPTANDABORT, 20513 (uchar_t *)&preempt_abort); 20514 } 20515 } 20516 } 20517 break; 20518 20519 case MHIOCGRP_REGISTERANDIGNOREKEY: 20520 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20521 if ((err = drv_priv(cred_p)) != EPERM) { 20522 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20523 err = ENOTSUP; 20524 } else if (arg != NULL) { 20525 mhioc_registerandignorekey_t r_and_i; 20526 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20527 sizeof (mhioc_registerandignorekey_t), 20528 flag) != 0) { 20529 err = EFAULT; 20530 } else { 20531 err = 20532 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20533 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20534 (uchar_t *)&r_and_i); 20535 } 20536 } 20537 } 20538 break; 20539 20540 case USCSICMD: 20541 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20542 cr = ddi_get_cred(); 20543 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20544 err = EPERM; 20545 } else { 20546 enum uio_seg uioseg; 20547 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20548 UIO_USERSPACE; 20549 if (un->un_f_format_in_progress == TRUE) { 20550 err = EAGAIN; 20551 break; 20552 } 20553 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20554 flag, uioseg, SD_PATH_STANDARD); 20555 } 20556 break; 20557 20558 case CDROMPAUSE: 20559 case CDROMRESUME: 20560 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20561 if (!ISCD(un)) { 20562 err = ENOTTY; 20563 } else { 20564 err = sr_pause_resume(dev, cmd); 20565 } 20566 break; 20567 20568 case CDROMPLAYMSF: 20569 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20570 if (!ISCD(un)) { 20571 err = ENOTTY; 20572 } else { 20573 err = sr_play_msf(dev, (caddr_t)arg, flag); 20574 } 20575 break; 20576 20577 case CDROMPLAYTRKIND: 20578 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20579 #if defined(__i386) || defined(__amd64) 20580 /* 20581 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20582 */ 20583 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20584 #else 20585 if (!ISCD(un)) { 20586 #endif 20587 err = ENOTTY; 20588 } else { 20589 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20590 } 20591 break; 20592 20593 case CDROMREADTOCHDR: 20594 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20595 if (!ISCD(un)) { 20596 err = ENOTTY; 20597 } else { 20598 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20599 } 20600 break; 20601 20602 case CDROMREADTOCENTRY: 20603 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20604 if (!ISCD(un)) { 20605 err = ENOTTY; 20606 } else { 20607 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20608 } 20609 break; 20610 20611 case CDROMSTOP: 20612 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20613 if (!ISCD(un)) { 20614 err = ENOTTY; 20615 } else { 20616 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20617 SD_PATH_STANDARD); 20618 } 20619 break; 20620 20621 case CDROMSTART: 20622 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20623 if (!ISCD(un)) { 20624 err = ENOTTY; 20625 } else { 20626 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20627 SD_PATH_STANDARD); 20628 } 20629 break; 
20630 20631 case CDROMCLOSETRAY: 20632 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20633 if (!ISCD(un)) { 20634 err = ENOTTY; 20635 } else { 20636 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20637 SD_PATH_STANDARD); 20638 } 20639 break; 20640 20641 case FDEJECT: /* for eject command */ 20642 case DKIOCEJECT: 20643 case CDROMEJECT: 20644 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20645 if (!un->un_f_eject_media_supported) { 20646 err = ENOTTY; 20647 } else { 20648 err = sr_eject(dev); 20649 } 20650 break; 20651 20652 case CDROMVOLCTRL: 20653 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20654 if (!ISCD(un)) { 20655 err = ENOTTY; 20656 } else { 20657 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20658 } 20659 break; 20660 20661 case CDROMSUBCHNL: 20662 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20663 if (!ISCD(un)) { 20664 err = ENOTTY; 20665 } else { 20666 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20667 } 20668 break; 20669 20670 case CDROMREADMODE2: 20671 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20672 if (!ISCD(un)) { 20673 err = ENOTTY; 20674 } else if (un->un_f_cfg_is_atapi == TRUE) { 20675 /* 20676 * If the drive supports READ CD, use that instead of 20677 * switching the LBA size via a MODE SELECT 20678 * Block Descriptor 20679 */ 20680 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20681 } else { 20682 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20683 } 20684 break; 20685 20686 case CDROMREADMODE1: 20687 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20688 if (!ISCD(un)) { 20689 err = ENOTTY; 20690 } else { 20691 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20692 } 20693 break; 20694 20695 case CDROMREADOFFSET: 20696 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20697 if (!ISCD(un)) { 20698 err = ENOTTY; 20699 } else { 20700 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20701 flag); 20702 } 20703 break; 20704 20705 case CDROMSBLKMODE: 20706 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20707 /* 20708 * There is no means of changing block size in case of atapi 20709 * drives, thus return ENOTTY if drive type is atapi 20710 */ 20711 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20712 err = ENOTTY; 20713 } else if (un->un_f_mmc_cap == TRUE) { 20714 20715 /* 20716 * MMC Devices do not support changing the 20717 * logical block size 20718 * 20719 * Note: EINVAL is being returned instead of ENOTTY to 20720 * maintain consistancy with the original mmc 20721 * driver update. 20722 */ 20723 err = EINVAL; 20724 } else { 20725 mutex_enter(SD_MUTEX(un)); 20726 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20727 (un->un_ncmds_in_transport > 0)) { 20728 mutex_exit(SD_MUTEX(un)); 20729 err = EINVAL; 20730 } else { 20731 mutex_exit(SD_MUTEX(un)); 20732 err = sr_change_blkmode(dev, cmd, arg, flag); 20733 } 20734 } 20735 break; 20736 20737 case CDROMGBLKMODE: 20738 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20739 if (!ISCD(un)) { 20740 err = ENOTTY; 20741 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20742 (un->un_f_blockcount_is_valid != FALSE)) { 20743 /* 20744 * Drive is an ATAPI drive so return target block 20745 * size for ATAPI drives since we cannot change the 20746 * blocksize on ATAPI drives. Used primarily to detect 20747 * if an ATAPI cdrom is present. 20748 */ 20749 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20750 sizeof (int), flag) != 0) { 20751 err = EFAULT; 20752 } else { 20753 err = 0; 20754 } 20755 20756 } else { 20757 /* 20758 * Drive supports changing block sizes via a Mode 20759 * Select. 
20760 */ 20761 err = sr_change_blkmode(dev, cmd, arg, flag); 20762 } 20763 break; 20764 20765 case CDROMGDRVSPEED: 20766 case CDROMSDRVSPEED: 20767 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20768 if (!ISCD(un)) { 20769 err = ENOTTY; 20770 } else if (un->un_f_mmc_cap == TRUE) { 20771 /* 20772 * Note: In the future the driver implementation 20773 * for getting and 20774 * setting cd speed should entail: 20775 * 1) If non-mmc try the Toshiba mode page 20776 * (sr_change_speed) 20777 * 2) If mmc but no support for Real Time Streaming try 20778 * the SET CD SPEED (0xBB) command 20779 * (sr_atapi_change_speed) 20780 * 3) If mmc and support for Real Time Streaming 20781 * try the GET PERFORMANCE and SET STREAMING 20782 * commands (not yet implemented, 4380808) 20783 */ 20784 /* 20785 * As per recent MMC spec, CD-ROM speed is variable 20786 * and changes with LBA. Since there is no such 20787 * thing as drive speed now, fail this ioctl. 20788 * 20789 * Note: EINVAL is returned for consistency with the original 20790 * implementation, which included support for getting 20791 * the drive speed of mmc devices but not setting 20792 * the drive speed. Thus EINVAL would be returned 20793 * if a set request was made for an mmc device. 20794 * We no longer support get or set speed for 20795 * mmc but need to remain consistent with regard 20796 * to the error code returned. 20797 */ 20798 err = EINVAL; 20799 } else if (un->un_f_cfg_is_atapi == TRUE) { 20800 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20801 } else { 20802 err = sr_change_speed(dev, cmd, arg, flag); 20803 } 20804 break; 20805 20806 case CDROMCDDA: 20807 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20808 if (!ISCD(un)) { 20809 err = ENOTTY; 20810 } else { 20811 err = sr_read_cdda(dev, (void *)arg, flag); 20812 } 20813 break; 20814 20815 case CDROMCDXA: 20816 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20817 if (!ISCD(un)) { 20818 err = ENOTTY; 20819 } else { 20820 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20821 } 20822 break; 20823 20824 case CDROMSUBCODE: 20825 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20826 if (!ISCD(un)) { 20827 err = ENOTTY; 20828 } else { 20829 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20830 } 20831 break; 20832 20833 20834 #ifdef SDDEBUG 20835 /* RESET/ABORTS testing ioctls */ 20836 case DKIOCRESET: { 20837 int reset_level; 20838 20839 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20840 err = EFAULT; 20841 } else { 20842 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20843 "reset_level = 0x%lx\n", reset_level); 20844 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20845 err = 0; 20846 } else { 20847 err = EIO; 20848 } 20849 } 20850 break; 20851 } 20852 20853 case DKIOCABORT: 20854 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20855 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20856 err = 0; 20857 } else { 20858 err = EIO; 20859 } 20860 break; 20861 #endif 20862 20863 #ifdef SD_FAULT_INJECTION 20864 /* SDIOC FaultInjection testing ioctls */ 20865 case SDIOCSTART: 20866 case SDIOCSTOP: 20867 case SDIOCINSERTPKT: 20868 case SDIOCINSERTXB: 20869 case SDIOCINSERTUN: 20870 case SDIOCINSERTARQ: 20871 case SDIOCPUSH: 20872 case SDIOCRETRIEVE: 20873 case SDIOCRUN: 20874 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 20875 "SDIOC detected cmd:0x%X:\n", cmd); 20876 /* call error generator */ 20877 sd_faultinjection_ioctl(cmd, arg, un); 20878 err = 0; 20879 break; 20880 20881 #endif /* SD_FAULT_INJECTION */ 20882 20883 case DKIOCFLUSHWRITECACHE: 20884 { 20885 struct dk_callback *dkc = (struct
dk_callback *)arg; 20886 20887 mutex_enter(SD_MUTEX(un)); 20888 if (!un->un_f_sync_cache_supported || 20889 !un->un_f_write_cache_enabled) { 20890 err = un->un_f_sync_cache_supported ? 20891 0 : ENOTSUP; 20892 mutex_exit(SD_MUTEX(un)); 20893 if ((flag & FKIOCTL) && dkc != NULL && 20894 dkc->dkc_callback != NULL) { 20895 (*dkc->dkc_callback)(dkc->dkc_cookie, 20896 err); 20897 /* 20898 * Did callback and reported error. 20899 * Since we did a callback, ioctl 20900 * should return 0. 20901 */ 20902 err = 0; 20903 } 20904 break; 20905 } 20906 mutex_exit(SD_MUTEX(un)); 20907 20908 if ((flag & FKIOCTL) && dkc != NULL && 20909 dkc->dkc_callback != NULL) { 20910 /* async SYNC CACHE request */ 20911 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20912 } else { 20913 /* synchronous SYNC CACHE request */ 20914 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20915 } 20916 } 20917 break; 20918 20919 case DKIOCGETWCE: { 20920 20921 int wce; 20922 20923 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20924 break; 20925 } 20926 20927 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20928 err = EFAULT; 20929 } 20930 break; 20931 } 20932 20933 case DKIOCSETWCE: { 20934 20935 int wce, sync_supported; 20936 20937 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20938 err = EFAULT; 20939 break; 20940 } 20941 20942 /* 20943 * Synchronize multiple threads trying to enable 20944 * or disable the cache via the un_f_wcc_cv 20945 * condition variable. 20946 */ 20947 mutex_enter(SD_MUTEX(un)); 20948 20949 /* 20950 * Don't allow the cache to be enabled if the 20951 * config file has it disabled. 20952 */ 20953 if (un->un_f_opt_disable_cache && wce) { 20954 mutex_exit(SD_MUTEX(un)); 20955 err = EINVAL; 20956 break; 20957 } 20958 20959 /* 20960 * Wait for write cache change in progress 20961 * bit to be clear before proceeding. 20962 */ 20963 while (un->un_f_wcc_inprog) 20964 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20965 20966 un->un_f_wcc_inprog = 1; 20967 20968 if (un->un_f_write_cache_enabled && wce == 0) { 20969 /* 20970 * Disable the write cache. Don't clear 20971 * un_f_write_cache_enabled until after 20972 * the mode select and flush are complete. 20973 */ 20974 sync_supported = un->un_f_sync_cache_supported; 20975 20976 /* 20977 * If cache flush is suppressed, we assume that the 20978 * controller firmware will take care of managing the 20979 * write cache for us: no need to explicitly 20980 * disable it. 20981 */ 20982 if (!un->un_f_suppress_cache_flush) { 20983 mutex_exit(SD_MUTEX(un)); 20984 if ((err = sd_cache_control(un, 20985 SD_CACHE_NOCHANGE, 20986 SD_CACHE_DISABLE)) == 0 && 20987 sync_supported) { 20988 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20989 NULL); 20990 } 20991 } else { 20992 mutex_exit(SD_MUTEX(un)); 20993 } 20994 20995 mutex_enter(SD_MUTEX(un)); 20996 if (err == 0) { 20997 un->un_f_write_cache_enabled = 0; 20998 } 20999 21000 } else if (!un->un_f_write_cache_enabled && wce != 0) { 21001 /* 21002 * Set un_f_write_cache_enabled first, so there is 21003 * no window where the cache is enabled, but the 21004 * bit says it isn't. 21005 */ 21006 un->un_f_write_cache_enabled = 1; 21007 21008 /* 21009 * If cache flush is suppressed, we assume that the 21010 * controller firmware will take care of managing the 21011 * write cache for us: no need to explicitly 21012 * enable it. 
21013 */ 21014 if (!un->un_f_suppress_cache_flush) { 21015 mutex_exit(SD_MUTEX(un)); 21016 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 21017 SD_CACHE_ENABLE); 21018 } else { 21019 mutex_exit(SD_MUTEX(un)); 21020 } 21021 21022 mutex_enter(SD_MUTEX(un)); 21023 21024 if (err) { 21025 un->un_f_write_cache_enabled = 0; 21026 } 21027 } 21028 21029 un->un_f_wcc_inprog = 0; 21030 cv_broadcast(&un->un_wcc_cv); 21031 mutex_exit(SD_MUTEX(un)); 21032 break; 21033 } 21034 21035 default: 21036 err = ENOTTY; 21037 break; 21038 } 21039 mutex_enter(SD_MUTEX(un)); 21040 un->un_ncmds_in_driver--; 21041 ASSERT(un->un_ncmds_in_driver >= 0); 21042 mutex_exit(SD_MUTEX(un)); 21043 21044 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21045 return (err); 21046 } 21047 21048 21049 /* 21050 * Function: sd_dkio_ctrl_info 21051 * 21052 * Description: This routine is the driver entry point for handling controller 21053 * information ioctl requests (DKIOCINFO). 21054 * 21055 * Arguments: dev - the device number 21056 * arg - pointer to user provided dk_cinfo structure 21057 * specifying the controller type and attributes. 21058 * flag - this argument is a pass through to ddi_copyxxx() 21059 * directly from the mode argument of ioctl(). 21060 * 21061 * Return Code: 0 21062 * EFAULT 21063 * ENXIO 21064 */ 21065 21066 static int 21067 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 21068 { 21069 struct sd_lun *un = NULL; 21070 struct dk_cinfo *info; 21071 dev_info_t *pdip; 21072 int lun, tgt; 21073 21074 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21075 return (ENXIO); 21076 } 21077 21078 info = (struct dk_cinfo *) 21079 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 21080 21081 switch (un->un_ctype) { 21082 case CTYPE_CDROM: 21083 info->dki_ctype = DKC_CDROM; 21084 break; 21085 default: 21086 info->dki_ctype = DKC_SCSI_CCS; 21087 break; 21088 } 21089 pdip = ddi_get_parent(SD_DEVINFO(un)); 21090 info->dki_cnum = ddi_get_instance(pdip); 21091 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 21092 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 21093 } else { 21094 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 21095 DK_DEVLEN - 1); 21096 } 21097 21098 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21099 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 21100 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21101 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 21102 21103 /* Unit Information */ 21104 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 21105 info->dki_slave = ((tgt << 3) | lun); 21106 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 21107 DK_DEVLEN - 1); 21108 info->dki_flags = DKI_FMTVOL; 21109 info->dki_partition = SDPART(dev); 21110 21111 /* Max Transfer size of this device in blocks */ 21112 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 21113 info->dki_addr = 0; 21114 info->dki_space = 0; 21115 info->dki_prio = 0; 21116 info->dki_vec = 0; 21117 21118 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 21119 kmem_free(info, sizeof (struct dk_cinfo)); 21120 return (EFAULT); 21121 } else { 21122 kmem_free(info, sizeof (struct dk_cinfo)); 21123 return (0); 21124 } 21125 } 21126 21127 21128 /* 21129 * Function: sd_get_media_info 21130 * 21131 * Description: This routine is the driver entry point for handling ioctl 21132 * requests for the media type or command set profile used by the 21133 * drive to operate on the media (DKIOCGMEDIAINFO). 
21134 * 21135 * Arguments: dev - the device number 21136 * arg - pointer to user provided dk_minfo structure 21137 * specifying the media type, logical block size and 21138 * drive capacity. 21139 * flag - this argument is a pass through to ddi_copyxxx() 21140 * directly from the mode argument of ioctl(). 21141 * 21142 * Return Code: 0 21143 * EACCESS 21144 * EFAULT 21145 * ENXIO 21146 * EIO 21147 */ 21148 21149 static int 21150 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 21151 { 21152 struct sd_lun *un = NULL; 21153 struct uscsi_cmd com; 21154 struct scsi_inquiry *sinq; 21155 struct dk_minfo media_info; 21156 u_longlong_t media_capacity; 21157 uint64_t capacity; 21158 uint_t lbasize; 21159 uchar_t *out_data; 21160 uchar_t *rqbuf; 21161 int rval = 0; 21162 int rtn; 21163 21164 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 21165 (un->un_state == SD_STATE_OFFLINE)) { 21166 return (ENXIO); 21167 } 21168 21169 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 21170 21171 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 21172 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21173 21174 /* Issue a TUR to determine if the drive is ready with media present */ 21175 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 21176 if (rval == ENXIO) { 21177 goto done; 21178 } 21179 21180 /* Now get configuration data */ 21181 if (ISCD(un)) { 21182 media_info.dki_media_type = DK_CDROM; 21183 21184 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 21185 if (un->un_f_mmc_cap == TRUE) { 21186 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 21187 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 21188 SD_PATH_STANDARD); 21189 21190 if (rtn) { 21191 /* 21192 * Failed for other than an illegal request 21193 * or command not supported 21194 */ 21195 if ((com.uscsi_status == STATUS_CHECK) && 21196 (com.uscsi_rqstatus == STATUS_GOOD)) { 21197 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 21198 (rqbuf[12] != 0x20)) { 21199 rval = EIO; 21200 goto done; 21201 } 21202 } 21203 } else { 21204 /* 21205 * The GET CONFIGURATION command succeeded 21206 * so set the media type according to the 21207 * returned data 21208 */ 21209 media_info.dki_media_type = out_data[6]; 21210 media_info.dki_media_type <<= 8; 21211 media_info.dki_media_type |= out_data[7]; 21212 } 21213 } 21214 } else { 21215 /* 21216 * The profile list is not available, so we attempt to identify 21217 * the media type based on the inquiry data 21218 */ 21219 sinq = un->un_sd->sd_inq; 21220 if ((sinq->inq_dtype == DTYPE_DIRECT) || 21221 (sinq->inq_dtype == DTYPE_OPTICAL)) { 21222 /* This is a direct access device or optical disk */ 21223 media_info.dki_media_type = DK_FIXED_DISK; 21224 21225 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 21226 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 21227 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 21228 media_info.dki_media_type = DK_ZIP; 21229 } else if ( 21230 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 21231 media_info.dki_media_type = DK_JAZ; 21232 } 21233 } 21234 } else { 21235 /* 21236 * Not a CD, direct access or optical disk so return 21237 * unknown media 21238 */ 21239 media_info.dki_media_type = DK_UNKNOWN; 21240 } 21241 } 21242 21243 /* Now read the capacity so we can provide the lbasize and capacity */ 21244 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21245 SD_PATH_DIRECT)) { 21246 case 0: 21247 break; 21248 case EACCES: 21249 rval = EACCES; 21250 goto done; 21251 default: 21252 rval = EIO; 21253 goto done; 21254 } 21255 21256 /* 21257 * If lun 
is expanded dynamically, update the un structure. 21258 */ 21259 mutex_enter(SD_MUTEX(un)); 21260 if ((un->un_f_blockcount_is_valid == TRUE) && 21261 (un->un_f_tgt_blocksize_is_valid == TRUE) && 21262 (capacity > un->un_blockcount)) { 21263 sd_update_block_info(un, lbasize, capacity); 21264 } 21265 mutex_exit(SD_MUTEX(un)); 21266 21267 media_info.dki_lbsize = lbasize; 21268 media_capacity = capacity; 21269 21270 /* 21271 * sd_send_scsi_READ_CAPACITY() reports capacity in 21272 * un->un_sys_blocksize chunks. So we need to convert it into 21273 * cap.lbasize chunks. 21274 */ 21275 media_capacity *= un->un_sys_blocksize; 21276 media_capacity /= lbasize; 21277 media_info.dki_capacity = media_capacity; 21278 21279 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21280 rval = EFAULT; 21281 /* Put goto. Anybody might add some code below in future */ 21282 goto done; 21283 } 21284 done: 21285 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21286 kmem_free(rqbuf, SENSE_LENGTH); 21287 return (rval); 21288 } 21289 21290 21291 /* 21292 * Function: sd_check_media 21293 * 21294 * Description: This utility routine implements the functionality for the 21295 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 21296 * driver state changes from that specified by the user 21297 * (inserted or ejected). For example, if the user specifies 21298 * DKIO_EJECTED and the current media state is inserted this 21299 * routine will immediately return DKIO_INSERTED. However, if the 21300 * current media state is not inserted the user thread will be 21301 * blocked until the drive state changes. If DKIO_NONE is specified 21302 * the user thread will block until a drive state change occurs. 21303 * 21304 * Arguments: dev - the device number 21305 * state - user pointer to a dkio_state, updated with the current 21306 * drive state at return. 21307 * 21308 * Return Code: ENXIO 21309 * EIO 21310 * EAGAIN 21311 * EINTR 21312 */ 21313 21314 static int 21315 sd_check_media(dev_t dev, enum dkio_state state) 21316 { 21317 struct sd_lun *un = NULL; 21318 enum dkio_state prev_state; 21319 opaque_t token = NULL; 21320 int rval = 0; 21321 21322 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21323 return (ENXIO); 21324 } 21325 21326 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 21327 21328 mutex_enter(SD_MUTEX(un)); 21329 21330 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 21331 "state=%x, mediastate=%x\n", state, un->un_mediastate); 21332 21333 prev_state = un->un_mediastate; 21334 21335 /* is there anything to do? */ 21336 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 21337 /* 21338 * submit the request to the scsi_watch service; 21339 * scsi_media_watch_cb() does the real work 21340 */ 21341 mutex_exit(SD_MUTEX(un)); 21342 21343 /* 21344 * This change handles the case where a scsi watch request is 21345 * added to a device that is powered down. To accomplish this 21346 * we power up the device before adding the scsi watch request, 21347 * since the scsi watch sends a TUR directly to the device 21348 * which the device cannot handle if it is powered down. 
21349 */ 21350 if (sd_pm_entry(un) != DDI_SUCCESS) { 21351 mutex_enter(SD_MUTEX(un)); 21352 goto done; 21353 } 21354 21355 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 21356 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 21357 (caddr_t)dev); 21358 21359 sd_pm_exit(un); 21360 21361 mutex_enter(SD_MUTEX(un)); 21362 if (token == NULL) { 21363 rval = EAGAIN; 21364 goto done; 21365 } 21366 21367 /* 21368 * This is a special case IOCTL that doesn't return 21369 * until the media state changes. Routine sdpower 21370 * knows about and handles this so don't count it 21371 * as an active cmd in the driver, which would 21372 * keep the device busy to the pm framework. 21373 * If the count isn't decremented the device can't 21374 * be powered down. 21375 */ 21376 un->un_ncmds_in_driver--; 21377 ASSERT(un->un_ncmds_in_driver >= 0); 21378 21379 /* 21380 * if a prior request had been made, this will be the same 21381 * token, as scsi_watch was designed that way. 21382 */ 21383 un->un_swr_token = token; 21384 un->un_specified_mediastate = state; 21385 21386 /* 21387 * now wait for media change 21388 * we will not be signalled unless mediastate == state but it is 21389 * still better to test for this condition, since there is a 21390 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 21391 */ 21392 SD_TRACE(SD_LOG_COMMON, un, 21393 "sd_check_media: waiting for media state change\n"); 21394 while (un->un_mediastate == state) { 21395 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 21396 SD_TRACE(SD_LOG_COMMON, un, 21397 "sd_check_media: waiting for media state " 21398 "was interrupted\n"); 21399 un->un_ncmds_in_driver++; 21400 rval = EINTR; 21401 goto done; 21402 } 21403 SD_TRACE(SD_LOG_COMMON, un, 21404 "sd_check_media: received signal, state=%x\n", 21405 un->un_mediastate); 21406 } 21407 /* 21408 * Inc the counter to indicate the device once again 21409 * has an active outstanding cmd. 21410 */ 21411 un->un_ncmds_in_driver++; 21412 } 21413 21414 /* invalidate geometry */ 21415 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 21416 sr_ejected(un); 21417 } 21418 21419 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 21420 uint64_t capacity; 21421 uint_t lbasize; 21422 21423 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 21424 mutex_exit(SD_MUTEX(un)); 21425 /* 21426 * Since the following routines use SD_PATH_DIRECT, we must 21427 * call PM directly before the upcoming disk accesses. This 21428 * may cause the disk to be power/spin up. 
21429 */ 21430 21431 if (sd_pm_entry(un) == DDI_SUCCESS) { 21432 rval = sd_send_scsi_READ_CAPACITY(un, 21433 &capacity, 21434 &lbasize, SD_PATH_DIRECT); 21435 if (rval != 0) { 21436 sd_pm_exit(un); 21437 mutex_enter(SD_MUTEX(un)); 21438 goto done; 21439 } 21440 } else { 21441 rval = EIO; 21442 mutex_enter(SD_MUTEX(un)); 21443 goto done; 21444 } 21445 mutex_enter(SD_MUTEX(un)); 21446 21447 sd_update_block_info(un, lbasize, capacity); 21448 21449 /* 21450 * Check if the media in the device is writable or not 21451 */ 21452 if (ISCD(un)) 21453 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21454 21455 mutex_exit(SD_MUTEX(un)); 21456 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21457 if ((cmlb_validate(un->un_cmlbhandle, 0, 21458 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21459 sd_set_pstats(un); 21460 SD_TRACE(SD_LOG_IO_PARTITION, un, 21461 "sd_check_media: un:0x%p pstats created and " 21462 "set\n", un); 21463 } 21464 21465 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21466 SD_PATH_DIRECT); 21467 sd_pm_exit(un); 21468 21469 mutex_enter(SD_MUTEX(un)); 21470 } 21471 done: 21472 un->un_f_watcht_stopped = FALSE; 21473 /* 21474 * Use of this local token and the mutex ensures that we avoid 21475 * some race conditions associated with terminating the 21476 * scsi watch. 21477 */ 21478 if (token) { 21479 un->un_swr_token = (opaque_t)NULL; 21480 mutex_exit(SD_MUTEX(un)); 21481 (void) scsi_watch_request_terminate(token, 21482 SCSI_WATCH_TERMINATE_WAIT); 21483 mutex_enter(SD_MUTEX(un)); 21484 } 21485 21486 /* 21487 * Update the capacity kstat value, if no media previously 21488 * (capacity kstat is 0) and a media has been inserted 21489 * (un_f_blockcount_is_valid == TRUE) 21490 */ 21491 if (un->un_errstats) { 21492 struct sd_errstats *stp = NULL; 21493 21494 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21495 if ((stp->sd_capacity.value.ui64 == 0) && 21496 (un->un_f_blockcount_is_valid == TRUE)) { 21497 stp->sd_capacity.value.ui64 = 21498 (uint64_t)((uint64_t)un->un_blockcount * 21499 un->un_sys_blocksize); 21500 } 21501 } 21502 mutex_exit(SD_MUTEX(un)); 21503 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21504 return (rval); 21505 } 21506 21507 21508 /* 21509 * Function: sd_delayed_cv_broadcast 21510 * 21511 * Description: Delayed cv_broadcast to allow for target to recover from media 21512 * insertion. 21513 * 21514 * Arguments: arg - driver soft state (unit) structure 21515 */ 21516 21517 static void 21518 sd_delayed_cv_broadcast(void *arg) 21519 { 21520 struct sd_lun *un = arg; 21521 21522 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21523 21524 mutex_enter(SD_MUTEX(un)); 21525 un->un_dcvb_timeid = NULL; 21526 cv_broadcast(&un->un_state_cv); 21527 mutex_exit(SD_MUTEX(un)); 21528 } 21529 21530 21531 /* 21532 * Function: sd_media_watch_cb 21533 * 21534 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21535 * routine processes the TUR sense data and updates the driver 21536 * state if a transition has occurred. The user thread 21537 * (sd_check_media) is then signalled. 
21538 * 21539 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21540 * among multiple watches that share this callback function 21541 * resultp - scsi watch facility result packet containing scsi 21542 * packet, status byte and sense data 21543 * 21544 * Return Code: 0 for success, -1 for failure 21545 */ 21546 21547 static int 21548 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21549 { 21550 struct sd_lun *un; 21551 struct scsi_status *statusp = resultp->statusp; 21552 uint8_t *sensep = (uint8_t *)resultp->sensep; 21553 enum dkio_state state = DKIO_NONE; 21554 dev_t dev = (dev_t)arg; 21555 uchar_t actual_sense_length; 21556 uint8_t skey, asc, ascq; 21557 21558 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21559 return (-1); 21560 } 21561 actual_sense_length = resultp->actual_sense_length; 21562 21563 mutex_enter(SD_MUTEX(un)); 21564 SD_TRACE(SD_LOG_COMMON, un, 21565 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21566 *((char *)statusp), (void *)sensep, actual_sense_length); 21567 21568 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21569 un->un_mediastate = DKIO_DEV_GONE; 21570 cv_broadcast(&un->un_state_cv); 21571 mutex_exit(SD_MUTEX(un)); 21572 21573 return (0); 21574 } 21575 21576 /* 21577 * If there was a check condition, then sensep points to valid sense data. 21578 * If the status was not a check condition but a reservation or busy status, 21579 * then the new state is DKIO_NONE. 21580 */ 21581 if (sensep != NULL) { 21582 skey = scsi_sense_key(sensep); 21583 asc = scsi_sense_asc(sensep); 21584 ascq = scsi_sense_ascq(sensep); 21585 21586 SD_INFO(SD_LOG_COMMON, un, 21587 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21588 skey, asc, ascq); 21589 /* This routine only uses up to 13 bytes of sense data. */ 21590 if (actual_sense_length >= 13) { 21591 if (skey == KEY_UNIT_ATTENTION) { 21592 if (asc == 0x28) { 21593 state = DKIO_INSERTED; 21594 } 21595 } else if (skey == KEY_NOT_READY) { 21596 /* 21597 * Sense data 02/04/02 means that the host 21598 * should send a start command. Explicitly 21599 * leave the media state as is 21600 * (inserted), since the media is inserted 21601 * and the host has stopped the device for PM 21602 * reasons. The next true read/write 21603 * to this media will bring the 21604 * device to the right state for 21605 * media access. 21606 */ 21607 if (asc == 0x3a) { 21608 state = DKIO_EJECTED; 21609 } else { 21610 /* 21611 * If the drive is busy with an 21612 * operation or long write, keep the 21613 * media in an inserted state. 21614 */ 21615 21616 if ((asc == 0x04) && 21617 ((ascq == 0x02) || 21618 (ascq == 0x07) || 21619 (ascq == 0x08))) { 21620 state = DKIO_INSERTED; 21621 } 21622 } 21623 } else if (skey == KEY_NO_SENSE) { 21624 if ((asc == 0x00) && (ascq == 0x00)) { 21625 /* 21626 * Sense Data 00/00/00 does not provide 21627 * any information about the state of 21628 * the media. Ignore it.
21629 */ 21630 mutex_exit(SD_MUTEX(un)); 21631 return (0); 21632 } 21633 } 21634 } 21635 } else if ((*((char *)statusp) == STATUS_GOOD) && 21636 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21637 state = DKIO_INSERTED; 21638 } 21639 21640 SD_TRACE(SD_LOG_COMMON, un, 21641 "sd_media_watch_cb: state=%x, specified=%x\n", 21642 state, un->un_specified_mediastate); 21643 21644 /* 21645 * now signal the waiting thread if this is *not* the specified state; 21646 * delay the signal if the state is DKIO_INSERTED to allow the target 21647 * to recover 21648 */ 21649 if (state != un->un_specified_mediastate) { 21650 un->un_mediastate = state; 21651 if (state == DKIO_INSERTED) { 21652 /* 21653 * delay the signal to give the drive a chance 21654 * to do what it apparently needs to do 21655 */ 21656 SD_TRACE(SD_LOG_COMMON, un, 21657 "sd_media_watch_cb: delayed cv_broadcast\n"); 21658 if (un->un_dcvb_timeid == NULL) { 21659 un->un_dcvb_timeid = 21660 timeout(sd_delayed_cv_broadcast, un, 21661 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21662 } 21663 } else { 21664 SD_TRACE(SD_LOG_COMMON, un, 21665 "sd_media_watch_cb: immediate cv_broadcast\n"); 21666 cv_broadcast(&un->un_state_cv); 21667 } 21668 } 21669 mutex_exit(SD_MUTEX(un)); 21670 return (0); 21671 } 21672 21673 21674 /* 21675 * Function: sd_dkio_get_temp 21676 * 21677 * Description: This routine is the driver entry point for handling ioctl 21678 * requests to get the disk temperature. 21679 * 21680 * Arguments: dev - the device number 21681 * arg - pointer to user provided dk_temperature structure. 21682 * flag - this argument is a pass through to ddi_copyxxx() 21683 * directly from the mode argument of ioctl(). 21684 * 21685 * Return Code: 0 21686 * EFAULT 21687 * ENXIO 21688 * EAGAIN 21689 */ 21690 21691 static int 21692 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21693 { 21694 struct sd_lun *un = NULL; 21695 struct dk_temperature *dktemp = NULL; 21696 uchar_t *temperature_page; 21697 int rval = 0; 21698 int path_flag = SD_PATH_STANDARD; 21699 21700 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21701 return (ENXIO); 21702 } 21703 21704 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21705 21706 /* copyin the disk temp argument to get the user flags */ 21707 if (ddi_copyin((void *)arg, dktemp, 21708 sizeof (struct dk_temperature), flag) != 0) { 21709 rval = EFAULT; 21710 goto done; 21711 } 21712 21713 /* Initialize the temperature to invalid. */ 21714 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21715 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21716 21717 /* 21718 * Note: Investigate removing the "bypass pm" semantic. 21719 * Can we just bypass PM always? 21720 */ 21721 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21722 path_flag = SD_PATH_DIRECT; 21723 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21724 mutex_enter(&un->un_pm_mutex); 21725 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21726 /* 21727 * If DKT_BYPASS_PM is set, and the drive happens to be 21728 * in low power mode, we can not wake it up, Need to 21729 * return EAGAIN. 21730 */ 21731 mutex_exit(&un->un_pm_mutex); 21732 rval = EAGAIN; 21733 goto done; 21734 } else { 21735 /* 21736 * Indicate to PM the device is busy. This is required 21737 * to avoid a race - i.e. the ioctl is issuing a 21738 * command and the pm framework brings down the device 21739 * to low power mode (possible power cut-off on some 21740 * platforms). 
21741 */ 21742 mutex_exit(&un->un_pm_mutex); 21743 if (sd_pm_entry(un) != DDI_SUCCESS) { 21744 rval = EAGAIN; 21745 goto done; 21746 } 21747 } 21748 } 21749 21750 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21751 21752 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21753 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21754 goto done2; 21755 } 21756 21757 /* 21758 * For the current temperature verify that the parameter length is 0x02 21759 * and the parameter code is 0x00 21760 */ 21761 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21762 (temperature_page[5] == 0x00)) { 21763 if (temperature_page[9] == 0xFF) { 21764 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21765 } else { 21766 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21767 } 21768 } 21769 21770 /* 21771 * For the reference temperature verify that the parameter 21772 * length is 0x02 and the parameter code is 0x01 21773 */ 21774 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21775 (temperature_page[11] == 0x01)) { 21776 if (temperature_page[15] == 0xFF) { 21777 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21778 } else { 21779 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21780 } 21781 } 21782 21783 /* Do the copyout regardless of the temperature command's status. */ 21784 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21785 flag) != 0) { 21786 rval = EFAULT; 21787 } 21788 21789 done2: 21790 if (path_flag == SD_PATH_DIRECT) { 21791 sd_pm_exit(un); 21792 } 21793 21794 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21795 done: 21796 if (dktemp != NULL) { 21797 kmem_free(dktemp, sizeof (struct dk_temperature)); 21798 } 21799 21800 return (rval); 21801 } 21802 21803 21804 /* 21805 * Function: sd_log_page_supported 21806 * 21807 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21808 * supported log pages. 21809 * 21810 * Arguments: un - pointer to the softstate struct for the target. 21811 * log_page - the log page code to look for in the supported list. 21812 * 21813 * Return Code: -1 - on error (log sense is optional and may not be supported). 21814 * 0 - log page not found. 21815 * 1 - log page found. 21816 */ 21817 21818 static int 21819 sd_log_page_supported(struct sd_lun *un, int log_page) 21820 { 21821 uchar_t *log_page_data; 21822 int i; 21823 int match = 0; 21824 int log_size; 21825 21826 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21827 21828 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21829 SD_PATH_DIRECT) != 0) { 21830 SD_ERROR(SD_LOG_COMMON, un, 21831 "sd_log_page_supported: failed log page retrieval\n"); 21832 kmem_free(log_page_data, 0xFF); 21833 return (-1); 21834 } 21835 log_size = log_page_data[3]; 21836 21837 /* 21838 * The list of supported log pages starts at the fourth byte. Check 21839 * until we run out of log pages or a match is found. 21840 */ 21841 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21842 if (log_page_data[i] == log_page) { 21843 match++; 21844 } 21845 } 21846 kmem_free(log_page_data, 0xFF); 21847 return (match); 21848 } 21849 21850 21851 /* 21852 * Function: sd_mhdioc_failfast 21853 * 21854 * Description: This routine is the driver entry point for handling ioctl 21855 * requests to enable/disable the multihost failfast option. 21856 * (MHIOCENFAILFAST) 21857 * 21858 * Arguments: dev - the device number 21859 * arg - user specified probing interval. 21860 * flag - this argument is a pass through to ddi_copyxxx() 21861 * directly from the mode argument of ioctl().
21862 * 21863 * Return Code: 0 21864 * EFAULT 21865 * ENXIO 21866 */ 21867 21868 static int 21869 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21870 { 21871 struct sd_lun *un = NULL; 21872 int mh_time; 21873 int rval = 0; 21874 21875 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21876 return (ENXIO); 21877 } 21878 21879 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21880 return (EFAULT); 21881 21882 if (mh_time) { 21883 mutex_enter(SD_MUTEX(un)); 21884 un->un_resvd_status |= SD_FAILFAST; 21885 mutex_exit(SD_MUTEX(un)); 21886 /* 21887 * If mh_time is INT_MAX, then this ioctl is being used for 21888 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21889 */ 21890 if (mh_time != INT_MAX) { 21891 rval = sd_check_mhd(dev, mh_time); 21892 } 21893 } else { 21894 (void) sd_check_mhd(dev, 0); 21895 mutex_enter(SD_MUTEX(un)); 21896 un->un_resvd_status &= ~SD_FAILFAST; 21897 mutex_exit(SD_MUTEX(un)); 21898 } 21899 return (rval); 21900 } 21901 21902 21903 /* 21904 * Function: sd_mhdioc_takeown 21905 * 21906 * Description: This routine is the driver entry point for handling ioctl 21907 * requests to forcefully acquire exclusive access rights to the 21908 * multihost disk (MHIOCTKOWN). 21909 * 21910 * Arguments: dev - the device number 21911 * arg - user provided structure specifying the delay 21912 * parameters in milliseconds 21913 * flag - this argument is a pass through to ddi_copyxxx() 21914 * directly from the mode argument of ioctl(). 21915 * 21916 * Return Code: 0 21917 * EFAULT 21918 * ENXIO 21919 */ 21920 21921 static int 21922 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21923 { 21924 struct sd_lun *un = NULL; 21925 struct mhioctkown *tkown = NULL; 21926 int rval = 0; 21927 21928 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21929 return (ENXIO); 21930 } 21931 21932 if (arg != NULL) { 21933 tkown = (struct mhioctkown *) 21934 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21935 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21936 if (rval != 0) { 21937 rval = EFAULT; 21938 goto error; 21939 } 21940 } 21941 21942 rval = sd_take_ownership(dev, tkown); 21943 mutex_enter(SD_MUTEX(un)); 21944 if (rval == 0) { 21945 un->un_resvd_status |= SD_RESERVE; 21946 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21947 sd_reinstate_resv_delay = 21948 tkown->reinstate_resv_delay * 1000; 21949 } else { 21950 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21951 } 21952 /* 21953 * Give the scsi_watch routine interval set by 21954 * the MHIOCENFAILFAST ioctl precedence here. 21955 */ 21956 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21957 mutex_exit(SD_MUTEX(un)); 21958 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21959 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21960 "sd_mhdioc_takeown : %d\n", 21961 sd_reinstate_resv_delay); 21962 } else { 21963 mutex_exit(SD_MUTEX(un)); 21964 } 21965 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21966 sd_mhd_reset_notify_cb, (caddr_t)un); 21967 } else { 21968 un->un_resvd_status &= ~SD_RESERVE; 21969 mutex_exit(SD_MUTEX(un)); 21970 } 21971 21972 error: 21973 if (tkown != NULL) { 21974 kmem_free(tkown, sizeof (struct mhioctkown)); 21975 } 21976 return (rval); 21977 } 21978 21979 21980 /* 21981 * Function: sd_mhdioc_release 21982 * 21983 * Description: This routine is the driver entry point for handling ioctl 21984 * requests to release exclusive access rights to the multihost 21985 * disk (MHIOCRELEASE). 
21986 * 21987 * Arguments: dev - the device number 21988 * 21989 * Return Code: 0 21990 * ENXIO 21991 */ 21992 21993 static int 21994 sd_mhdioc_release(dev_t dev) 21995 { 21996 struct sd_lun *un = NULL; 21997 timeout_id_t resvd_timeid_save; 21998 int resvd_status_save; 21999 int rval = 0; 22000 22001 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22002 return (ENXIO); 22003 } 22004 22005 mutex_enter(SD_MUTEX(un)); 22006 resvd_status_save = un->un_resvd_status; 22007 un->un_resvd_status &= 22008 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 22009 if (un->un_resvd_timeid) { 22010 resvd_timeid_save = un->un_resvd_timeid; 22011 un->un_resvd_timeid = NULL; 22012 mutex_exit(SD_MUTEX(un)); 22013 (void) untimeout(resvd_timeid_save); 22014 } else { 22015 mutex_exit(SD_MUTEX(un)); 22016 } 22017 22018 /* 22019 * destroy any pending timeout thread that may be attempting to 22020 * reinstate reservation on this device. 22021 */ 22022 sd_rmv_resv_reclaim_req(dev); 22023 22024 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 22025 mutex_enter(SD_MUTEX(un)); 22026 if ((un->un_mhd_token) && 22027 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 22028 mutex_exit(SD_MUTEX(un)); 22029 (void) sd_check_mhd(dev, 0); 22030 } else { 22031 mutex_exit(SD_MUTEX(un)); 22032 } 22033 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 22034 sd_mhd_reset_notify_cb, (caddr_t)un); 22035 } else { 22036 /* 22037 * sd_mhd_watch_cb will restart the resvd recover timeout thread 22038 */ 22039 mutex_enter(SD_MUTEX(un)); 22040 un->un_resvd_status = resvd_status_save; 22041 mutex_exit(SD_MUTEX(un)); 22042 } 22043 return (rval); 22044 } 22045 22046 22047 /* 22048 * Function: sd_mhdioc_register_devid 22049 * 22050 * Description: This routine is the driver entry point for handling ioctl 22051 * requests to register the device id (MHIOCREREGISTERDEVID). 22052 * 22053 * Note: The implementation for this ioctl has been updated to 22054 * be consistent with the original PSARC case (1999/357) 22055 * (4375899, 4241671, 4220005) 22056 * 22057 * Arguments: dev - the device number 22058 * 22059 * Return Code: 0 22060 * ENXIO 22061 */ 22062 22063 static int 22064 sd_mhdioc_register_devid(dev_t dev) 22065 { 22066 struct sd_lun *un = NULL; 22067 int rval = 0; 22068 22069 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22070 return (ENXIO); 22071 } 22072 22073 ASSERT(!mutex_owned(SD_MUTEX(un))); 22074 22075 mutex_enter(SD_MUTEX(un)); 22076 22077 /* If a devid already exists, de-register it */ 22078 if (un->un_devid != NULL) { 22079 ddi_devid_unregister(SD_DEVINFO(un)); 22080 /* 22081 * After unregister devid, needs to free devid memory 22082 */ 22083 ddi_devid_free(un->un_devid); 22084 un->un_devid = NULL; 22085 } 22086 22087 /* Check for reservation conflict */ 22088 mutex_exit(SD_MUTEX(un)); 22089 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 22090 mutex_enter(SD_MUTEX(un)); 22091 22092 switch (rval) { 22093 case 0: 22094 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 22095 break; 22096 case EACCES: 22097 break; 22098 default: 22099 rval = EIO; 22100 } 22101 22102 mutex_exit(SD_MUTEX(un)); 22103 return (rval); 22104 } 22105 22106 22107 /* 22108 * Function: sd_mhdioc_inkeys 22109 * 22110 * Description: This routine is the driver entry point for handling ioctl 22111 * requests to issue the SCSI-3 Persistent In Read Keys command 22112 * to the device (MHIOCGRP_INKEYS). 
22113 * 22114 * Arguments: dev - the device number 22115 * arg - user provided in_keys structure 22116 * flag - this argument is a pass through to ddi_copyxxx() 22117 * directly from the mode argument of ioctl(). 22118 * 22119 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 22120 * ENXIO 22121 * EFAULT 22122 */ 22123 22124 static int 22125 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 22126 { 22127 struct sd_lun *un; 22128 mhioc_inkeys_t inkeys; 22129 int rval = 0; 22130 22131 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22132 return (ENXIO); 22133 } 22134 22135 #ifdef _MULTI_DATAMODEL 22136 switch (ddi_model_convert_from(flag & FMODELS)) { 22137 case DDI_MODEL_ILP32: { 22138 struct mhioc_inkeys32 inkeys32; 22139 22140 if (ddi_copyin(arg, &inkeys32, 22141 sizeof (struct mhioc_inkeys32), flag) != 0) { 22142 return (EFAULT); 22143 } 22144 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 22145 if ((rval = sd_persistent_reservation_in_read_keys(un, 22146 &inkeys, flag)) != 0) { 22147 return (rval); 22148 } 22149 inkeys32.generation = inkeys.generation; 22150 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 22151 flag) != 0) { 22152 return (EFAULT); 22153 } 22154 break; 22155 } 22156 case DDI_MODEL_NONE: 22157 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 22158 flag) != 0) { 22159 return (EFAULT); 22160 } 22161 if ((rval = sd_persistent_reservation_in_read_keys(un, 22162 &inkeys, flag)) != 0) { 22163 return (rval); 22164 } 22165 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 22166 flag) != 0) { 22167 return (EFAULT); 22168 } 22169 break; 22170 } 22171 22172 #else /* ! _MULTI_DATAMODEL */ 22173 22174 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 22175 return (EFAULT); 22176 } 22177 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 22178 if (rval != 0) { 22179 return (rval); 22180 } 22181 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 22182 return (EFAULT); 22183 } 22184 22185 #endif /* _MULTI_DATAMODEL */ 22186 22187 return (rval); 22188 } 22189 22190 22191 /* 22192 * Function: sd_mhdioc_inresv 22193 * 22194 * Description: This routine is the driver entry point for handling ioctl 22195 * requests to issue the SCSI-3 Persistent In Read Reservations 22196 * command to the device (MHIOCGRP_INKEYS). 22197 * 22198 * Arguments: dev - the device number 22199 * arg - user provided in_resv structure 22200 * flag - this argument is a pass through to ddi_copyxxx() 22201 * directly from the mode argument of ioctl(). 
22202 * 22203 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 22204 * ENXIO 22205 * EFAULT 22206 */ 22207 22208 static int 22209 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 22210 { 22211 struct sd_lun *un; 22212 mhioc_inresvs_t inresvs; 22213 int rval = 0; 22214 22215 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22216 return (ENXIO); 22217 } 22218 22219 #ifdef _MULTI_DATAMODEL 22220 22221 switch (ddi_model_convert_from(flag & FMODELS)) { 22222 case DDI_MODEL_ILP32: { 22223 struct mhioc_inresvs32 inresvs32; 22224 22225 if (ddi_copyin(arg, &inresvs32, 22226 sizeof (struct mhioc_inresvs32), flag) != 0) { 22227 return (EFAULT); 22228 } 22229 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 22230 if ((rval = sd_persistent_reservation_in_read_resv(un, 22231 &inresvs, flag)) != 0) { 22232 return (rval); 22233 } 22234 inresvs32.generation = inresvs.generation; 22235 if (ddi_copyout(&inresvs32, arg, 22236 sizeof (struct mhioc_inresvs32), flag) != 0) { 22237 return (EFAULT); 22238 } 22239 break; 22240 } 22241 case DDI_MODEL_NONE: 22242 if (ddi_copyin(arg, &inresvs, 22243 sizeof (mhioc_inresvs_t), flag) != 0) { 22244 return (EFAULT); 22245 } 22246 if ((rval = sd_persistent_reservation_in_read_resv(un, 22247 &inresvs, flag)) != 0) { 22248 return (rval); 22249 } 22250 if (ddi_copyout(&inresvs, arg, 22251 sizeof (mhioc_inresvs_t), flag) != 0) { 22252 return (EFAULT); 22253 } 22254 break; 22255 } 22256 22257 #else /* ! _MULTI_DATAMODEL */ 22258 22259 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 22260 return (EFAULT); 22261 } 22262 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 22263 if (rval != 0) { 22264 return (rval); 22265 } 22266 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 22267 return (EFAULT); 22268 } 22269 22270 #endif /* ! _MULTI_DATAMODEL */ 22271 22272 return (rval); 22273 } 22274 22275 22276 /* 22277 * The following routines support the clustering functionality described below 22278 * and implement lost reservation reclaim functionality. 22279 * 22280 * Clustering 22281 * ---------- 22282 * The clustering code uses two different, independent forms of SCSI 22283 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 22284 * Persistent Group Reservations. For any particular disk, it will use either 22285 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 22286 * 22287 * SCSI-2 22288 * The cluster software takes ownership of a multi-hosted disk by issuing the 22289 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 22290 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 22291 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 22292 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 22293 * driver. The meaning of failfast is that if the driver (on this host) ever 22294 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 22295 * it should immediately panic the host. The motivation for this ioctl is that 22296 * if this host does encounter reservation conflict, the underlying cause is 22297 * that some other host of the cluster has decided that this host is no longer 22298 * in the cluster and has seized control of the disks for itself. Since this 22299 * host is no longer in the cluster, it ought to panic itself. 
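 *
 * As a concrete illustration of the take-ownership / failfast sequence
 * just described (the remaining details continue below), a minimal
 * user-level sketch might look like the following.  The device path and
 * the 2000 msec polling interval are purely illustrative and error
 * handling is omitted; the ioctls and the mhioctkown structure are the
 * ones handled by this driver, and an all-zero mhioctkown selects the
 * driver's default delays.  Re-issuing MHIOCENFAILFAST with an interval
 * of zero disables failfast again before the reservation is released.
 *
 *	#include <sys/mhd.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/rdsk/c1t2d0s2", O_RDWR | O_NDELAY);
 *	struct mhioctkown tk = { 0 };
 *	int interval = 2000;
 *
 *	(void) ioctl(fd, MHIOCTKOWN, &tk);
 *	(void) ioctl(fd, MHIOCENFAILFAST, &interval);
 *
 *	interval = 0;
 *	(void) ioctl(fd, MHIOCENFAILFAST, &interval);
 *	(void) ioctl(fd, MHIOCRELEASE, 0);
 *	(void) close(fd);
 *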
The 22300 * MHIOCENFAILFAST ioctl does two things: 22301 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 22302 * error to panic the host 22303 * (b) it sets up a periodic timer to test whether this host still has 22304 * "access" (in that no other host has reserved the device): if the 22305 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 22306 * purpose of that periodic timer is to handle scenarios where the host is 22307 * otherwise temporarily quiescent, temporarily doing no real i/o. 22308 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 22309 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 22310 * the device itself. 22311 * 22312 * SCSI-3 PGR 22313 * A direct semantic implementation of the SCSI-3 Persistent Reservation 22314 * facility is supported through the shared multihost disk ioctls 22315 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 22316 * MHIOCGRP_PREEMPTANDABORT) 22317 * 22318 * Reservation Reclaim: 22319 * -------------------- 22320 * To support the lost reservation reclaim operations this driver creates a 22321 * single thread to handle reinstating reservations on all devices that have 22322 * lost reservations sd_resv_reclaim_requests are logged for all devices that 22323 * have LOST RESERVATIONS when the scsi watch facility callsback sd_mhd_watch_cb 22324 * and the reservation reclaim thread loops through the requests to regain the 22325 * lost reservations. 22326 */ 22327 22328 /* 22329 * Function: sd_check_mhd() 22330 * 22331 * Description: This function sets up and submits a scsi watch request or 22332 * terminates an existing watch request. This routine is used in 22333 * support of reservation reclaim. 22334 * 22335 * Arguments: dev - the device 'dev_t' is used for context to discriminate 22336 * among multiple watches that share the callback function 22337 * interval - the number of microseconds specifying the watch 22338 * interval for issuing TEST UNIT READY commands. If 22339 * set to 0 the watch should be terminated. If the 22340 * interval is set to 0 and if the device is required 22341 * to hold reservation while disabling failfast, the 22342 * watch is restarted with an interval of 22343 * reinstate_resv_delay. 22344 * 22345 * Return Code: 0 - Successful submit/terminate of scsi watch request 22346 * ENXIO - Indicates an invalid device was specified 22347 * EAGAIN - Unable to submit the scsi watch request 22348 */ 22349 22350 static int 22351 sd_check_mhd(dev_t dev, int interval) 22352 { 22353 struct sd_lun *un; 22354 opaque_t token; 22355 22356 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22357 return (ENXIO); 22358 } 22359 22360 /* is this a watch termination request? */ 22361 if (interval == 0) { 22362 mutex_enter(SD_MUTEX(un)); 22363 /* if there is an existing watch task then terminate it */ 22364 if (un->un_mhd_token) { 22365 token = un->un_mhd_token; 22366 un->un_mhd_token = NULL; 22367 mutex_exit(SD_MUTEX(un)); 22368 (void) scsi_watch_request_terminate(token, 22369 SCSI_WATCH_TERMINATE_ALL_WAIT); 22370 mutex_enter(SD_MUTEX(un)); 22371 } else { 22372 mutex_exit(SD_MUTEX(un)); 22373 /* 22374 * Note: If we return here we don't check for the 22375 * failfast case. This is the original legacy 22376 * implementation but perhaps we should be checking 22377 * the failfast case. 
22378 */ 22379 return (0); 22380 } 22381 /* 22382 * If the device is required to hold reservation while 22383 * disabling failfast, we need to restart the scsi_watch 22384 * routine with an interval of reinstate_resv_delay. 22385 */ 22386 if (un->un_resvd_status & SD_RESERVE) { 22387 interval = sd_reinstate_resv_delay/1000; 22388 } else { 22389 /* no failfast so bail */ 22390 mutex_exit(SD_MUTEX(un)); 22391 return (0); 22392 } 22393 mutex_exit(SD_MUTEX(un)); 22394 } 22395 22396 /* 22397 * adjust minimum time interval to 1 second, 22398 * and convert from msecs to usecs 22399 */ 22400 if (interval > 0 && interval < 1000) { 22401 interval = 1000; 22402 } 22403 interval *= 1000; 22404 22405 /* 22406 * submit the request to the scsi_watch service 22407 */ 22408 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 22409 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 22410 if (token == NULL) { 22411 return (EAGAIN); 22412 } 22413 22414 /* 22415 * save token for termination later on 22416 */ 22417 mutex_enter(SD_MUTEX(un)); 22418 un->un_mhd_token = token; 22419 mutex_exit(SD_MUTEX(un)); 22420 return (0); 22421 } 22422 22423 22424 /* 22425 * Function: sd_mhd_watch_cb() 22426 * 22427 * Description: This function is the call back function used by the scsi watch 22428 * facility. The scsi watch facility sends the "Test Unit Ready" 22429 * and processes the status. If applicable (i.e. a "Unit Attention" 22430 * status and automatic "Request Sense" not used) the scsi watch 22431 * facility will send a "Request Sense" and retrieve the sense data 22432 * to be passed to this callback function. In either case the 22433 * automatic "Request Sense" or the facility submitting one, this 22434 * callback is passed the status and sense data. 22435 * 22436 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22437 * among multiple watches that share this callback function 22438 * resultp - scsi watch facility result packet containing scsi 22439 * packet, status byte and sense data 22440 * 22441 * Return Code: 0 - continue the watch task 22442 * non-zero - terminate the watch task 22443 */ 22444 22445 static int 22446 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22447 { 22448 struct sd_lun *un; 22449 struct scsi_status *statusp; 22450 uint8_t *sensep; 22451 struct scsi_pkt *pkt; 22452 uchar_t actual_sense_length; 22453 dev_t dev = (dev_t)arg; 22454 22455 ASSERT(resultp != NULL); 22456 statusp = resultp->statusp; 22457 sensep = (uint8_t *)resultp->sensep; 22458 pkt = resultp->pkt; 22459 actual_sense_length = resultp->actual_sense_length; 22460 22461 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22462 return (ENXIO); 22463 } 22464 22465 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22466 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22467 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22468 22469 /* Begin processing of the status and/or sense data */ 22470 if (pkt->pkt_reason != CMD_CMPLT) { 22471 /* Handle the incomplete packet */ 22472 sd_mhd_watch_incomplete(un, pkt); 22473 return (0); 22474 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22475 if (*((unsigned char *)statusp) 22476 == STATUS_RESERVATION_CONFLICT) { 22477 /* 22478 * Handle a reservation conflict by panicking if 22479 * configured for failfast or by logging the conflict 22480 * and updating the reservation status 22481 */ 22482 mutex_enter(SD_MUTEX(un)); 22483 if ((un->un_resvd_status & SD_FAILFAST) && 22484 (sd_failfast_enable)) { 22485 
sd_panic_for_res_conflict(un); 22486 /*NOTREACHED*/ 22487 } 22488 SD_INFO(SD_LOG_IOCTL_MHD, un, 22489 "sd_mhd_watch_cb: Reservation Conflict\n"); 22490 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22491 mutex_exit(SD_MUTEX(un)); 22492 } 22493 } 22494 22495 if (sensep != NULL) { 22496 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22497 mutex_enter(SD_MUTEX(un)); 22498 if ((scsi_sense_asc(sensep) == 22499 SD_SCSI_RESET_SENSE_CODE) && 22500 (un->un_resvd_status & SD_RESERVE)) { 22501 /* 22502 * The additional sense code indicates a power 22503 * on or bus device reset has occurred; update 22504 * the reservation status. 22505 */ 22506 un->un_resvd_status |= 22507 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22508 SD_INFO(SD_LOG_IOCTL_MHD, un, 22509 "sd_mhd_watch_cb: Lost Reservation\n"); 22510 } 22511 } else { 22512 return (0); 22513 } 22514 } else { 22515 mutex_enter(SD_MUTEX(un)); 22516 } 22517 22518 if ((un->un_resvd_status & SD_RESERVE) && 22519 (un->un_resvd_status & SD_LOST_RESERVE)) { 22520 if (un->un_resvd_status & SD_WANT_RESERVE) { 22521 /* 22522 * A reset occurred in between the last probe and this 22523 * one so if a timeout is pending cancel it. 22524 */ 22525 if (un->un_resvd_timeid) { 22526 timeout_id_t temp_id = un->un_resvd_timeid; 22527 un->un_resvd_timeid = NULL; 22528 mutex_exit(SD_MUTEX(un)); 22529 (void) untimeout(temp_id); 22530 mutex_enter(SD_MUTEX(un)); 22531 } 22532 un->un_resvd_status &= ~SD_WANT_RESERVE; 22533 } 22534 if (un->un_resvd_timeid == 0) { 22535 /* Schedule a timeout to handle the lost reservation */ 22536 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22537 (void *)dev, 22538 drv_usectohz(sd_reinstate_resv_delay)); 22539 } 22540 } 22541 mutex_exit(SD_MUTEX(un)); 22542 return (0); 22543 } 22544 22545 22546 /* 22547 * Function: sd_mhd_watch_incomplete() 22548 * 22549 * Description: This function is used to find out why a scsi pkt sent by the 22550 * scsi watch facility was not completed. Under some scenarios this 22551 * routine will return. Otherwise it will send a bus reset to see 22552 * if the drive is still online. 22553 * 22554 * Arguments: un - driver soft state (unit) structure 22555 * pkt - incomplete scsi pkt 22556 */ 22557 22558 static void 22559 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22560 { 22561 int be_chatty; 22562 int perr; 22563 22564 ASSERT(pkt != NULL); 22565 ASSERT(un != NULL); 22566 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22567 perr = (pkt->pkt_statistics & STAT_PERR); 22568 22569 mutex_enter(SD_MUTEX(un)); 22570 if (un->un_state == SD_STATE_DUMPING) { 22571 mutex_exit(SD_MUTEX(un)); 22572 return; 22573 } 22574 22575 switch (pkt->pkt_reason) { 22576 case CMD_UNX_BUS_FREE: 22577 /* 22578 * If we had a parity error that caused the target to drop BSY*, 22579 * don't be chatty about it. 22580 */ 22581 if (perr && be_chatty) { 22582 be_chatty = 0; 22583 } 22584 break; 22585 case CMD_TAG_REJECT: 22586 /* 22587 * The SCSI-2 spec states that a tag reject will be sent by the 22588 * target if tagged queuing is not supported. A tag reject may 22589 * also be sent during certain initialization periods or to 22590 * control internal resources. For the latter case the target 22591 * may also return Queue Full. 22592 * 22593 * If this driver receives a tag reject from a target that is 22594 * going through an init period or controlling internal 22595 * resources tagged queuing will be disabled. 
This is a less 22596 * than optimal behavior but the driver is unable to determine 22597 * the target state and assumes tagged queueing is not supported 22598 */ 22599 pkt->pkt_flags = 0; 22600 un->un_tagflags = 0; 22601 22602 if (un->un_f_opt_queueing == TRUE) { 22603 un->un_throttle = min(un->un_throttle, 3); 22604 } else { 22605 un->un_throttle = 1; 22606 } 22607 mutex_exit(SD_MUTEX(un)); 22608 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 22609 mutex_enter(SD_MUTEX(un)); 22610 break; 22611 case CMD_INCOMPLETE: 22612 /* 22613 * The transport stopped with an abnormal state, fallthrough and 22614 * reset the target and/or bus unless selection did not complete 22615 * (indicated by STATE_GOT_BUS) in which case we don't want to 22616 * go through a target/bus reset 22617 */ 22618 if (pkt->pkt_state == STATE_GOT_BUS) { 22619 break; 22620 } 22621 /*FALLTHROUGH*/ 22622 22623 case CMD_TIMEOUT: 22624 default: 22625 /* 22626 * The lun may still be running the command, so a lun reset 22627 * should be attempted. If the lun reset fails or cannot be 22628 * issued, than try a target reset. Lastly try a bus reset. 22629 */ 22630 if ((pkt->pkt_statistics & 22631 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 22632 int reset_retval = 0; 22633 mutex_exit(SD_MUTEX(un)); 22634 if (un->un_f_allow_bus_device_reset == TRUE) { 22635 if (un->un_f_lun_reset_enabled == TRUE) { 22636 reset_retval = 22637 scsi_reset(SD_ADDRESS(un), 22638 RESET_LUN); 22639 } 22640 if (reset_retval == 0) { 22641 reset_retval = 22642 scsi_reset(SD_ADDRESS(un), 22643 RESET_TARGET); 22644 } 22645 } 22646 if (reset_retval == 0) { 22647 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22648 } 22649 mutex_enter(SD_MUTEX(un)); 22650 } 22651 break; 22652 } 22653 22654 /* A device/bus reset has occurred; update the reservation status. */ 22655 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 22656 (STAT_BUS_RESET | STAT_DEV_RESET))) { 22657 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22658 un->un_resvd_status |= 22659 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22660 SD_INFO(SD_LOG_IOCTL_MHD, un, 22661 "sd_mhd_watch_incomplete: Lost Reservation\n"); 22662 } 22663 } 22664 22665 /* 22666 * The disk has been turned off; Update the device state. 22667 * 22668 * Note: Should we be offlining the disk here? 22669 */ 22670 if (pkt->pkt_state == STATE_GOT_BUS) { 22671 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 22672 "Disk not responding to selection\n"); 22673 if (un->un_state != SD_STATE_OFFLINE) { 22674 New_state(un, SD_STATE_OFFLINE); 22675 } 22676 } else if (be_chatty) { 22677 /* 22678 * suppress messages if they are all the same pkt reason; 22679 * with TQ, many (up to 256) are returned with the same 22680 * pkt_reason 22681 */ 22682 if (pkt->pkt_reason != un->un_last_pkt_reason) { 22683 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22684 "sd_mhd_watch_incomplete: " 22685 "SCSI transport failed: reason '%s'\n", 22686 scsi_rname(pkt->pkt_reason)); 22687 } 22688 } 22689 un->un_last_pkt_reason = pkt->pkt_reason; 22690 mutex_exit(SD_MUTEX(un)); 22691 } 22692 22693 22694 /* 22695 * Function: sd_sname() 22696 * 22697 * Description: This is a simple little routine to return a string containing 22698 * a printable description of command status byte for use in 22699 * logging. 22700 * 22701 * Arguments: status - pointer to a status byte 22702 * 22703 * Return Code: char * - string containing status description. 
22704 */ 22705 22706 static char * 22707 sd_sname(uchar_t status) 22708 { 22709 switch (status & STATUS_MASK) { 22710 case STATUS_GOOD: 22711 return ("good status"); 22712 case STATUS_CHECK: 22713 return ("check condition"); 22714 case STATUS_MET: 22715 return ("condition met"); 22716 case STATUS_BUSY: 22717 return ("busy"); 22718 case STATUS_INTERMEDIATE: 22719 return ("intermediate"); 22720 case STATUS_INTERMEDIATE_MET: 22721 return ("intermediate - condition met"); 22722 case STATUS_RESERVATION_CONFLICT: 22723 return ("reservation_conflict"); 22724 case STATUS_TERMINATED: 22725 return ("command terminated"); 22726 case STATUS_QFULL: 22727 return ("queue full"); 22728 default: 22729 return ("<unknown status>"); 22730 } 22731 } 22732 22733 22734 /* 22735 * Function: sd_mhd_resvd_recover() 22736 * 22737 * Description: This function adds a reservation entry to the 22738 * sd_resv_reclaim_request list and signals the reservation 22739 * reclaim thread that there is work pending. If the reservation 22740 * reclaim thread has not been previously created this function 22741 * will kick it off. 22742 * 22743 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22744 * among multiple watches that share this callback function 22745 * 22746 * Context: This routine is called by timeout() and is run in interrupt 22747 * context. It must not sleep or call other functions which may 22748 * sleep. 22749 */ 22750 22751 static void 22752 sd_mhd_resvd_recover(void *arg) 22753 { 22754 dev_t dev = (dev_t)arg; 22755 struct sd_lun *un; 22756 struct sd_thr_request *sd_treq = NULL; 22757 struct sd_thr_request *sd_cur = NULL; 22758 struct sd_thr_request *sd_prev = NULL; 22759 int already_there = 0; 22760 22761 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22762 return; 22763 } 22764 22765 mutex_enter(SD_MUTEX(un)); 22766 un->un_resvd_timeid = NULL; 22767 if (un->un_resvd_status & SD_WANT_RESERVE) { 22768 /* 22769 * There was a reset so don't issue the reserve, allow the 22770 * sd_mhd_watch_cb callback function to notice this and 22771 * reschedule the timeout for reservation. 22772 */ 22773 mutex_exit(SD_MUTEX(un)); 22774 return; 22775 } 22776 mutex_exit(SD_MUTEX(un)); 22777 22778 /* 22779 * Add this device to the sd_resv_reclaim_request list and the 22780 * sd_resv_reclaim_thread should take care of the rest. 22781 * 22782 * Note: We can't sleep in this context so if the memory allocation 22783 * fails allow the sd_mhd_watch_cb callback function to notice this and 22784 * reschedule the timeout for reservation. 
(4378460) 22785 */ 22786 sd_treq = (struct sd_thr_request *) 22787 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 22788 if (sd_treq == NULL) { 22789 return; 22790 } 22791 22792 sd_treq->sd_thr_req_next = NULL; 22793 sd_treq->dev = dev; 22794 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22795 if (sd_tr.srq_thr_req_head == NULL) { 22796 sd_tr.srq_thr_req_head = sd_treq; 22797 } else { 22798 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 22799 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 22800 if (sd_cur->dev == dev) { 22801 /* 22802 * already in Queue so don't log 22803 * another request for the device 22804 */ 22805 already_there = 1; 22806 break; 22807 } 22808 sd_prev = sd_cur; 22809 } 22810 if (!already_there) { 22811 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 22812 "logging request for %lx\n", dev); 22813 sd_prev->sd_thr_req_next = sd_treq; 22814 } else { 22815 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 22816 } 22817 } 22818 22819 /* 22820 * Create a kernel thread to do the reservation reclaim and free up this 22821 * thread. We cannot block this thread while we go away to do the 22822 * reservation reclaim 22823 */ 22824 if (sd_tr.srq_resv_reclaim_thread == NULL) 22825 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22826 sd_resv_reclaim_thread, NULL, 22827 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22828 22829 /* Tell the reservation reclaim thread that it has work to do */ 22830 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22831 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22832 } 22833 22834 /* 22835 * Function: sd_resv_reclaim_thread() 22836 * 22837 * Description: This function implements the reservation reclaim operations 22838 * 22839 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22840 * among multiple watches that share this callback function 22841 */ 22842 22843 static void 22844 sd_resv_reclaim_thread() 22845 { 22846 struct sd_lun *un; 22847 struct sd_thr_request *sd_mhreq; 22848 22849 /* Wait for work */ 22850 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22851 if (sd_tr.srq_thr_req_head == NULL) { 22852 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22853 &sd_tr.srq_resv_reclaim_mutex); 22854 } 22855 22856 /* Loop while we have work */ 22857 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22858 un = ddi_get_soft_state(sd_state, 22859 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22860 if (un == NULL) { 22861 /* 22862 * softstate structure is NULL so just 22863 * dequeue the request and continue 22864 */ 22865 sd_tr.srq_thr_req_head = 22866 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22867 kmem_free(sd_tr.srq_thr_cur_req, 22868 sizeof (struct sd_thr_request)); 22869 continue; 22870 } 22871 22872 /* dequeue the request */ 22873 sd_mhreq = sd_tr.srq_thr_cur_req; 22874 sd_tr.srq_thr_req_head = 22875 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22876 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22877 22878 /* 22879 * Reclaim reservation only if SD_RESERVE is still set. There 22880 * may have been a call to MHIOCRELEASE before we got here. 22881 */ 22882 mutex_enter(SD_MUTEX(un)); 22883 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22884 /* 22885 * Note: The SD_LOST_RESERVE flag is cleared before 22886 * reclaiming the reservation. 
If this is done after the 22887 * call to sd_reserve_release a reservation loss in the 22888 * window between pkt completion of reserve cmd and 22889 * mutex_enter below may not be recognized 22890 */ 22891 un->un_resvd_status &= ~SD_LOST_RESERVE; 22892 mutex_exit(SD_MUTEX(un)); 22893 22894 if (sd_reserve_release(sd_mhreq->dev, 22895 SD_RESERVE) == 0) { 22896 mutex_enter(SD_MUTEX(un)); 22897 un->un_resvd_status |= SD_RESERVE; 22898 mutex_exit(SD_MUTEX(un)); 22899 SD_INFO(SD_LOG_IOCTL_MHD, un, 22900 "sd_resv_reclaim_thread: " 22901 "Reservation Recovered\n"); 22902 } else { 22903 mutex_enter(SD_MUTEX(un)); 22904 un->un_resvd_status |= SD_LOST_RESERVE; 22905 mutex_exit(SD_MUTEX(un)); 22906 SD_INFO(SD_LOG_IOCTL_MHD, un, 22907 "sd_resv_reclaim_thread: Failed " 22908 "Reservation Recovery\n"); 22909 } 22910 } else { 22911 mutex_exit(SD_MUTEX(un)); 22912 } 22913 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22914 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22915 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22916 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22917 /* 22918 * wakeup the destroy thread if anyone is waiting on 22919 * us to complete. 22920 */ 22921 cv_signal(&sd_tr.srq_inprocess_cv); 22922 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22923 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22924 } 22925 22926 /* 22927 * cleanup the sd_tr structure now that this thread will not exist 22928 */ 22929 ASSERT(sd_tr.srq_thr_req_head == NULL); 22930 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22931 sd_tr.srq_resv_reclaim_thread = NULL; 22932 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22933 thread_exit(); 22934 } 22935 22936 22937 /* 22938 * Function: sd_rmv_resv_reclaim_req() 22939 * 22940 * Description: This function removes any pending reservation reclaim requests 22941 * for the specified device. 22942 * 22943 * Arguments: dev - the device 'dev_t' 22944 */ 22945 22946 static void 22947 sd_rmv_resv_reclaim_req(dev_t dev) 22948 { 22949 struct sd_thr_request *sd_mhreq; 22950 struct sd_thr_request *sd_prev; 22951 22952 /* Remove a reservation reclaim request from the list */ 22953 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22954 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22955 /* 22956 * We are attempting to reinstate reservation for 22957 * this device. We wait for sd_reserve_release() 22958 * to return before we return. 22959 */ 22960 cv_wait(&sd_tr.srq_inprocess_cv, 22961 &sd_tr.srq_resv_reclaim_mutex); 22962 } else { 22963 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22964 if (sd_mhreq && sd_mhreq->dev == dev) { 22965 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22966 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22967 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22968 return; 22969 } 22970 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22971 if (sd_mhreq && sd_mhreq->dev == dev) { 22972 break; 22973 } 22974 sd_prev = sd_mhreq; 22975 } 22976 if (sd_mhreq != NULL) { 22977 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22978 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22979 } 22980 } 22981 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22982 } 22983 22984 22985 /* 22986 * Function: sd_mhd_reset_notify_cb() 22987 * 22988 * Description: This is a call back function for scsi_reset_notify. This 22989 * function updates the softstate reserved status and logs the 22990 * reset. The driver scsi watch facility callback function 22991 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22992 * will reclaim the reservation. 
22993 * 22994 * Arguments: arg - driver soft state (unit) structure 22995 */ 22996 22997 static void 22998 sd_mhd_reset_notify_cb(caddr_t arg) 22999 { 23000 struct sd_lun *un = (struct sd_lun *)arg; 23001 23002 mutex_enter(SD_MUTEX(un)); 23003 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 23004 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 23005 SD_INFO(SD_LOG_IOCTL_MHD, un, 23006 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 23007 } 23008 mutex_exit(SD_MUTEX(un)); 23009 } 23010 23011 23012 /* 23013 * Function: sd_take_ownership() 23014 * 23015 * Description: This routine implements an algorithm to achieve a stable 23016 * reservation on disks which don't implement priority reserve, 23017 * and makes sure that other host lose re-reservation attempts. 23018 * This algorithm contains of a loop that keeps issuing the RESERVE 23019 * for some period of time (min_ownership_delay, default 6 seconds) 23020 * During that loop, it looks to see if there has been a bus device 23021 * reset or bus reset (both of which cause an existing reservation 23022 * to be lost). If the reservation is lost issue RESERVE until a 23023 * period of min_ownership_delay with no resets has gone by, or 23024 * until max_ownership_delay has expired. This loop ensures that 23025 * the host really did manage to reserve the device, in spite of 23026 * resets. The looping for min_ownership_delay (default six 23027 * seconds) is important to early generation clustering products, 23028 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 23029 * MHIOCENFAILFAST periodic timer of two seconds. By having 23030 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 23031 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 23032 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 23033 * have already noticed, via the MHIOCENFAILFAST polling, that it 23034 * no longer "owns" the disk and will have panicked itself. Thus, 23035 * the host issuing the MHIOCTKOWN is assured (with timing 23036 * dependencies) that by the time it actually starts to use the 23037 * disk for real work, the old owner is no longer accessing it. 23038 * 23039 * min_ownership_delay is the minimum amount of time for which the 23040 * disk must be reserved continuously devoid of resets before the 23041 * MHIOCTKOWN ioctl will return success. 23042 * 23043 * max_ownership_delay indicates the amount of time by which the 23044 * take ownership should succeed or timeout with an error. 23045 * 23046 * Arguments: dev - the device 'dev_t' 23047 * *p - struct containing timing info. 23048 * 23049 * Return Code: 0 for success or error code 23050 */ 23051 23052 static int 23053 sd_take_ownership(dev_t dev, struct mhioctkown *p) 23054 { 23055 struct sd_lun *un; 23056 int rval; 23057 int err; 23058 int reservation_count = 0; 23059 int min_ownership_delay = 6000000; /* in usec */ 23060 int max_ownership_delay = 30000000; /* in usec */ 23061 clock_t start_time; /* starting time of this algorithm */ 23062 clock_t end_time; /* time limit for giving up */ 23063 clock_t ownership_time; /* time limit for stable ownership */ 23064 clock_t current_time; 23065 clock_t previous_current_time; 23066 23067 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23068 return (ENXIO); 23069 } 23070 23071 /* 23072 * Attempt a device reservation. A priority reservation is requested. 
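 *
 * Worked timing example, assuming the defaults of min_ownership_delay =
 * 6 sec and max_ownership_delay = 30 sec: the loop below re-issues the
 * RESERVE roughly every 500 msec.  With no intervening resets,
 * reservation_count passes 4 after about 2 sec, but success is not
 * declared until the full 6 sec window has elapsed, i.e. after roughly
 * a dozen clean iterations.  If a reset is detected at, say, t = 5 sec,
 * the window restarts and stable ownership is declared around t = 11
 * sec, still well inside the 30 sec limit.  If a stable window is never
 * achieved before the 30 sec limit expires, the ioctl fails with EACCES.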
23073 */ 23074 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 23075 != SD_SUCCESS) { 23076 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23077 "sd_take_ownership: return(1)=%d\n", rval); 23078 return (rval); 23079 } 23080 23081 /* Update the softstate reserved status to indicate the reservation */ 23082 mutex_enter(SD_MUTEX(un)); 23083 un->un_resvd_status |= SD_RESERVE; 23084 un->un_resvd_status &= 23085 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 23086 mutex_exit(SD_MUTEX(un)); 23087 23088 if (p != NULL) { 23089 if (p->min_ownership_delay != 0) { 23090 min_ownership_delay = p->min_ownership_delay * 1000; 23091 } 23092 if (p->max_ownership_delay != 0) { 23093 max_ownership_delay = p->max_ownership_delay * 1000; 23094 } 23095 } 23096 SD_INFO(SD_LOG_IOCTL_MHD, un, 23097 "sd_take_ownership: min, max delays: %d, %d\n", 23098 min_ownership_delay, max_ownership_delay); 23099 23100 start_time = ddi_get_lbolt(); 23101 current_time = start_time; 23102 ownership_time = current_time + drv_usectohz(min_ownership_delay); 23103 end_time = start_time + drv_usectohz(max_ownership_delay); 23104 23105 while (current_time - end_time < 0) { 23106 delay(drv_usectohz(500000)); 23107 23108 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 23109 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 23110 mutex_enter(SD_MUTEX(un)); 23111 rval = (un->un_resvd_status & 23112 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 23113 mutex_exit(SD_MUTEX(un)); 23114 break; 23115 } 23116 } 23117 previous_current_time = current_time; 23118 current_time = ddi_get_lbolt(); 23119 mutex_enter(SD_MUTEX(un)); 23120 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 23121 ownership_time = ddi_get_lbolt() + 23122 drv_usectohz(min_ownership_delay); 23123 reservation_count = 0; 23124 } else { 23125 reservation_count++; 23126 } 23127 un->un_resvd_status |= SD_RESERVE; 23128 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 23129 mutex_exit(SD_MUTEX(un)); 23130 23131 SD_INFO(SD_LOG_IOCTL_MHD, un, 23132 "sd_take_ownership: ticks for loop iteration=%ld, " 23133 "reservation=%s\n", (current_time - previous_current_time), 23134 reservation_count ? 
"ok" : "reclaimed"); 23135 23136 if (current_time - ownership_time >= 0 && 23137 reservation_count >= 4) { 23138 rval = 0; /* Achieved a stable ownership */ 23139 break; 23140 } 23141 if (current_time - end_time >= 0) { 23142 rval = EACCES; /* No ownership in max possible time */ 23143 break; 23144 } 23145 } 23146 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23147 "sd_take_ownership: return(2)=%d\n", rval); 23148 return (rval); 23149 } 23150 23151 23152 /* 23153 * Function: sd_reserve_release() 23154 * 23155 * Description: This function builds and sends scsi RESERVE, RELEASE, and 23156 * PRIORITY RESERVE commands based on a user specified command type 23157 * 23158 * Arguments: dev - the device 'dev_t' 23159 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 23160 * SD_RESERVE, SD_RELEASE 23161 * 23162 * Return Code: 0 or Error Code 23163 */ 23164 23165 static int 23166 sd_reserve_release(dev_t dev, int cmd) 23167 { 23168 struct uscsi_cmd *com = NULL; 23169 struct sd_lun *un = NULL; 23170 char cdb[CDB_GROUP0]; 23171 int rval; 23172 23173 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 23174 (cmd == SD_PRIORITY_RESERVE)); 23175 23176 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23177 return (ENXIO); 23178 } 23179 23180 /* instantiate and initialize the command and cdb */ 23181 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23182 bzero(cdb, CDB_GROUP0); 23183 com->uscsi_flags = USCSI_SILENT; 23184 com->uscsi_timeout = un->un_reserve_release_time; 23185 com->uscsi_cdblen = CDB_GROUP0; 23186 com->uscsi_cdb = cdb; 23187 if (cmd == SD_RELEASE) { 23188 cdb[0] = SCMD_RELEASE; 23189 } else { 23190 cdb[0] = SCMD_RESERVE; 23191 } 23192 23193 /* Send the command. */ 23194 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23195 SD_PATH_STANDARD); 23196 23197 /* 23198 * "break" a reservation that is held by another host, by issuing a 23199 * reset if priority reserve is desired, and we could not get the 23200 * device. 23201 */ 23202 if ((cmd == SD_PRIORITY_RESERVE) && 23203 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 23204 /* 23205 * First try to reset the LUN. If we cannot, then try a target 23206 * reset, followed by a bus reset if the target reset fails. 23207 */ 23208 int reset_retval = 0; 23209 if (un->un_f_lun_reset_enabled == TRUE) { 23210 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 23211 } 23212 if (reset_retval == 0) { 23213 /* The LUN reset either failed or was not issued */ 23214 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23215 } 23216 if ((reset_retval == 0) && 23217 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 23218 rval = EIO; 23219 kmem_free(com, sizeof (*com)); 23220 return (rval); 23221 } 23222 23223 bzero(com, sizeof (struct uscsi_cmd)); 23224 com->uscsi_flags = USCSI_SILENT; 23225 com->uscsi_cdb = cdb; 23226 com->uscsi_cdblen = CDB_GROUP0; 23227 com->uscsi_timeout = 5; 23228 23229 /* 23230 * Reissue the last reserve command, this time without request 23231 * sense. Assume that it is just a regular reserve command. 23232 */ 23233 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23234 SD_PATH_STANDARD); 23235 } 23236 23237 /* Return an error if still getting a reservation conflict. 
*/ 23238 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 23239 rval = EACCES; 23240 } 23241 23242 kmem_free(com, sizeof (*com)); 23243 return (rval); 23244 } 23245 23246 23247 #define SD_NDUMP_RETRIES 12 23248 /* 23249 * System Crash Dump routine 23250 */ 23251 23252 static int 23253 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 23254 { 23255 int instance; 23256 int partition; 23257 int i; 23258 int err; 23259 struct sd_lun *un; 23260 struct scsi_pkt *wr_pktp; 23261 struct buf *wr_bp; 23262 struct buf wr_buf; 23263 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 23264 daddr_t tgt_blkno; /* rmw - blkno for target */ 23265 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 23266 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 23267 size_t io_start_offset; 23268 int doing_rmw = FALSE; 23269 int rval; 23270 ssize_t dma_resid; 23271 daddr_t oblkno; 23272 diskaddr_t nblks = 0; 23273 diskaddr_t start_block; 23274 23275 instance = SDUNIT(dev); 23276 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 23277 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 23278 return (ENXIO); 23279 } 23280 23281 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 23282 23283 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 23284 23285 partition = SDPART(dev); 23286 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 23287 23288 /* Validate blocks to dump at against partition size. */ 23289 23290 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 23291 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 23292 23293 if ((blkno + nblk) > nblks) { 23294 SD_TRACE(SD_LOG_DUMP, un, 23295 "sddump: dump range larger than partition: " 23296 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 23297 blkno, nblk, nblks); 23298 return (EINVAL); 23299 } 23300 23301 mutex_enter(&un->un_pm_mutex); 23302 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23303 struct scsi_pkt *start_pktp; 23304 23305 mutex_exit(&un->un_pm_mutex); 23306 23307 /* 23308 * use pm framework to power on HBA 1st 23309 */ 23310 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 23311 23312 /* 23313 * Dump no long uses sdpower to power on a device, it's 23314 * in-line here so it can be done in polled mode. 23315 */ 23316 23317 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 23318 23319 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 23320 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 23321 23322 if (start_pktp == NULL) { 23323 /* We were not given a SCSI packet, fail. */ 23324 return (EIO); 23325 } 23326 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 23327 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 23328 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 23329 start_pktp->pkt_flags = FLAG_NOINTR; 23330 23331 mutex_enter(SD_MUTEX(un)); 23332 SD_FILL_SCSI1_LUN(un, start_pktp); 23333 mutex_exit(SD_MUTEX(un)); 23334 /* 23335 * Scsi_poll returns 0 (success) if the command completes and 23336 * the status block is STATUS_GOOD. 23337 */ 23338 if (sd_scsi_poll(un, start_pktp) != 0) { 23339 scsi_destroy_pkt(start_pktp); 23340 return (EIO); 23341 } 23342 scsi_destroy_pkt(start_pktp); 23343 (void) sd_ddi_pm_resume(un); 23344 } else { 23345 mutex_exit(&un->un_pm_mutex); 23346 } 23347 23348 mutex_enter(SD_MUTEX(un)); 23349 un->un_throttle = 0; 23350 23351 /* 23352 * The first time through, reset the specific target device. 23353 * However, when cpr calls sddump we know that sd is in a 23354 * a good state so no bus reset is required. 23355 * Clear sense data via Request Sense cmd. 
23356 * In sddump we don't care about allow_bus_device_reset anymore 23357 */ 23358 23359 if ((un->un_state != SD_STATE_SUSPENDED) && 23360 (un->un_state != SD_STATE_DUMPING)) { 23361 23362 New_state(un, SD_STATE_DUMPING); 23363 23364 if (un->un_f_is_fibre == FALSE) { 23365 mutex_exit(SD_MUTEX(un)); 23366 /* 23367 * Attempt a bus reset for parallel scsi. 23368 * 23369 * Note: A bus reset is required because on some host 23370 * systems (i.e. E420R) a bus device reset is 23371 * insufficient to reset the state of the target. 23372 * 23373 * Note: Don't issue the reset for fibre-channel, 23374 * because this tends to hang the bus (loop) for 23375 * too long while everyone is logging out and in 23376 * and the deadman timer for dumping will fire 23377 * before the dump is complete. 23378 */ 23379 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 23380 mutex_enter(SD_MUTEX(un)); 23381 Restore_state(un); 23382 mutex_exit(SD_MUTEX(un)); 23383 return (EIO); 23384 } 23385 23386 /* Delay to give the device some recovery time. */ 23387 drv_usecwait(10000); 23388 23389 if (sd_send_polled_RQS(un) == SD_FAILURE) { 23390 SD_INFO(SD_LOG_DUMP, un, 23391 "sddump: sd_send_polled_RQS failed\n"); 23392 } 23393 mutex_enter(SD_MUTEX(un)); 23394 } 23395 } 23396 23397 /* 23398 * Convert the partition-relative block number to a 23399 * disk physical block number. 23400 */ 23401 blkno += start_block; 23402 23403 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 23404 23405 23406 /* 23407 * Check if the device has a non-512 block size. 23408 */ 23409 wr_bp = NULL; 23410 if (NOT_DEVBSIZE(un)) { 23411 tgt_byte_offset = blkno * un->un_sys_blocksize; 23412 tgt_byte_count = nblk * un->un_sys_blocksize; 23413 if ((tgt_byte_offset % un->un_tgt_blocksize) || 23414 (tgt_byte_count % un->un_tgt_blocksize)) { 23415 doing_rmw = TRUE; 23416 /* 23417 * Calculate the block number and number of block 23418 * in terms of the media block size. 23419 */ 23420 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23421 tgt_nblk = 23422 ((tgt_byte_offset + tgt_byte_count + 23423 (un->un_tgt_blocksize - 1)) / 23424 un->un_tgt_blocksize) - tgt_blkno; 23425 23426 /* 23427 * Invoke the routine which is going to do read part 23428 * of read-modify-write. 23429 * Note that this routine returns a pointer to 23430 * a valid bp in wr_bp. 23431 */ 23432 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 23433 &wr_bp); 23434 if (err) { 23435 mutex_exit(SD_MUTEX(un)); 23436 return (err); 23437 } 23438 /* 23439 * Offset is being calculated as - 23440 * (original block # * system block size) - 23441 * (new block # * target block size) 23442 */ 23443 io_start_offset = 23444 ((uint64_t)(blkno * un->un_sys_blocksize)) - 23445 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 23446 23447 ASSERT((io_start_offset >= 0) && 23448 (io_start_offset < un->un_tgt_blocksize)); 23449 /* 23450 * Do the modify portion of read modify write. 
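 *
 * Worked example of the read-modify-write arithmetic above, assuming a
 * 512-byte system block size and a hypothetical 2048-byte media block
 * size: for blkno = 5 and nblk = 3, tgt_byte_offset = 2560 and
 * tgt_byte_count = 1536, neither of which is 2048-byte aligned, so RMW
 * is needed.  Then tgt_blkno = 2560 / 2048 = 1, tgt_nblk =
 * (2560 + 1536 + 2047) / 2048 - 1 = 1, and io_start_offset =
 * 2560 - (1 * 2048) = 512.  Media block 1 is read in, the 1536 bytes
 * being dumped are copied over it at byte offset 512 (the bcopy below),
 * and that single modified block is then written out.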
23451 */ 23452 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23453 (size_t)nblk * un->un_sys_blocksize); 23454 } else { 23455 doing_rmw = FALSE; 23456 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23457 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23458 } 23459 23460 /* Convert blkno and nblk to target blocks */ 23461 blkno = tgt_blkno; 23462 nblk = tgt_nblk; 23463 } else { 23464 wr_bp = &wr_buf; 23465 bzero(wr_bp, sizeof (struct buf)); 23466 wr_bp->b_flags = B_BUSY; 23467 wr_bp->b_un.b_addr = addr; 23468 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23469 wr_bp->b_resid = 0; 23470 } 23471 23472 mutex_exit(SD_MUTEX(un)); 23473 23474 /* 23475 * Obtain a SCSI packet for the write command. 23476 * It should be safe to call the allocator here without 23477 * worrying about being locked for DVMA mapping because 23478 * the address we're passed is already a DVMA mapping 23479 * 23480 * We are also not going to worry about semaphore ownership 23481 * in the dump buffer. Dumping is single threaded at present. 23482 */ 23483 23484 wr_pktp = NULL; 23485 23486 dma_resid = wr_bp->b_bcount; 23487 oblkno = blkno; 23488 23489 while (dma_resid != 0) { 23490 23491 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23492 wr_bp->b_flags &= ~B_ERROR; 23493 23494 if (un->un_partial_dma_supported == 1) { 23495 blkno = oblkno + 23496 ((wr_bp->b_bcount - dma_resid) / 23497 un->un_tgt_blocksize); 23498 nblk = dma_resid / un->un_tgt_blocksize; 23499 23500 if (wr_pktp) { 23501 /* 23502 * Partial DMA transfers after initial transfer 23503 */ 23504 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23505 blkno, nblk); 23506 } else { 23507 /* Initial transfer */ 23508 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23509 un->un_pkt_flags, NULL_FUNC, NULL, 23510 blkno, nblk); 23511 } 23512 } else { 23513 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23514 0, NULL_FUNC, NULL, blkno, nblk); 23515 } 23516 23517 if (rval == 0) { 23518 /* We were given a SCSI packet, continue. 
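 *
 * Worked example of the partial-DMA bookkeeping in this loop, assuming
 * a 512-byte target block size and a hypothetical HBA that can only map
 * 40960 bytes per packet: for a 65536-byte dump buffer the first pass
 * asks for all 128 blocks, the packet comes back with pkt_resid = 24576,
 * so dma_resid becomes 24576 and nblk is trimmed to 128 - 48 = 80 blocks
 * for this write.  The next trip through the outer loop then starts at
 * blkno = oblkno + (65536 - 24576) / 512 = oblkno + 80 with nblk = 48,
 * and the outer loop terminates once pkt_resid (and therefore dma_resid)
 * drops to zero.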
*/ 23519 break; 23520 } 23521 23522 if (i == 0) { 23523 if (wr_bp->b_flags & B_ERROR) { 23524 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23525 "no resources for dumping; " 23526 "error code: 0x%x, retrying", 23527 geterror(wr_bp)); 23528 } else { 23529 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23530 "no resources for dumping; retrying"); 23531 } 23532 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23533 if (wr_bp->b_flags & B_ERROR) { 23534 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23535 "no resources for dumping; error code: " 23536 "0x%x, retrying\n", geterror(wr_bp)); 23537 } 23538 } else { 23539 if (wr_bp->b_flags & B_ERROR) { 23540 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23541 "no resources for dumping; " 23542 "error code: 0x%x, retries failed, " 23543 "giving up.\n", geterror(wr_bp)); 23544 } else { 23545 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23546 "no resources for dumping; " 23547 "retries failed, giving up.\n"); 23548 } 23549 mutex_enter(SD_MUTEX(un)); 23550 Restore_state(un); 23551 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23552 mutex_exit(SD_MUTEX(un)); 23553 scsi_free_consistent_buf(wr_bp); 23554 } else { 23555 mutex_exit(SD_MUTEX(un)); 23556 } 23557 return (EIO); 23558 } 23559 drv_usecwait(10000); 23560 } 23561 23562 if (un->un_partial_dma_supported == 1) { 23563 /* 23564 * save the resid from PARTIAL_DMA 23565 */ 23566 dma_resid = wr_pktp->pkt_resid; 23567 if (dma_resid != 0) 23568 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23569 wr_pktp->pkt_resid = 0; 23570 } else { 23571 dma_resid = 0; 23572 } 23573 23574 /* SunBug 1222170 */ 23575 wr_pktp->pkt_flags = FLAG_NOINTR; 23576 23577 err = EIO; 23578 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23579 23580 /* 23581 * Scsi_poll returns 0 (success) if the command completes and 23582 * the status block is STATUS_GOOD. We should only check 23583 * errors if this condition is not true. Even then we should 23584 * send our own request sense packet only if we have a check 23585 * condition and auto request sense has not been performed by 23586 * the hba. 23587 */ 23588 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23589 23590 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23591 (wr_pktp->pkt_resid == 0)) { 23592 err = SD_SUCCESS; 23593 break; 23594 } 23595 23596 /* 23597 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23598 */ 23599 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23600 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23601 "Error while dumping state...Device is gone\n"); 23602 break; 23603 } 23604 23605 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23606 SD_INFO(SD_LOG_DUMP, un, 23607 "sddump: write failed with CHECK, try # %d\n", i); 23608 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23609 (void) sd_send_polled_RQS(un); 23610 } 23611 23612 continue; 23613 } 23614 23615 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23616 int reset_retval = 0; 23617 23618 SD_INFO(SD_LOG_DUMP, un, 23619 "sddump: write failed with BUSY, try # %d\n", i); 23620 23621 if (un->un_f_lun_reset_enabled == TRUE) { 23622 reset_retval = scsi_reset(SD_ADDRESS(un), 23623 RESET_LUN); 23624 } 23625 if (reset_retval == 0) { 23626 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23627 } 23628 (void) sd_send_polled_RQS(un); 23629 23630 } else { 23631 SD_INFO(SD_LOG_DUMP, un, 23632 "sddump: write failed with 0x%x, try # %d\n", 23633 SD_GET_PKT_STATUS(wr_pktp), i); 23634 mutex_enter(SD_MUTEX(un)); 23635 sd_reset_target(un, wr_pktp); 23636 mutex_exit(SD_MUTEX(un)); 23637 } 23638 23639 /* 23640 * If we are not getting anywhere with lun/target resets, 23641 * let's reset the bus. 23642 */ 23643 if (i == SD_NDUMP_RETRIES/2) { 23644 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23645 (void) sd_send_polled_RQS(un); 23646 } 23647 } 23648 } 23649 23650 scsi_destroy_pkt(wr_pktp); 23651 mutex_enter(SD_MUTEX(un)); 23652 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23653 mutex_exit(SD_MUTEX(un)); 23654 scsi_free_consistent_buf(wr_bp); 23655 } else { 23656 mutex_exit(SD_MUTEX(un)); 23657 } 23658 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23659 return (err); 23660 } 23661 23662 /* 23663 * Function: sd_scsi_poll() 23664 * 23665 * Description: This is a wrapper for the scsi_poll call. 23666 * 23667 * Arguments: sd_lun - The unit structure 23668 * scsi_pkt - The scsi packet being sent to the device. 23669 * 23670 * Return Code: 0 - Command completed successfully with good status 23671 * -1 - Command failed. This could indicate a check condition 23672 * or other status value requiring recovery action. 23673 * 23674 * NOTE: This code is only called off sddump(). 23675 */ 23676 23677 static int 23678 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23679 { 23680 int status; 23681 23682 ASSERT(un != NULL); 23683 ASSERT(!mutex_owned(SD_MUTEX(un))); 23684 ASSERT(pktp != NULL); 23685 23686 status = SD_SUCCESS; 23687 23688 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23689 pktp->pkt_flags |= un->un_tagflags; 23690 pktp->pkt_flags &= ~FLAG_NODISCON; 23691 } 23692 23693 status = sd_ddi_scsi_poll(pktp); 23694 /* 23695 * Scsi_poll returns 0 (success) if the command completes and the 23696 * status block is STATUS_GOOD. We should only check errors if this 23697 * condition is not true. Even then we should send our own request 23698 * sense packet only if we have a check condition and auto 23699 * request sense has not been performed by the hba. 23700 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23701 */ 23702 if ((status != SD_SUCCESS) && 23703 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23704 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23705 (pktp->pkt_reason != CMD_DEV_GONE)) 23706 (void) sd_send_polled_RQS(un); 23707 23708 return (status); 23709 } 23710 23711 /* 23712 * Function: sd_send_polled_RQS() 23713 * 23714 * Description: This sends the request sense command to a device. 
23715 * 23716 * Arguments: sd_lun - The unit structure 23717 * 23718 * Return Code: 0 - Command completed successfully with good status 23719 * -1 - Command failed. 23720 * 23721 */ 23722 23723 static int 23724 sd_send_polled_RQS(struct sd_lun *un) 23725 { 23726 int ret_val; 23727 struct scsi_pkt *rqs_pktp; 23728 struct buf *rqs_bp; 23729 23730 ASSERT(un != NULL); 23731 ASSERT(!mutex_owned(SD_MUTEX(un))); 23732 23733 ret_val = SD_SUCCESS; 23734 23735 rqs_pktp = un->un_rqs_pktp; 23736 rqs_bp = un->un_rqs_bp; 23737 23738 mutex_enter(SD_MUTEX(un)); 23739 23740 if (un->un_sense_isbusy) { 23741 ret_val = SD_FAILURE; 23742 mutex_exit(SD_MUTEX(un)); 23743 return (ret_val); 23744 } 23745 23746 /* 23747 * If the request sense buffer (and packet) is not in use, 23748 * let's set the un_sense_isbusy and send our packet 23749 */ 23750 un->un_sense_isbusy = 1; 23751 rqs_pktp->pkt_resid = 0; 23752 rqs_pktp->pkt_reason = 0; 23753 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23754 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23755 23756 mutex_exit(SD_MUTEX(un)); 23757 23758 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23759 " 0x%p\n", rqs_bp->b_un.b_addr); 23760 23761 /* 23762 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23763 * axle - it has a call into us! 23764 */ 23765 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23766 SD_INFO(SD_LOG_COMMON, un, 23767 "sd_send_polled_RQS: RQS failed\n"); 23768 } 23769 23770 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23771 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23772 23773 mutex_enter(SD_MUTEX(un)); 23774 un->un_sense_isbusy = 0; 23775 mutex_exit(SD_MUTEX(un)); 23776 23777 return (ret_val); 23778 } 23779 23780 /* 23781 * Defines needed for localized version of the scsi_poll routine. 23782 */ 23783 #define CSEC 10000 /* usecs */ 23784 #define SEC_TO_CSEC (1000000/CSEC) 23785 23786 /* 23787 * Function: sd_ddi_scsi_poll() 23788 * 23789 * Description: Localized version of the scsi_poll routine. The purpose is to 23790 * send a scsi_pkt to a device as a polled command. This version 23791 * is to ensure more robust handling of transport errors. 23792 * Specifically this routine cures not ready, coming ready 23793 * transition for power up and reset of sonoma's. This can take 23794 * up to 45 seconds for power-on and 20 seconds for reset of a 23795 * sonoma lun. 23796 * 23797 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23798 * 23799 * Return Code: 0 - Command completed successfully with good status 23800 * -1 - Command failed. 23801 * 23802 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 23803 * be fixed (removing this code), we need to determine how to handle the 23804 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23805 * 23806 * NOTE: This code is only called off sddump(). 23807 */ 23808 static int 23809 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23810 { 23811 int rval = -1; 23812 int savef; 23813 long savet; 23814 void (*savec)(); 23815 int timeout; 23816 int busy_count; 23817 int poll_delay; 23818 int rc; 23819 uint8_t *sensep; 23820 struct scsi_arq_status *arqstat; 23821 extern int do_polled_io; 23822 23823 ASSERT(pkt->pkt_scbp); 23824 23825 /* 23826 * save old flags.. 
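 * (pkt_flags, pkt_comp and pkt_time are restored just before returning.)
 *
 * Illustrative timing arithmetic for the retry loop below, derived from the
 * CSEC/SEC_TO_CSEC definitions above (a reading aid only):
 *
 *     one poll tick         = CSEC usec              = 10 msec
 *     ticks per second      = SEC_TO_CSEC            = 1000000 / 10000 = 100
 *     total budget          = pkt_time * SEC_TO_CSEC ticks
 *                             (60 sec default        -> 6000 ticks)
 *     transport busy/qfull  : waits 1 * CSEC and costs one tick
 *     busy / becoming ready : waits 100 * CSEC and is charged a full second
 *                             by adding SEC_TO_CSEC - 1 to the tick count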
23827 */ 23828 savef = pkt->pkt_flags; 23829 savec = pkt->pkt_comp; 23830 savet = pkt->pkt_time; 23831 23832 pkt->pkt_flags |= FLAG_NOINTR; 23833 23834 /* 23835 * XXX there is nothing in the SCSA spec that states that we should not 23836 * do a callback for polled cmds; however, removing this will break sd 23837 * and probably other target drivers 23838 */ 23839 pkt->pkt_comp = NULL; 23840 23841 /* 23842 * we don't like a polled command without timeout. 23843 * 60 seconds seems long enough. 23844 */ 23845 if (pkt->pkt_time == 0) 23846 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23847 23848 /* 23849 * Send polled cmd. 23850 * 23851 * We do some error recovery for various errors. Tran_busy, 23852 * queue full, and non-dispatched commands are retried every 10 msec. 23853 * as they are typically transient failures. Busy status and Not 23854 * Ready are retried every second as this status takes a while to 23855 * change. 23856 */ 23857 timeout = pkt->pkt_time * SEC_TO_CSEC; 23858 23859 for (busy_count = 0; busy_count < timeout; busy_count++) { 23860 /* 23861 * Initialize pkt status variables. 23862 */ 23863 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23864 23865 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23866 if (rc != TRAN_BUSY) { 23867 /* Transport failed - give up. */ 23868 break; 23869 } else { 23870 /* Transport busy - try again. */ 23871 poll_delay = 1 * CSEC; /* 10 msec. */ 23872 } 23873 } else { 23874 /* 23875 * Transport accepted - check pkt status. 23876 */ 23877 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23878 if ((pkt->pkt_reason == CMD_CMPLT) && 23879 (rc == STATUS_CHECK) && 23880 (pkt->pkt_state & STATE_ARQ_DONE)) { 23881 arqstat = 23882 (struct scsi_arq_status *)(pkt->pkt_scbp); 23883 sensep = (uint8_t *)&arqstat->sts_sensedata; 23884 } else { 23885 sensep = NULL; 23886 } 23887 23888 if ((pkt->pkt_reason == CMD_CMPLT) && 23889 (rc == STATUS_GOOD)) { 23890 /* No error - we're done */ 23891 rval = 0; 23892 break; 23893 23894 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23895 /* Lost connection - give up */ 23896 break; 23897 23898 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23899 (pkt->pkt_state == 0)) { 23900 /* Pkt not dispatched - try again. */ 23901 poll_delay = 1 * CSEC; /* 10 msec. */ 23902 23903 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23904 (rc == STATUS_QFULL)) { 23905 /* Queue full - try again. */ 23906 poll_delay = 1 * CSEC; /* 10 msec. */ 23907 23908 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23909 (rc == STATUS_BUSY)) { 23910 /* Busy - try again. */ 23911 poll_delay = 100 * CSEC; /* 1 sec. */ 23912 busy_count += (SEC_TO_CSEC - 1); 23913 23914 } else if ((sensep != NULL) && 23915 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23916 /* 23917 * Unit Attention - try again. 23918 * Pretend it took 1 sec. 23919 * NOTE: 'continue' avoids poll_delay 23920 */ 23921 busy_count += (SEC_TO_CSEC - 1); 23922 continue; 23923 23924 } else if ((sensep != NULL) && 23925 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23926 (scsi_sense_asc(sensep) == 0x04) && 23927 (scsi_sense_ascq(sensep) == 0x01)) { 23928 /* 23929 * Not ready -> ready - try again. 23930 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23931 * ...same as STATUS_BUSY 23932 */ 23933 poll_delay = 100 * CSEC; /* 1 sec. */ 23934 busy_count += (SEC_TO_CSEC - 1); 23935 23936 } else { 23937 /* BAD status - give up. 
*/ 23938 break; 23939 } 23940 } 23941 23942 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23943 !do_polled_io) { 23944 delay(drv_usectohz(poll_delay)); 23945 } else { 23946 /* we busy wait during cpr_dump or interrupt threads */ 23947 drv_usecwait(poll_delay); 23948 } 23949 } 23950 23951 pkt->pkt_flags = savef; 23952 pkt->pkt_comp = savec; 23953 pkt->pkt_time = savet; 23954 23955 /* return on error */ 23956 if (rval) 23957 return (rval); 23958 23959 /* 23960 * This is not a performance critical code path. 23961 * 23962 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23963 * issues associated with looking at DMA memory prior to 23964 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23965 */ 23966 scsi_sync_pkt(pkt); 23967 return (0); 23968 } 23969 23970 23971 23972 /* 23973 * Function: sd_persistent_reservation_in_read_keys 23974 * 23975 * Description: This routine is the driver entry point for handling CD-ROM 23976 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23977 * by sending the SCSI-3 PRIN commands to the device. 23978 * Processes the read keys command response by copying the 23979 * reservation key information into the user provided buffer. 23980 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23981 * 23982 * Arguments: un - Pointer to soft state struct for the target. 23983 * usrp - user provided pointer to multihost Persistent In Read 23984 * Keys structure (mhioc_inkeys_t) 23985 * flag - this argument is a pass through to ddi_copyxxx() 23986 * directly from the mode argument of ioctl(). 23987 * 23988 * Return Code: 0 - Success 23989 * EACCES 23990 * ENOTSUP 23991 * errno return code from sd_send_scsi_cmd() 23992 * 23993 * Context: Can sleep. Does not return until command is completed. 23994 */ 23995 23996 static int 23997 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23998 mhioc_inkeys_t *usrp, int flag) 23999 { 24000 #ifdef _MULTI_DATAMODEL 24001 struct mhioc_key_list32 li32; 24002 #endif 24003 sd_prin_readkeys_t *in; 24004 mhioc_inkeys_t *ptr; 24005 mhioc_key_list_t li; 24006 uchar_t *data_bufp; 24007 int data_len; 24008 int rval; 24009 size_t copysz; 24010 24011 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 24012 return (EINVAL); 24013 } 24014 bzero(&li, sizeof (mhioc_key_list_t)); 24015 24016 /* 24017 * Get the listsize from user 24018 */ 24019 #ifdef _MULTI_DATAMODEL 24020 24021 switch (ddi_model_convert_from(flag & FMODELS)) { 24022 case DDI_MODEL_ILP32: 24023 copysz = sizeof (struct mhioc_key_list32); 24024 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 24025 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24026 "sd_persistent_reservation_in_read_keys: " 24027 "failed ddi_copyin: mhioc_key_list32_t\n"); 24028 rval = EFAULT; 24029 goto done; 24030 } 24031 li.listsize = li32.listsize; 24032 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 24033 break; 24034 24035 case DDI_MODEL_NONE: 24036 copysz = sizeof (mhioc_key_list_t); 24037 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 24038 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24039 "sd_persistent_reservation_in_read_keys: " 24040 "failed ddi_copyin: mhioc_key_list_t\n"); 24041 rval = EFAULT; 24042 goto done; 24043 } 24044 break; 24045 } 24046 24047 #else /* ! 
_MULTI_DATAMODEL */ 24048 copysz = sizeof (mhioc_key_list_t); 24049 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 24050 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24051 "sd_persistent_reservation_in_read_keys: " 24052 "failed ddi_copyin: mhioc_key_list_t\n"); 24053 rval = EFAULT; 24054 goto done; 24055 } 24056 #endif 24057 24058 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 24059 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 24060 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 24061 24062 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 24063 data_len, data_bufp)) != 0) { 24064 goto done; 24065 } 24066 in = (sd_prin_readkeys_t *)data_bufp; 24067 ptr->generation = BE_32(in->generation); 24068 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 24069 24070 /* 24071 * Return the min(listsize, listlen) keys 24072 */ 24073 #ifdef _MULTI_DATAMODEL 24074 24075 switch (ddi_model_convert_from(flag & FMODELS)) { 24076 case DDI_MODEL_ILP32: 24077 li32.listlen = li.listlen; 24078 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 24079 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24080 "sd_persistent_reservation_in_read_keys: " 24081 "failed ddi_copyout: mhioc_key_list32_t\n"); 24082 rval = EFAULT; 24083 goto done; 24084 } 24085 break; 24086 24087 case DDI_MODEL_NONE: 24088 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 24089 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24090 "sd_persistent_reservation_in_read_keys: " 24091 "failed ddi_copyout: mhioc_key_list_t\n"); 24092 rval = EFAULT; 24093 goto done; 24094 } 24095 break; 24096 } 24097 24098 #else /* ! _MULTI_DATAMODEL */ 24099 24100 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 24101 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24102 "sd_persistent_reservation_in_read_keys: " 24103 "failed ddi_copyout: mhioc_key_list_t\n"); 24104 rval = EFAULT; 24105 goto done; 24106 } 24107 24108 #endif /* _MULTI_DATAMODEL */ 24109 24110 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 24111 li.listsize * MHIOC_RESV_KEY_SIZE); 24112 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 24113 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24114 "sd_persistent_reservation_in_read_keys: " 24115 "failed ddi_copyout: keylist\n"); 24116 rval = EFAULT; 24117 } 24118 done: 24119 kmem_free(data_bufp, data_len); 24120 return (rval); 24121 } 24122 24123 24124 /* 24125 * Function: sd_persistent_reservation_in_read_resv 24126 * 24127 * Description: This routine is the driver entry point for handling CD-ROM 24128 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 24129 * by sending the SCSI-3 PRIN commands to the device. 24130 * Process the read persistent reservations command response by 24131 * copying the reservation information into the user provided 24132 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 24133 * 24134 * Arguments: un - Pointer to soft state struct for the target. 24135 * usrp - user provided pointer to multihost Persistent In Read 24136 * Keys structure (mhioc_inkeys_t) 24137 * flag - this argument is a pass through to ddi_copyxxx() 24138 * directly from the mode argument of ioctl(). 24139 * 24140 * Return Code: 0 - Success 24141 * EACCES 24142 * ENOTSUP 24143 * errno return code from sd_send_scsi_cmd() 24144 * 24145 * Context: Can sleep. Does not return until command is completed. 
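 *
 * A minimal, hypothetical user-level sketch of this request (field names
 * follow the structures handled below; "fd" is assumed to be an open
 * descriptor for the raw device, and error handling is omitted):
 *
 *     mhioc_resv_desc_t       descs[4];
 *     mhioc_resv_desc_list_t  dlist;
 *     mhioc_inresvs_t         inresv;
 *
 *     dlist.listsize = 4;
 *     dlist.list     = descs;
 *     inresv.li      = &dlist;
 *     (void) ioctl(fd, MHIOCGRP_INRESV, &inresv);
 *
 * On success dlist.listlen reports how many reservations the device holds,
 * and at most min(listlen, listsize) descriptors are copied into descs[].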
24146 */ 24147 24148 static int 24149 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 24150 mhioc_inresvs_t *usrp, int flag) 24151 { 24152 #ifdef _MULTI_DATAMODEL 24153 struct mhioc_resv_desc_list32 resvlist32; 24154 #endif 24155 sd_prin_readresv_t *in; 24156 mhioc_inresvs_t *ptr; 24157 sd_readresv_desc_t *readresv_ptr; 24158 mhioc_resv_desc_list_t resvlist; 24159 mhioc_resv_desc_t resvdesc; 24160 uchar_t *data_bufp; 24161 int data_len; 24162 int rval; 24163 int i; 24164 size_t copysz; 24165 mhioc_resv_desc_t *bufp; 24166 24167 if ((ptr = usrp) == NULL) { 24168 return (EINVAL); 24169 } 24170 24171 /* 24172 * Get the listsize from user 24173 */ 24174 #ifdef _MULTI_DATAMODEL 24175 switch (ddi_model_convert_from(flag & FMODELS)) { 24176 case DDI_MODEL_ILP32: 24177 copysz = sizeof (struct mhioc_resv_desc_list32); 24178 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 24179 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24180 "sd_persistent_reservation_in_read_resv: " 24181 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24182 rval = EFAULT; 24183 goto done; 24184 } 24185 resvlist.listsize = resvlist32.listsize; 24186 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 24187 break; 24188 24189 case DDI_MODEL_NONE: 24190 copysz = sizeof (mhioc_resv_desc_list_t); 24191 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 24192 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24193 "sd_persistent_reservation_in_read_resv: " 24194 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24195 rval = EFAULT; 24196 goto done; 24197 } 24198 break; 24199 } 24200 #else /* ! _MULTI_DATAMODEL */ 24201 copysz = sizeof (mhioc_resv_desc_list_t); 24202 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 24203 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24204 "sd_persistent_reservation_in_read_resv: " 24205 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24206 rval = EFAULT; 24207 goto done; 24208 } 24209 #endif /* ! _MULTI_DATAMODEL */ 24210 24211 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 24212 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 24213 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 24214 24215 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 24216 data_len, data_bufp)) != 0) { 24217 goto done; 24218 } 24219 in = (sd_prin_readresv_t *)data_bufp; 24220 ptr->generation = BE_32(in->generation); 24221 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 24222 24223 /* 24224 * Return the min(listsize, listlen( keys 24225 */ 24226 #ifdef _MULTI_DATAMODEL 24227 24228 switch (ddi_model_convert_from(flag & FMODELS)) { 24229 case DDI_MODEL_ILP32: 24230 resvlist32.listlen = resvlist.listlen; 24231 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 24232 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24233 "sd_persistent_reservation_in_read_resv: " 24234 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24235 rval = EFAULT; 24236 goto done; 24237 } 24238 break; 24239 24240 case DDI_MODEL_NONE: 24241 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24242 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24243 "sd_persistent_reservation_in_read_resv: " 24244 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24245 rval = EFAULT; 24246 goto done; 24247 } 24248 break; 24249 } 24250 24251 #else /* ! _MULTI_DATAMODEL */ 24252 24253 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24254 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24255 "sd_persistent_reservation_in_read_resv: " 24256 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24257 rval = EFAULT; 24258 goto done; 24259 } 24260 24261 #endif /* ! 
_MULTI_DATAMODEL */ 24262 24263 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 24264 bufp = resvlist.list; 24265 copysz = sizeof (mhioc_resv_desc_t); 24266 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 24267 i++, readresv_ptr++, bufp++) { 24268 24269 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 24270 MHIOC_RESV_KEY_SIZE); 24271 resvdesc.type = readresv_ptr->type; 24272 resvdesc.scope = readresv_ptr->scope; 24273 resvdesc.scope_specific_addr = 24274 BE_32(readresv_ptr->scope_specific_addr); 24275 24276 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 24277 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24278 "sd_persistent_reservation_in_read_resv: " 24279 "failed ddi_copyout: resvlist\n"); 24280 rval = EFAULT; 24281 goto done; 24282 } 24283 } 24284 done: 24285 kmem_free(data_bufp, data_len); 24286 return (rval); 24287 } 24288 24289 24290 /* 24291 * Function: sr_change_blkmode() 24292 * 24293 * Description: This routine is the driver entry point for handling CD-ROM 24294 * block mode ioctl requests. Support for returning and changing 24295 * the current block size in use by the device is implemented. The 24296 * LBA size is changed via a MODE SELECT Block Descriptor. 24297 * 24298 * This routine issues a mode sense with an allocation length of 24299 * 12 bytes for the mode page header and a single block descriptor. 24300 * 24301 * Arguments: dev - the device 'dev_t' 24302 * cmd - the request type; one of CDROMGBLKMODE (get) or 24303 * CDROMSBLKMODE (set) 24304 * data - current block size or requested block size 24305 * flag - this argument is a pass through to ddi_copyxxx() directly 24306 * from the mode argument of ioctl(). 24307 * 24308 * Return Code: the code returned by sd_send_scsi_cmd() 24309 * EINVAL if invalid arguments are provided 24310 * EFAULT if ddi_copyxxx() fails 24311 * ENXIO if fail ddi_get_soft_state 24312 * EIO if invalid mode sense block descriptor length 24313 * 24314 */ 24315 24316 static int 24317 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 24318 { 24319 struct sd_lun *un = NULL; 24320 struct mode_header *sense_mhp, *select_mhp; 24321 struct block_descriptor *sense_desc, *select_desc; 24322 int current_bsize; 24323 int rval = EINVAL; 24324 uchar_t *sense = NULL; 24325 uchar_t *select = NULL; 24326 24327 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 24328 24329 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24330 return (ENXIO); 24331 } 24332 24333 /* 24334 * The block length is changed via the Mode Select block descriptor, the 24335 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 24336 * required as part of this routine. Therefore the mode sense allocation 24337 * length is specified to be the length of a mode page header and a 24338 * block descriptor. 
24339 */ 24340 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24341 24342 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24343 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 24344 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24345 "sr_change_blkmode: Mode Sense Failed\n"); 24346 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24347 return (rval); 24348 } 24349 24350 /* Check the block descriptor len to handle only 1 block descriptor */ 24351 sense_mhp = (struct mode_header *)sense; 24352 if ((sense_mhp->bdesc_length == 0) || 24353 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 24354 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24355 "sr_change_blkmode: Mode Sense returned invalid block" 24356 " descriptor length\n"); 24357 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24358 return (EIO); 24359 } 24360 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 24361 current_bsize = ((sense_desc->blksize_hi << 16) | 24362 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 24363 24364 /* Process command */ 24365 switch (cmd) { 24366 case CDROMGBLKMODE: 24367 /* Return the block size obtained during the mode sense */ 24368 if (ddi_copyout(&current_bsize, (void *)data, 24369 sizeof (int), flag) != 0) 24370 rval = EFAULT; 24371 break; 24372 case CDROMSBLKMODE: 24373 /* Validate the requested block size */ 24374 switch (data) { 24375 case CDROM_BLK_512: 24376 case CDROM_BLK_1024: 24377 case CDROM_BLK_2048: 24378 case CDROM_BLK_2056: 24379 case CDROM_BLK_2336: 24380 case CDROM_BLK_2340: 24381 case CDROM_BLK_2352: 24382 case CDROM_BLK_2368: 24383 case CDROM_BLK_2448: 24384 case CDROM_BLK_2646: 24385 case CDROM_BLK_2647: 24386 break; 24387 default: 24388 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24389 "sr_change_blkmode: " 24390 "Block Size '%ld' Not Supported\n", data); 24391 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24392 return (EINVAL); 24393 } 24394 24395 /* 24396 * The current block size matches the requested block size so 24397 * there is no need to send the mode select to change the size 24398 */ 24399 if (current_bsize == data) { 24400 break; 24401 } 24402 24403 /* Build the select data for the requested block size */ 24404 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24405 select_mhp = (struct mode_header *)select; 24406 select_desc = 24407 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 24408 /* 24409 * The LBA size is changed via the block descriptor, so the 24410 * descriptor is built according to the user data 24411 */ 24412 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 24413 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 24414 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 24415 select_desc->blksize_lo = (char)((data) & 0x000000ff); 24416 24417 /* Send the mode select for the requested block size */ 24418 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24419 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24420 SD_PATH_STANDARD)) != 0) { 24421 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24422 "sr_change_blkmode: Mode Select Failed\n"); 24423 /* 24424 * The mode select failed for the requested block size, 24425 * so reset the data for the original block size and 24426 * send it to the target. The error is indicated by the 24427 * return value for the failed mode select.
24428 */ 24429 select_desc->blksize_hi = sense_desc->blksize_hi; 24430 select_desc->blksize_mid = sense_desc->blksize_mid; 24431 select_desc->blksize_lo = sense_desc->blksize_lo; 24432 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24433 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24434 SD_PATH_STANDARD); 24435 } else { 24436 ASSERT(!mutex_owned(SD_MUTEX(un))); 24437 mutex_enter(SD_MUTEX(un)); 24438 sd_update_block_info(un, (uint32_t)data, 0); 24439 mutex_exit(SD_MUTEX(un)); 24440 } 24441 break; 24442 default: 24443 /* should not reach here, but check anyway */ 24444 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24445 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 24446 rval = EINVAL; 24447 break; 24448 } 24449 24450 if (select) { 24451 kmem_free(select, BUFLEN_CHG_BLK_MODE); 24452 } 24453 if (sense) { 24454 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24455 } 24456 return (rval); 24457 } 24458 24459 24460 /* 24461 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 24462 * implement driver support for getting and setting the CD speed. The command 24463 * set used will be based on the device type. If the device has not been 24464 * identified as MMC the Toshiba vendor specific mode page will be used. If 24465 * the device is MMC but does not support the Real Time Streaming feature 24466 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24467 * be used to read the speed. 24468 */ 24469 24470 /* 24471 * Function: sr_change_speed() 24472 * 24473 * Description: This routine is the driver entry point for handling CD-ROM 24474 * drive speed ioctl requests for devices supporting the Toshiba 24475 * vendor specific drive speed mode page. Support for returning 24476 * and changing the current drive speed in use by the device is 24477 * implemented. 24478 * 24479 * Arguments: dev - the device 'dev_t' 24480 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24481 * CDROMSDRVSPEED (set) 24482 * data - current drive speed or requested drive speed 24483 * flag - this argument is a pass through to ddi_copyxxx() directly 24484 * from the mode argument of ioctl(). 24485 * 24486 * Return Code: the code returned by sd_send_scsi_cmd() 24487 * EINVAL if invalid arguments are provided 24488 * EFAULT if ddi_copyxxx() fails 24489 * ENXIO if fail ddi_get_soft_state 24490 * EIO if invalid mode sense block descriptor length 24491 */ 24492 24493 static int 24494 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24495 { 24496 struct sd_lun *un = NULL; 24497 struct mode_header *sense_mhp, *select_mhp; 24498 struct mode_speed *sense_page, *select_page; 24499 int current_speed; 24500 int rval = EINVAL; 24501 int bd_len; 24502 uchar_t *sense = NULL; 24503 uchar_t *select = NULL; 24504 24505 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24506 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24507 return (ENXIO); 24508 } 24509 24510 /* 24511 * Note: The drive speed is being modified here according to a Toshiba 24512 * vendor specific mode page (0x31). 
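 *
 * Illustrative shape of the mode select payload assembled below (a sketch
 * based on the code that follows):
 *
 *     struct mode_header  hdr;    hdr.bdesc_length    = 0
 *     struct mode_speed   pg;     pg.mode_page.code   = CDROM_MODE_SPEED
 *                                 pg.mode_page.length = 2
 *                                 pg.speed            = requested speed code
 *
 * The raw page value 0x2 is reported as CDROM_TWELVE_SPEED on the get path,
 * and CDROM_TWELVE_SPEED is converted back to 0x2 before the select.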
24513 */ 24514 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24515 24516 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24517 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 24518 SD_PATH_STANDARD)) != 0) { 24519 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24520 "sr_change_speed: Mode Sense Failed\n"); 24521 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24522 return (rval); 24523 } 24524 sense_mhp = (struct mode_header *)sense; 24525 24526 /* Check the block descriptor len to handle only 1 block descriptor */ 24527 bd_len = sense_mhp->bdesc_length; 24528 if (bd_len > MODE_BLK_DESC_LENGTH) { 24529 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24530 "sr_change_speed: Mode Sense returned invalid block " 24531 "descriptor length\n"); 24532 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24533 return (EIO); 24534 } 24535 24536 sense_page = (struct mode_speed *) 24537 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 24538 current_speed = sense_page->speed; 24539 24540 /* Process command */ 24541 switch (cmd) { 24542 case CDROMGDRVSPEED: 24543 /* Return the drive speed obtained during the mode sense */ 24544 if (current_speed == 0x2) { 24545 current_speed = CDROM_TWELVE_SPEED; 24546 } 24547 if (ddi_copyout(&current_speed, (void *)data, 24548 sizeof (int), flag) != 0) { 24549 rval = EFAULT; 24550 } 24551 break; 24552 case CDROMSDRVSPEED: 24553 /* Validate the requested drive speed */ 24554 switch ((uchar_t)data) { 24555 case CDROM_TWELVE_SPEED: 24556 data = 0x2; 24557 /*FALLTHROUGH*/ 24558 case CDROM_NORMAL_SPEED: 24559 case CDROM_DOUBLE_SPEED: 24560 case CDROM_QUAD_SPEED: 24561 case CDROM_MAXIMUM_SPEED: 24562 break; 24563 default: 24564 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24565 "sr_change_speed: " 24566 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 24567 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24568 return (EINVAL); 24569 } 24570 24571 /* 24572 * The current drive speed matches the requested drive speed so 24573 * there is no need to send the mode select to change the speed 24574 */ 24575 if (current_speed == data) { 24576 break; 24577 } 24578 24579 /* Build the select data for the requested drive speed */ 24580 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24581 select_mhp = (struct mode_header *)select; 24582 select_mhp->bdesc_length = 0; 24583 select_page = 24584 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 24585 24586 24587 select_page->mode_page.code = CDROM_MODE_SPEED; 24588 select_page->mode_page.length = 2; 24589 select_page->speed = (uchar_t)data; 24590 24591 /* Send the mode select for the requested drive speed */ 24592 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24593 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24594 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24595 /* 24596 * The mode select failed for the requested drive speed, 24597 * so reset the data for the original drive speed and 24598 * send it to the target. The error is indicated by the 24599 * return value for the failed mode select.
24600 */ 24601 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24602 "sr_drive_speed: Mode Select Failed\n"); 24603 select_page->speed = sense_page->speed; 24604 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24605 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24606 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 24607 } 24608 break; 24609 default: 24610 /* should not reach here, but check anyway */ 24611 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24612 "sr_change_speed: Command '%x' Not Supported\n", cmd); 24613 rval = EINVAL; 24614 break; 24615 } 24616 24617 if (select) { 24618 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 24619 } 24620 if (sense) { 24621 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24622 } 24623 24624 return (rval); 24625 } 24626 24627 24628 /* 24629 * Function: sr_atapi_change_speed() 24630 * 24631 * Description: This routine is the driver entry point for handling CD-ROM 24632 * drive speed ioctl requests for MMC devices that do not support 24633 * the Real Time Streaming feature (0x107). 24634 * 24635 * Note: This routine will use the SET SPEED command which may not 24636 * be supported by all devices. 24637 * 24638 * Arguments: dev- the device 'dev_t' 24639 * cmd- the request type; one of CDROMGDRVSPEED (get) or 24640 * CDROMSDRVSPEED (set) 24641 * data- current drive speed or requested drive speed 24642 * flag- this argument is a pass through to ddi_copyxxx() directly 24643 * from the mode argument of ioctl(). 24644 * 24645 * Return Code: the code returned by sd_send_scsi_cmd() 24646 * EINVAL if invalid arguments are provided 24647 * EFAULT if ddi_copyxxx() fails 24648 * ENXIO if fail ddi_get_soft_state 24649 * EIO if invalid mode sense block descriptor length 24650 */ 24651 24652 static int 24653 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24654 { 24655 struct sd_lun *un; 24656 struct uscsi_cmd *com = NULL; 24657 struct mode_header_grp2 *sense_mhp; 24658 uchar_t *sense_page; 24659 uchar_t *sense = NULL; 24660 char cdb[CDB_GROUP5]; 24661 int bd_len; 24662 int current_speed = 0; 24663 int max_speed = 0; 24664 int rval; 24665 24666 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24667 24668 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24669 return (ENXIO); 24670 } 24671 24672 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 24673 24674 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 24675 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 24676 SD_PATH_STANDARD)) != 0) { 24677 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24678 "sr_atapi_change_speed: Mode Sense Failed\n"); 24679 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24680 return (rval); 24681 } 24682 24683 /* Check the block descriptor len to handle only 1 block descriptor */ 24684 sense_mhp = (struct mode_header_grp2 *)sense; 24685 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 24686 if (bd_len > MODE_BLK_DESC_LENGTH) { 24687 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24688 "sr_atapi_change_speed: Mode Sense returned invalid " 24689 "block descriptor length\n"); 24690 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24691 return (EIO); 24692 } 24693 24694 /* Calculate the current and maximum drive speeds */ 24695 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 24696 current_speed = (sense_page[14] << 8) | sense_page[15]; 24697 max_speed = (sense_page[8] << 8) | sense_page[9]; 24698 24699 /* Process the command */ 24700 switch (cmd) { 24701 case CDROMGDRVSPEED: 24702 current_speed /= SD_SPEED_1X; 24703 if 
(ddi_copyout(&current_speed, (void *)data, 24704 sizeof (int), flag) != 0) 24705 rval = EFAULT; 24706 break; 24707 case CDROMSDRVSPEED: 24708 /* Convert the speed code to KB/sec */ 24709 switch ((uchar_t)data) { 24710 case CDROM_NORMAL_SPEED: 24711 current_speed = SD_SPEED_1X; 24712 break; 24713 case CDROM_DOUBLE_SPEED: 24714 current_speed = 2 * SD_SPEED_1X; 24715 break; 24716 case CDROM_QUAD_SPEED: 24717 current_speed = 4 * SD_SPEED_1X; 24718 break; 24719 case CDROM_TWELVE_SPEED: 24720 current_speed = 12 * SD_SPEED_1X; 24721 break; 24722 case CDROM_MAXIMUM_SPEED: 24723 current_speed = 0xffff; 24724 break; 24725 default: 24726 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24727 "sr_atapi_change_speed: invalid drive speed %d\n", 24728 (uchar_t)data); 24729 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24730 return (EINVAL); 24731 } 24732 24733 /* Check the request against the drive's max speed. */ 24734 if (current_speed != 0xffff) { 24735 if (current_speed > max_speed) { 24736 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24737 return (EINVAL); 24738 } 24739 } 24740 24741 /* 24742 * Build and send the SET SPEED command 24743 * 24744 * Note: The SET SPEED (0xBB) command used in this routine is 24745 * obsolete per the SCSI MMC spec but still supported in the 24746 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 24747 * therefore the command is still implemented in this routine. 24748 */ 24749 bzero(cdb, sizeof (cdb)); 24750 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 24751 cdb[2] = (uchar_t)(current_speed >> 8); 24752 cdb[3] = (uchar_t)current_speed; 24753 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24754 com->uscsi_cdb = (caddr_t)cdb; 24755 com->uscsi_cdblen = CDB_GROUP5; 24756 com->uscsi_bufaddr = NULL; 24757 com->uscsi_buflen = 0; 24758 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24759 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 24760 break; 24761 default: 24762 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24763 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 24764 rval = EINVAL; 24765 } 24766 24767 if (sense) { 24768 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24769 } 24770 if (com) { 24771 kmem_free(com, sizeof (*com)); 24772 } 24773 return (rval); 24774 } 24775 24776 24777 /* 24778 * Function: sr_pause_resume() 24779 * 24780 * Description: This routine is the driver entry point for handling CD-ROM 24781 * pause/resume ioctl requests. This only affects the audio play 24782 * operation. 24783 * 24784 * Arguments: dev - the device 'dev_t' 24785 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 24786 * for setting the resume bit of the cdb.
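 *                    An illustrative summary of how cmd is applied below
 *                    (taken from the PAUSE/RESUME cdb built in this routine):
 *
 *                        CDROMRESUME -> cdb[8] = 1   (resume audio play)
 *                        CDROMPAUSE  -> cdb[8] = 0   (pause audio play)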
24787 * 24788 * Return Code: the code returned by sd_send_scsi_cmd() 24789 * EINVAL if invalid mode specified 24790 * 24791 */ 24792 24793 static int 24794 sr_pause_resume(dev_t dev, int cmd) 24795 { 24796 struct sd_lun *un; 24797 struct uscsi_cmd *com; 24798 char cdb[CDB_GROUP1]; 24799 int rval; 24800 24801 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24802 return (ENXIO); 24803 } 24804 24805 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24806 bzero(cdb, CDB_GROUP1); 24807 cdb[0] = SCMD_PAUSE_RESUME; 24808 switch (cmd) { 24809 case CDROMRESUME: 24810 cdb[8] = 1; 24811 break; 24812 case CDROMPAUSE: 24813 cdb[8] = 0; 24814 break; 24815 default: 24816 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24817 " Command '%x' Not Supported\n", cmd); 24818 rval = EINVAL; 24819 goto done; 24820 } 24821 24822 com->uscsi_cdb = cdb; 24823 com->uscsi_cdblen = CDB_GROUP1; 24824 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24825 24826 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24827 SD_PATH_STANDARD); 24828 24829 done: 24830 kmem_free(com, sizeof (*com)); 24831 return (rval); 24832 } 24833 24834 24835 /* 24836 * Function: sr_play_msf() 24837 * 24838 * Description: This routine is the driver entry point for handling CD-ROM 24839 * ioctl requests to output the audio signals at the specified 24840 * starting address and continue the audio play until the specified 24841 * ending address (CDROMPLAYMSF) The address is in Minute Second 24842 * Frame (MSF) format. 24843 * 24844 * Arguments: dev - the device 'dev_t' 24845 * data - pointer to user provided audio msf structure, 24846 * specifying start/end addresses. 24847 * flag - this argument is a pass through to ddi_copyxxx() 24848 * directly from the mode argument of ioctl(). 
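 *
 *              An illustrative sketch of how the msf structure is mapped into
 *              the PLAY AUDIO MSF cdb below (a reading aid only):
 *
 *                  cdb[3..5] = start minute / second / frame
 *                  cdb[6..8] = end   minute / second / frame
 *
 *              Drives flagged with un_f_cfg_playmsf_bcd get the values BCD
 *              encoded via BYTE_TO_BCD(), e.g. a start minute of 12 is sent
 *              as 0x12.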
24849 * 24850 * Return Code: the code returned by sd_send_scsi_cmd() 24851 * EFAULT if ddi_copyxxx() fails 24852 * ENXIO if fail ddi_get_soft_state 24853 * EINVAL if data pointer is NULL 24854 */ 24855 24856 static int 24857 sr_play_msf(dev_t dev, caddr_t data, int flag) 24858 { 24859 struct sd_lun *un; 24860 struct uscsi_cmd *com; 24861 struct cdrom_msf msf_struct; 24862 struct cdrom_msf *msf = &msf_struct; 24863 char cdb[CDB_GROUP1]; 24864 int rval; 24865 24866 if (data == NULL) { 24867 return (EINVAL); 24868 } 24869 24870 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24871 return (ENXIO); 24872 } 24873 24874 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24875 return (EFAULT); 24876 } 24877 24878 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24879 bzero(cdb, CDB_GROUP1); 24880 cdb[0] = SCMD_PLAYAUDIO_MSF; 24881 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24882 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24883 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24884 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24885 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24886 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24887 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24888 } else { 24889 cdb[3] = msf->cdmsf_min0; 24890 cdb[4] = msf->cdmsf_sec0; 24891 cdb[5] = msf->cdmsf_frame0; 24892 cdb[6] = msf->cdmsf_min1; 24893 cdb[7] = msf->cdmsf_sec1; 24894 cdb[8] = msf->cdmsf_frame1; 24895 } 24896 com->uscsi_cdb = cdb; 24897 com->uscsi_cdblen = CDB_GROUP1; 24898 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24899 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24900 SD_PATH_STANDARD); 24901 kmem_free(com, sizeof (*com)); 24902 return (rval); 24903 } 24904 24905 24906 /* 24907 * Function: sr_play_trkind() 24908 * 24909 * Description: This routine is the driver entry point for handling CD-ROM 24910 * ioctl requests to output the audio signals at the specified 24911 * starting address and continue the audio play until the specified 24912 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24913 * format. 24914 * 24915 * Arguments: dev - the device 'dev_t' 24916 * data - pointer to user provided audio track/index structure, 24917 * specifying start/end addresses. 24918 * flag - this argument is a pass through to ddi_copyxxx() 24919 * directly from the mode argument of ioctl(). 
24920 * 24921 * Return Code: the code returned by sd_send_scsi_cmd() 24922 * EFAULT if ddi_copyxxx() fails 24923 * ENXIO if fail ddi_get_soft_state 24924 * EINVAL if data pointer is NULL 24925 */ 24926 24927 static int 24928 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24929 { 24930 struct cdrom_ti ti_struct; 24931 struct cdrom_ti *ti = &ti_struct; 24932 struct uscsi_cmd *com = NULL; 24933 char cdb[CDB_GROUP1]; 24934 int rval; 24935 24936 if (data == NULL) { 24937 return (EINVAL); 24938 } 24939 24940 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24941 return (EFAULT); 24942 } 24943 24944 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24945 bzero(cdb, CDB_GROUP1); 24946 cdb[0] = SCMD_PLAYAUDIO_TI; 24947 cdb[4] = ti->cdti_trk0; 24948 cdb[5] = ti->cdti_ind0; 24949 cdb[7] = ti->cdti_trk1; 24950 cdb[8] = ti->cdti_ind1; 24951 com->uscsi_cdb = cdb; 24952 com->uscsi_cdblen = CDB_GROUP1; 24953 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24954 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24955 SD_PATH_STANDARD); 24956 kmem_free(com, sizeof (*com)); 24957 return (rval); 24958 } 24959 24960 24961 /* 24962 * Function: sr_read_all_subcodes() 24963 * 24964 * Description: This routine is the driver entry point for handling CD-ROM 24965 * ioctl requests to return raw subcode data while the target is 24966 * playing audio (CDROMSUBCODE). 24967 * 24968 * Arguments: dev - the device 'dev_t' 24969 * data - pointer to user provided cdrom subcode structure, 24970 * specifying the transfer length and address. 24971 * flag - this argument is a pass through to ddi_copyxxx() 24972 * directly from the mode argument of ioctl(). 24973 * 24974 * Return Code: the code returned by sd_send_scsi_cmd() 24975 * EFAULT if ddi_copyxxx() fails 24976 * ENXIO if fail ddi_get_soft_state 24977 * EINVAL if data pointer is NULL 24978 */ 24979 24980 static int 24981 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24982 { 24983 struct sd_lun *un = NULL; 24984 struct uscsi_cmd *com = NULL; 24985 struct cdrom_subcode *subcode = NULL; 24986 int rval; 24987 size_t buflen; 24988 char cdb[CDB_GROUP5]; 24989 24990 #ifdef _MULTI_DATAMODEL 24991 /* To support ILP32 applications in an LP64 world */ 24992 struct cdrom_subcode32 cdrom_subcode32; 24993 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24994 #endif 24995 if (data == NULL) { 24996 return (EINVAL); 24997 } 24998 24999 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25000 return (ENXIO); 25001 } 25002 25003 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 25004 25005 #ifdef _MULTI_DATAMODEL 25006 switch (ddi_model_convert_from(flag & FMODELS)) { 25007 case DDI_MODEL_ILP32: 25008 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 25009 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25010 "sr_read_all_subcodes: ddi_copyin Failed\n"); 25011 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25012 return (EFAULT); 25013 } 25014 /* Convert the ILP32 uscsi data from the application to LP64 */ 25015 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 25016 break; 25017 case DDI_MODEL_NONE: 25018 if (ddi_copyin(data, subcode, 25019 sizeof (struct cdrom_subcode), flag)) { 25020 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25021 "sr_read_all_subcodes: ddi_copyin Failed\n"); 25022 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25023 return (EFAULT); 25024 } 25025 break; 25026 } 25027 #else /* ! 
_MULTI_DATAMODEL */ 25028 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 25029 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25030 "sr_read_all_subcodes: ddi_copyin Failed\n"); 25031 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25032 return (EFAULT); 25033 } 25034 #endif /* _MULTI_DATAMODEL */ 25035 25036 /* 25037 * Since MMC-2 expects max 3 bytes for length, check if the 25038 * length input is greater than 3 bytes 25039 */ 25040 if ((subcode->cdsc_length & 0xFF000000) != 0) { 25041 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25042 "sr_read_all_subcodes: " 25043 "cdrom transfer length too large: %d (limit %d)\n", 25044 subcode->cdsc_length, 0xFFFFFF); 25045 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25046 return (EINVAL); 25047 } 25048 25049 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 25050 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25051 bzero(cdb, CDB_GROUP5); 25052 25053 if (un->un_f_mmc_cap == TRUE) { 25054 cdb[0] = (char)SCMD_READ_CD; 25055 cdb[2] = (char)0xff; 25056 cdb[3] = (char)0xff; 25057 cdb[4] = (char)0xff; 25058 cdb[5] = (char)0xff; 25059 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 25060 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 25061 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 25062 cdb[10] = 1; 25063 } else { 25064 /* 25065 * Note: A vendor specific command (0xDF) is being used here to 25066 * request a read of all subcodes. 25067 */ 25068 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 25069 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 25070 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 25071 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 25072 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 25073 } 25074 com->uscsi_cdb = cdb; 25075 com->uscsi_cdblen = CDB_GROUP5; 25076 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 25077 com->uscsi_buflen = buflen; 25078 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25079 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25080 SD_PATH_STANDARD); 25081 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25082 kmem_free(com, sizeof (*com)); 25083 return (rval); 25084 } 25085 25086 25087 /* 25088 * Function: sr_read_subchannel() 25089 * 25090 * Description: This routine is the driver entry point for handling CD-ROM 25091 * ioctl requests to return the Q sub-channel data of the CD 25092 * current position block. (CDROMSUBCHNL) The data includes the 25093 * track number, index number, absolute CD-ROM address (LBA or MSF 25094 * format per the user), track relative CD-ROM address (LBA or MSF 25095 * format per the user), control data and audio status. 25096 * 25097 * Arguments: dev - the device 'dev_t' 25098 * data - pointer to user provided cdrom sub-channel structure 25099 * flag - this argument is a pass through to ddi_copyxxx() 25100 * directly from the mode argument of ioctl().
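 *
 *              An illustrative map of the 16-byte READ SUBCHANNEL response as
 *              it is parsed below (a sketch; byte meanings follow the code):
 *
 *                  buffer[1]       audio status
 *                  buffer[5]       ADR (high nibble) / CTRL (low nibble)
 *                  buffer[6]       track number
 *                  buffer[7]       index number
 *                  buffer[8-11]    absolute address (LBA), or MSF in 9/10/11
 *                  buffer[12-15]   relative address (LBA), or MSF in 13/14/15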
25101 * 25102 * Return Code: the code returned by sd_send_scsi_cmd() 25103 * EFAULT if ddi_copyxxx() fails 25104 * ENXIO if fail ddi_get_soft_state 25105 * EINVAL if data pointer is NULL 25106 */ 25107 25108 static int 25109 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 25110 { 25111 struct sd_lun *un; 25112 struct uscsi_cmd *com; 25113 struct cdrom_subchnl subchanel; 25114 struct cdrom_subchnl *subchnl = &subchanel; 25115 char cdb[CDB_GROUP1]; 25116 caddr_t buffer; 25117 int rval; 25118 25119 if (data == NULL) { 25120 return (EINVAL); 25121 } 25122 25123 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25124 (un->un_state == SD_STATE_OFFLINE)) { 25125 return (ENXIO); 25126 } 25127 25128 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 25129 return (EFAULT); 25130 } 25131 25132 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 25133 bzero(cdb, CDB_GROUP1); 25134 cdb[0] = SCMD_READ_SUBCHANNEL; 25135 /* Set the MSF bit based on the user requested address format */ 25136 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 25137 /* 25138 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 25139 * returned 25140 */ 25141 cdb[2] = 0x40; 25142 /* 25143 * Set byte 3 to specify the return data format. A value of 0x01 25144 * indicates that the CD-ROM current position should be returned. 25145 */ 25146 cdb[3] = 0x01; 25147 cdb[8] = 0x10; 25148 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25149 com->uscsi_cdb = cdb; 25150 com->uscsi_cdblen = CDB_GROUP1; 25151 com->uscsi_bufaddr = buffer; 25152 com->uscsi_buflen = 16; 25153 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25154 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25155 SD_PATH_STANDARD); 25156 if (rval != 0) { 25157 kmem_free(buffer, 16); 25158 kmem_free(com, sizeof (*com)); 25159 return (rval); 25160 } 25161 25162 /* Process the returned Q sub-channel data */ 25163 subchnl->cdsc_audiostatus = buffer[1]; 25164 subchnl->cdsc_adr = (buffer[5] & 0xF0); 25165 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 25166 subchnl->cdsc_trk = buffer[6]; 25167 subchnl->cdsc_ind = buffer[7]; 25168 if (subchnl->cdsc_format & CDROM_LBA) { 25169 subchnl->cdsc_absaddr.lba = 25170 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25171 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25172 subchnl->cdsc_reladdr.lba = 25173 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 25174 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 25175 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 25176 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 25177 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 25178 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 25179 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 25180 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 25181 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 25182 } else { 25183 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 25184 subchnl->cdsc_absaddr.msf.second = buffer[10]; 25185 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 25186 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 25187 subchnl->cdsc_reladdr.msf.second = buffer[14]; 25188 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 25189 } 25190 kmem_free(buffer, 16); 25191 kmem_free(com, sizeof (*com)); 25192 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 25193 != 0) { 25194 return (EFAULT); 25195 } 25196 return (rval); 25197 } 25198 25199 25200 /* 25201 * Function: sr_read_tocentry() 25202 * 
25203 * Description: This routine is the driver entry point for handling CD-ROM 25204 * ioctl requests to read from the Table of Contents (TOC) 25205 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 25206 * fields, the starting address (LBA or MSF format per the user) 25207 * and the data mode if the user specified track is a data track. 25208 * 25209 * Note: The READ HEADER (0x44) command used in this routine is 25210 * obsolete per the SCSI MMC spec but still supported in the 25211 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 25212 * therefore the command is still implemented in this routine. 25213 * 25214 * Arguments: dev - the device 'dev_t' 25215 * data - pointer to user provided toc entry structure, 25216 * specifying the track # and the address format 25217 * (LBA or MSF). 25218 * flag - this argument is a pass through to ddi_copyxxx() 25219 * directly from the mode argument of ioctl(). 25220 * 25221 * Return Code: the code returned by sd_send_scsi_cmd() 25222 * EFAULT if ddi_copyxxx() fails 25223 * ENXIO if fail ddi_get_soft_state 25224 * EINVAL if data pointer is NULL 25225 */ 25226 25227 static int 25228 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 25229 { 25230 struct sd_lun *un = NULL; 25231 struct uscsi_cmd *com; 25232 struct cdrom_tocentry toc_entry; 25233 struct cdrom_tocentry *entry = &toc_entry; 25234 caddr_t buffer; 25235 int rval; 25236 char cdb[CDB_GROUP1]; 25237 25238 if (data == NULL) { 25239 return (EINVAL); 25240 } 25241 25242 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25243 (un->un_state == SD_STATE_OFFLINE)) { 25244 return (ENXIO); 25245 } 25246 25247 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 25248 return (EFAULT); 25249 } 25250 25251 /* Validate the requested track and address format */ 25252 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 25253 return (EINVAL); 25254 } 25255 25256 if (entry->cdte_track == 0) { 25257 return (EINVAL); 25258 } 25259 25260 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 25261 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25262 bzero(cdb, CDB_GROUP1); 25263 25264 cdb[0] = SCMD_READ_TOC; 25265 /* Set the MSF bit based on the user requested address format */ 25266 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 25267 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25268 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 25269 } else { 25270 cdb[6] = entry->cdte_track; 25271 } 25272 25273 /* 25274 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
25275 * (4 byte TOC response header + 8 byte track descriptor) 25276 */ 25277 cdb[8] = 12; 25278 com->uscsi_cdb = cdb; 25279 com->uscsi_cdblen = CDB_GROUP1; 25280 com->uscsi_bufaddr = buffer; 25281 com->uscsi_buflen = 0x0C; 25282 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 25283 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25284 SD_PATH_STANDARD); 25285 if (rval != 0) { 25286 kmem_free(buffer, 12); 25287 kmem_free(com, sizeof (*com)); 25288 return (rval); 25289 } 25290 25291 /* Process the toc entry */ 25292 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 25293 entry->cdte_ctrl = (buffer[5] & 0x0F); 25294 if (entry->cdte_format & CDROM_LBA) { 25295 entry->cdte_addr.lba = 25296 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25297 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25298 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 25299 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 25300 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 25301 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 25302 /* 25303 * Send a READ TOC command using the LBA address format to get 25304 * the LBA for the track requested so it can be used in the 25305 * READ HEADER request 25306 * 25307 * Note: The MSF bit of the READ HEADER command specifies the 25308 * output format. The block address specified in that command 25309 * must be in LBA format. 25310 */ 25311 cdb[1] = 0; 25312 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25313 SD_PATH_STANDARD); 25314 if (rval != 0) { 25315 kmem_free(buffer, 12); 25316 kmem_free(com, sizeof (*com)); 25317 return (rval); 25318 } 25319 } else { 25320 entry->cdte_addr.msf.minute = buffer[9]; 25321 entry->cdte_addr.msf.second = buffer[10]; 25322 entry->cdte_addr.msf.frame = buffer[11]; 25323 /* 25324 * Send a READ TOC command using the LBA address format to get 25325 * the LBA for the track requested so it can be used in the 25326 * READ HEADER request 25327 * 25328 * Note: The MSF bit of the READ HEADER command specifies the 25329 * output format. The block address specified in that command 25330 * must be in LBA format. 25331 */ 25332 cdb[1] = 0; 25333 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25334 SD_PATH_STANDARD); 25335 if (rval != 0) { 25336 kmem_free(buffer, 12); 25337 kmem_free(com, sizeof (*com)); 25338 return (rval); 25339 } 25340 } 25341 25342 /* 25343 * Build and send the READ HEADER command to determine the data mode of 25344 * the user specified track. 25345 */ 25346 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 25347 (entry->cdte_track != CDROM_LEADOUT)) { 25348 bzero(cdb, CDB_GROUP1); 25349 cdb[0] = SCMD_READ_HEADER; 25350 cdb[2] = buffer[8]; 25351 cdb[3] = buffer[9]; 25352 cdb[4] = buffer[10]; 25353 cdb[5] = buffer[11]; 25354 cdb[8] = 0x08; 25355 com->uscsi_buflen = 0x08; 25356 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25357 SD_PATH_STANDARD); 25358 if (rval == 0) { 25359 entry->cdte_datamode = buffer[0]; 25360 } else { 25361 /* 25362 * The READ HEADER command failed. Since it is 25363 * obsolete in one spec, it's better to return 25364 * -1 for an invalid track so that we can still 25365 * receive the rest of the TOC data.
25366 */ 25367 entry->cdte_datamode = (uchar_t)-1; 25368 } 25369 } else { 25370 entry->cdte_datamode = (uchar_t)-1; 25371 } 25372 25373 kmem_free(buffer, 12); 25374 kmem_free(com, sizeof (*com)); 25375 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 25376 return (EFAULT); 25377 25378 return (rval); 25379 } 25380 25381 25382 /* 25383 * Function: sr_read_tochdr() 25384 * 25385 * Description: This routine is the driver entry point for handling CD-ROM 25386 * ioctl requests to read the Table of Contents (TOC) header 25387 * (CDROMREADTOHDR). The TOC header consists of the disk starting 25388 * and ending track numbers 25389 * 25390 * Arguments: dev - the device 'dev_t' 25391 * data - pointer to user provided toc header structure, 25392 * specifying the starting and ending track numbers. 25393 * flag - this argument is a pass through to ddi_copyxxx() 25394 * directly from the mode argument of ioctl(). 25395 * 25396 * Return Code: the code returned by sd_send_scsi_cmd() 25397 * EFAULT if ddi_copyxxx() fails 25398 * ENXIO if fail ddi_get_soft_state 25399 * EINVAL if data pointer is NULL 25400 */ 25401 25402 static int 25403 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 25404 { 25405 struct sd_lun *un; 25406 struct uscsi_cmd *com; 25407 struct cdrom_tochdr toc_header; 25408 struct cdrom_tochdr *hdr = &toc_header; 25409 char cdb[CDB_GROUP1]; 25410 int rval; 25411 caddr_t buffer; 25412 25413 if (data == NULL) { 25414 return (EINVAL); 25415 } 25416 25417 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25418 (un->un_state == SD_STATE_OFFLINE)) { 25419 return (ENXIO); 25420 } 25421 25422 buffer = kmem_zalloc(4, KM_SLEEP); 25423 bzero(cdb, CDB_GROUP1); 25424 cdb[0] = SCMD_READ_TOC; 25425 /* 25426 * Specifying a track number of 0x00 in the READ TOC command indicates 25427 * that the TOC header should be returned 25428 */ 25429 cdb[6] = 0x00; 25430 /* 25431 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 25432 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 25433 */ 25434 cdb[8] = 0x04; 25435 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25436 com->uscsi_cdb = cdb; 25437 com->uscsi_cdblen = CDB_GROUP1; 25438 com->uscsi_bufaddr = buffer; 25439 com->uscsi_buflen = 0x04; 25440 com->uscsi_timeout = 300; 25441 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25442 25443 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25444 SD_PATH_STANDARD); 25445 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25446 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 25447 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 25448 } else { 25449 hdr->cdth_trk0 = buffer[2]; 25450 hdr->cdth_trk1 = buffer[3]; 25451 } 25452 kmem_free(buffer, 4); 25453 kmem_free(com, sizeof (*com)); 25454 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 25455 return (EFAULT); 25456 } 25457 return (rval); 25458 } 25459 25460 25461 /* 25462 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 25463 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 25464 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 25465 * digital audio and extended architecture digital audio. These modes are 25466 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 25467 * MMC specs. 
25468 * 25469 * In addition to support for the various data formats these routines also 25470 * include support for devices that implement only the direct access READ 25471 * commands (0x08, 0x28), devices that implement the READ_CD commands 25472 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25473 * READ CDXA commands (0xD8, 0xDB) 25474 */ 25475 25476 /* 25477 * Function: sr_read_mode1() 25478 * 25479 * Description: This routine is the driver entry point for handling CD-ROM 25480 * ioctl read mode1 requests (CDROMREADMODE1). 25481 * 25482 * Arguments: dev - the device 'dev_t' 25483 * data - pointer to user provided cd read structure specifying 25484 * the lba buffer address and length. 25485 * flag - this argument is a pass through to ddi_copyxxx() 25486 * directly from the mode argument of ioctl(). 25487 * 25488 * Return Code: the code returned by sd_send_scsi_cmd() 25489 * EFAULT if ddi_copyxxx() fails 25490 * ENXIO if fail ddi_get_soft_state 25491 * EINVAL if data pointer is NULL 25492 */ 25493 25494 static int 25495 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25496 { 25497 struct sd_lun *un; 25498 struct cdrom_read mode1_struct; 25499 struct cdrom_read *mode1 = &mode1_struct; 25500 int rval; 25501 #ifdef _MULTI_DATAMODEL 25502 /* To support ILP32 applications in an LP64 world */ 25503 struct cdrom_read32 cdrom_read32; 25504 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25505 #endif /* _MULTI_DATAMODEL */ 25506 25507 if (data == NULL) { 25508 return (EINVAL); 25509 } 25510 25511 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25512 (un->un_state == SD_STATE_OFFLINE)) { 25513 return (ENXIO); 25514 } 25515 25516 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25517 "sd_read_mode1: entry: un:0x%p\n", un); 25518 25519 #ifdef _MULTI_DATAMODEL 25520 switch (ddi_model_convert_from(flag & FMODELS)) { 25521 case DDI_MODEL_ILP32: 25522 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25523 return (EFAULT); 25524 } 25525 /* Convert the ILP32 uscsi data from the application to LP64 */ 25526 cdrom_read32tocdrom_read(cdrd32, mode1); 25527 break; 25528 case DDI_MODEL_NONE: 25529 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25530 return (EFAULT); 25531 } 25532 } 25533 #else /* ! _MULTI_DATAMODEL */ 25534 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25535 return (EFAULT); 25536 } 25537 #endif /* _MULTI_DATAMODEL */ 25538 25539 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25540 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25541 25542 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25543 "sd_read_mode1: exit: un:0x%p\n", un); 25544 25545 return (rval); 25546 } 25547 25548 25549 /* 25550 * Function: sr_read_cd_mode2() 25551 * 25552 * Description: This routine is the driver entry point for handling CD-ROM 25553 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25554 * support the READ CD (0xBE) command or the 1st generation 25555 * READ CD (0xD4) command. 25556 * 25557 * Arguments: dev - the device 'dev_t' 25558 * data - pointer to user provided cd read structure specifying 25559 * the lba buffer address and length. 25560 * flag - this argument is a pass through to ddi_copyxxx() 25561 * directly from the mode argument of ioctl(). 
25562 * 25563 * Return Code: the code returned by sd_send_scsi_cmd() 25564 * EFAULT if ddi_copyxxx() fails 25565 * ENXIO if fail ddi_get_soft_state 25566 * EINVAL if data pointer is NULL 25567 */ 25568 25569 static int 25570 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 25571 { 25572 struct sd_lun *un; 25573 struct uscsi_cmd *com; 25574 struct cdrom_read mode2_struct; 25575 struct cdrom_read *mode2 = &mode2_struct; 25576 uchar_t cdb[CDB_GROUP5]; 25577 int nblocks; 25578 int rval; 25579 #ifdef _MULTI_DATAMODEL 25580 /* To support ILP32 applications in an LP64 world */ 25581 struct cdrom_read32 cdrom_read32; 25582 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25583 #endif /* _MULTI_DATAMODEL */ 25584 25585 if (data == NULL) { 25586 return (EINVAL); 25587 } 25588 25589 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25590 (un->un_state == SD_STATE_OFFLINE)) { 25591 return (ENXIO); 25592 } 25593 25594 #ifdef _MULTI_DATAMODEL 25595 switch (ddi_model_convert_from(flag & FMODELS)) { 25596 case DDI_MODEL_ILP32: 25597 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25598 return (EFAULT); 25599 } 25600 /* Convert the ILP32 uscsi data from the application to LP64 */ 25601 cdrom_read32tocdrom_read(cdrd32, mode2); 25602 break; 25603 case DDI_MODEL_NONE: 25604 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25605 return (EFAULT); 25606 } 25607 break; 25608 } 25609 25610 #else /* ! _MULTI_DATAMODEL */ 25611 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25612 return (EFAULT); 25613 } 25614 #endif /* _MULTI_DATAMODEL */ 25615 25616 bzero(cdb, sizeof (cdb)); 25617 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 25618 /* Read command supported by 1st generation atapi drives */ 25619 cdb[0] = SCMD_READ_CDD4; 25620 } else { 25621 /* Universal CD Access Command */ 25622 cdb[0] = SCMD_READ_CD; 25623 } 25624 25625 /* 25626 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 25627 */ 25628 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 25629 25630 /* set the start address */ 25631 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 25632 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 25633 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25634 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 25635 25636 /* set the transfer length */ 25637 nblocks = mode2->cdread_buflen / 2336; 25638 cdb[6] = (uchar_t)(nblocks >> 16); 25639 cdb[7] = (uchar_t)(nblocks >> 8); 25640 cdb[8] = (uchar_t)nblocks; 25641 25642 /* set the filter bits */ 25643 cdb[9] = CDROM_READ_CD_USERDATA; 25644 25645 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25646 com->uscsi_cdb = (caddr_t)cdb; 25647 com->uscsi_cdblen = sizeof (cdb); 25648 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25649 com->uscsi_buflen = mode2->cdread_buflen; 25650 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25651 25652 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25653 SD_PATH_STANDARD); 25654 kmem_free(com, sizeof (*com)); 25655 return (rval); 25656 } 25657 25658 25659 /* 25660 * Function: sr_read_mode2() 25661 * 25662 * Description: This routine is the driver entry point for handling CD-ROM 25663 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25664 * do not support the READ CD (0xBE) command. 25665 * 25666 * Arguments: dev - the device 'dev_t' 25667 * data - pointer to user provided cd read structure specifying 25668 * the lba buffer address and length. 25669 * flag - this argument is a pass through to ddi_copyxxx() 25670 * directly from the mode argument of ioctl(). 
25671 * 25672 * Return Code: the code returned by sd_send_scsi_cmd() 25673 * EFAULT if ddi_copyxxx() fails 25674 * ENXIO if fail ddi_get_soft_state 25675 * EINVAL if data pointer is NULL 25676 * EIO if fail to reset block size 25677 * EAGAIN if commands are in progress in the driver 25678 */ 25679 25680 static int 25681 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25682 { 25683 struct sd_lun *un; 25684 struct cdrom_read mode2_struct; 25685 struct cdrom_read *mode2 = &mode2_struct; 25686 int rval; 25687 uint32_t restore_blksize; 25688 struct uscsi_cmd *com; 25689 uchar_t cdb[CDB_GROUP0]; 25690 int nblocks; 25691 25692 #ifdef _MULTI_DATAMODEL 25693 /* To support ILP32 applications in an LP64 world */ 25694 struct cdrom_read32 cdrom_read32; 25695 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25696 #endif /* _MULTI_DATAMODEL */ 25697 25698 if (data == NULL) { 25699 return (EINVAL); 25700 } 25701 25702 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25703 (un->un_state == SD_STATE_OFFLINE)) { 25704 return (ENXIO); 25705 } 25706 25707 /* 25708 * Because this routine will update the device and driver block size 25709 * being used we want to make sure there are no commands in progress. 25710 * If commands are in progress the user will have to try again. 25711 * 25712 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25713 * in sdioctl to protect commands from sdioctl through to the top of 25714 * sd_uscsi_strategy. See sdioctl for details. 25715 */ 25716 mutex_enter(SD_MUTEX(un)); 25717 if (un->un_ncmds_in_driver != 1) { 25718 mutex_exit(SD_MUTEX(un)); 25719 return (EAGAIN); 25720 } 25721 mutex_exit(SD_MUTEX(un)); 25722 25723 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25724 "sd_read_mode2: entry: un:0x%p\n", un); 25725 25726 #ifdef _MULTI_DATAMODEL 25727 switch (ddi_model_convert_from(flag & FMODELS)) { 25728 case DDI_MODEL_ILP32: 25729 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25730 return (EFAULT); 25731 } 25732 /* Convert the ILP32 uscsi data from the application to LP64 */ 25733 cdrom_read32tocdrom_read(cdrd32, mode2); 25734 break; 25735 case DDI_MODEL_NONE: 25736 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25737 return (EFAULT); 25738 } 25739 break; 25740 } 25741 #else /* ! 
_MULTI_DATAMODEL */ 25742 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 25743 return (EFAULT); 25744 } 25745 #endif /* _MULTI_DATAMODEL */ 25746 25747 /* Store the current target block size for restoration later */ 25748 restore_blksize = un->un_tgt_blocksize; 25749 25750 /* Change the device and soft state target block size to 2336 */ 25751 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 25752 rval = EIO; 25753 goto done; 25754 } 25755 25756 25757 bzero(cdb, sizeof (cdb)); 25758 25759 /* set READ operation */ 25760 cdb[0] = SCMD_READ; 25761 25762 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 25763 mode2->cdread_lba >>= 2; 25764 25765 /* set the start address */ 25766 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 25767 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25768 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 25769 25770 /* set the transfer length */ 25771 nblocks = mode2->cdread_buflen / 2336; 25772 cdb[4] = (uchar_t)nblocks & 0xFF; 25773 25774 /* build command */ 25775 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25776 com->uscsi_cdb = (caddr_t)cdb; 25777 com->uscsi_cdblen = sizeof (cdb); 25778 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25779 com->uscsi_buflen = mode2->cdread_buflen; 25780 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25781 25782 /* 25783 * Issue SCSI command with user space address for read buffer. 25784 * 25785 * This sends the command through main channel in the driver. 25786 * 25787 * Since this is accessed via an IOCTL call, we go through the 25788 * standard path, so that if the device was powered down, then 25789 * it would be 'awakened' to handle the command. 25790 */ 25791 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25792 SD_PATH_STANDARD); 25793 25794 kmem_free(com, sizeof (*com)); 25795 25796 /* Restore the device and soft state target block size */ 25797 if (sr_sector_mode(dev, restore_blksize) != 0) { 25798 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25799 "can't do switch back to mode 1\n"); 25800 /* 25801 * If sd_send_scsi_READ succeeded we still need to report 25802 * an error because we failed to reset the block size 25803 */ 25804 if (rval == 0) { 25805 rval = EIO; 25806 } 25807 } 25808 25809 done: 25810 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25811 "sd_read_mode2: exit: un:0x%p\n", un); 25812 25813 return (rval); 25814 } 25815 25816 25817 /* 25818 * Function: sr_sector_mode() 25819 * 25820 * Description: This utility function is used by sr_read_mode2 to set the target 25821 * block size based on the user specified size. This is a legacy 25822 * implementation based upon a vendor specific mode page 25823 * 25824 * Arguments: dev - the device 'dev_t' 25825 * data - flag indicating if block size is being set to 2336 or 25826 * 512. 
25827 * 25828 * Return Code: the code returned by sd_send_scsi_cmd() 25829 * EFAULT if ddi_copyxxx() fails 25830 * ENXIO if fail ddi_get_soft_state 25831 * EINVAL if data pointer is NULL 25832 */ 25833 25834 static int 25835 sr_sector_mode(dev_t dev, uint32_t blksize) 25836 { 25837 struct sd_lun *un; 25838 uchar_t *sense; 25839 uchar_t *select; 25840 int rval; 25841 25842 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25843 (un->un_state == SD_STATE_OFFLINE)) { 25844 return (ENXIO); 25845 } 25846 25847 sense = kmem_zalloc(20, KM_SLEEP); 25848 25849 /* Note: This is a vendor specific mode page (0x81) */ 25850 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25851 SD_PATH_STANDARD)) != 0) { 25852 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25853 "sr_sector_mode: Mode Sense failed\n"); 25854 kmem_free(sense, 20); 25855 return (rval); 25856 } 25857 select = kmem_zalloc(20, KM_SLEEP); 25858 select[3] = 0x08; 25859 select[10] = ((blksize >> 8) & 0xff); 25860 select[11] = (blksize & 0xff); 25861 select[12] = 0x01; 25862 select[13] = 0x06; 25863 select[14] = sense[14]; 25864 select[15] = sense[15]; 25865 if (blksize == SD_MODE2_BLKSIZE) { 25866 select[14] |= 0x01; 25867 } 25868 25869 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25870 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25871 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25872 "sr_sector_mode: Mode Select failed\n"); 25873 } else { 25874 /* 25875 * Only update the softstate block size if we successfully 25876 * changed the device block mode. 25877 */ 25878 mutex_enter(SD_MUTEX(un)); 25879 sd_update_block_info(un, blksize, 0); 25880 mutex_exit(SD_MUTEX(un)); 25881 } 25882 kmem_free(sense, 20); 25883 kmem_free(select, 20); 25884 return (rval); 25885 } 25886 25887 25888 /* 25889 * Function: sr_read_cdda() 25890 * 25891 * Description: This routine is the driver entry point for handling CD-ROM 25892 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 25893 * the target supports CDDA these requests are handled via a vendor 25894 * specific command (0xD8) If the target does not support CDDA 25895 * these requests are handled via the READ CD command (0xBE). 25896 * 25897 * Arguments: dev - the device 'dev_t' 25898 * data - pointer to user provided CD-DA structure specifying 25899 * the track starting address, transfer length, and 25900 * subcode options. 25901 * flag - this argument is a pass through to ddi_copyxxx() 25902 * directly from the mode argument of ioctl(). 
25903 * 25904 * Return Code: the code returned by sd_send_scsi_cmd() 25905 * EFAULT if ddi_copyxxx() fails 25906 * ENXIO if fail ddi_get_soft_state 25907 * EINVAL if invalid arguments are provided 25908 * ENOTTY 25909 */ 25910 25911 static int 25912 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25913 { 25914 struct sd_lun *un; 25915 struct uscsi_cmd *com; 25916 struct cdrom_cdda *cdda; 25917 int rval; 25918 size_t buflen; 25919 char cdb[CDB_GROUP5]; 25920 25921 #ifdef _MULTI_DATAMODEL 25922 /* To support ILP32 applications in an LP64 world */ 25923 struct cdrom_cdda32 cdrom_cdda32; 25924 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25925 #endif /* _MULTI_DATAMODEL */ 25926 25927 if (data == NULL) { 25928 return (EINVAL); 25929 } 25930 25931 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25932 return (ENXIO); 25933 } 25934 25935 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25936 25937 #ifdef _MULTI_DATAMODEL 25938 switch (ddi_model_convert_from(flag & FMODELS)) { 25939 case DDI_MODEL_ILP32: 25940 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25941 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25942 "sr_read_cdda: ddi_copyin Failed\n"); 25943 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25944 return (EFAULT); 25945 } 25946 /* Convert the ILP32 uscsi data from the application to LP64 */ 25947 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25948 break; 25949 case DDI_MODEL_NONE: 25950 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25951 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25952 "sr_read_cdda: ddi_copyin Failed\n"); 25953 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25954 return (EFAULT); 25955 } 25956 break; 25957 } 25958 #else /* ! _MULTI_DATAMODEL */ 25959 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25960 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25961 "sr_read_cdda: ddi_copyin Failed\n"); 25962 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25963 return (EFAULT); 25964 } 25965 #endif /* _MULTI_DATAMODEL */ 25966 25967 /* 25968 * Since MMC-2 expects max 3 bytes for length, check if the 25969 * length input is greater than 3 bytes 25970 */ 25971 if ((cdda->cdda_length & 0xFF000000) != 0) { 25972 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25973 "cdrom transfer length too large: %d (limit %d)\n", 25974 cdda->cdda_length, 0xFFFFFF); 25975 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25976 return (EINVAL); 25977 } 25978 25979 switch (cdda->cdda_subcode) { 25980 case CDROM_DA_NO_SUBCODE: 25981 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25982 break; 25983 case CDROM_DA_SUBQ: 25984 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25985 break; 25986 case CDROM_DA_ALL_SUBCODE: 25987 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25988 break; 25989 case CDROM_DA_SUBCODE_ONLY: 25990 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25991 break; 25992 default: 25993 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25994 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25995 cdda->cdda_subcode); 25996 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25997 return (EINVAL); 25998 } 25999 26000 /* Build and send the command */ 26001 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26002 bzero(cdb, CDB_GROUP5); 26003 26004 if (un->un_f_cfg_cdda == TRUE) { 26005 cdb[0] = (char)SCMD_READ_CD; 26006 cdb[1] = 0x04; 26007 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 26008 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 26009 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 26010 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 26011 
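		/*
		 * READ CD (0xBE) CDB layout used here: byte 1 selects the
		 * expected sector type (0x04 = CD-DA), bytes 2-5 carry the
		 * starting LBA, bytes 6-8 the transfer length in blocks,
		 * byte 9 the main channel selection (0x10 = user data) and
		 * byte 10 the sub-channel selection set in the switch below.
		 */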
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 26012 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 26013 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 26014 cdb[9] = 0x10; 26015 switch (cdda->cdda_subcode) { 26016 case CDROM_DA_NO_SUBCODE : 26017 cdb[10] = 0x0; 26018 break; 26019 case CDROM_DA_SUBQ : 26020 cdb[10] = 0x2; 26021 break; 26022 case CDROM_DA_ALL_SUBCODE : 26023 cdb[10] = 0x1; 26024 break; 26025 case CDROM_DA_SUBCODE_ONLY : 26026 /* FALLTHROUGH */ 26027 default : 26028 kmem_free(cdda, sizeof (struct cdrom_cdda)); 26029 kmem_free(com, sizeof (*com)); 26030 return (ENOTTY); 26031 } 26032 } else { 26033 cdb[0] = (char)SCMD_READ_CDDA; 26034 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 26035 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 26036 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 26037 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 26038 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 26039 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 26040 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 26041 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 26042 cdb[10] = cdda->cdda_subcode; 26043 } 26044 26045 com->uscsi_cdb = cdb; 26046 com->uscsi_cdblen = CDB_GROUP5; 26047 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 26048 com->uscsi_buflen = buflen; 26049 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26050 26051 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26052 SD_PATH_STANDARD); 26053 26054 kmem_free(cdda, sizeof (struct cdrom_cdda)); 26055 kmem_free(com, sizeof (*com)); 26056 return (rval); 26057 } 26058 26059 26060 /* 26061 * Function: sr_read_cdxa() 26062 * 26063 * Description: This routine is the driver entry point for handling CD-ROM 26064 * ioctl requests to return CD-XA (Extended Architecture) data. 26065 * (CDROMCDXA). 26066 * 26067 * Arguments: dev - the device 'dev_t' 26068 * data - pointer to user provided CD-XA structure specifying 26069 * the data starting address, transfer length, and format 26070 * flag - this argument is a pass through to ddi_copyxxx() 26071 * directly from the mode argument of ioctl(). 26072 * 26073 * Return Code: the code returned by sd_send_scsi_cmd() 26074 * EFAULT if ddi_copyxxx() fails 26075 * ENXIO if fail ddi_get_soft_state 26076 * EINVAL if data pointer is NULL 26077 */ 26078 26079 static int 26080 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 26081 { 26082 struct sd_lun *un; 26083 struct uscsi_cmd *com; 26084 struct cdrom_cdxa *cdxa; 26085 int rval; 26086 size_t buflen; 26087 char cdb[CDB_GROUP5]; 26088 uchar_t read_flags; 26089 26090 #ifdef _MULTI_DATAMODEL 26091 /* To support ILP32 applications in an LP64 world */ 26092 struct cdrom_cdxa32 cdrom_cdxa32; 26093 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 26094 #endif /* _MULTI_DATAMODEL */ 26095 26096 if (data == NULL) { 26097 return (EINVAL); 26098 } 26099 26100 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26101 return (ENXIO); 26102 } 26103 26104 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 26105 26106 #ifdef _MULTI_DATAMODEL 26107 switch (ddi_model_convert_from(flag & FMODELS)) { 26108 case DDI_MODEL_ILP32: 26109 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 26110 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26111 return (EFAULT); 26112 } 26113 /* 26114 * Convert the ILP32 uscsi data from the 26115 * application to LP64 for internal use. 
26116 */ 26117 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 26118 break; 26119 case DDI_MODEL_NONE: 26120 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 26121 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26122 return (EFAULT); 26123 } 26124 break; 26125 } 26126 #else /* ! _MULTI_DATAMODEL */ 26127 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 26128 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26129 return (EFAULT); 26130 } 26131 #endif /* _MULTI_DATAMODEL */ 26132 26133 /* 26134 * Since MMC-2 expects max 3 bytes for length, check if the 26135 * length input is greater than 3 bytes 26136 */ 26137 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 26138 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 26139 "cdrom transfer length too large: %d (limit %d)\n", 26140 cdxa->cdxa_length, 0xFFFFFF); 26141 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26142 return (EINVAL); 26143 } 26144 26145 switch (cdxa->cdxa_format) { 26146 case CDROM_XA_DATA: 26147 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 26148 read_flags = 0x10; 26149 break; 26150 case CDROM_XA_SECTOR_DATA: 26151 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 26152 read_flags = 0xf8; 26153 break; 26154 case CDROM_XA_DATA_W_ERROR: 26155 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 26156 read_flags = 0xfc; 26157 break; 26158 default: 26159 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26160 "sr_read_cdxa: Format '0x%x' Not Supported\n", 26161 cdxa->cdxa_format); 26162 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26163 return (EINVAL); 26164 } 26165 26166 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26167 bzero(cdb, CDB_GROUP5); 26168 if (un->un_f_mmc_cap == TRUE) { 26169 cdb[0] = (char)SCMD_READ_CD; 26170 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 26171 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 26172 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 26173 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 26174 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 26175 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 26176 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 26177 cdb[9] = (char)read_flags; 26178 } else { 26179 /* 26180 * Note: A vendor specific command (0xDB) is being used her to 26181 * request a read of all subcodes. 
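		 * Unlike READ CD, this vendor CDB carries a full four byte
		 * transfer length in bytes 6-9 and passes the requested
		 * CD-XA format straight through in byte 10.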
26182 */ 26183 cdb[0] = (char)SCMD_READ_CDXA; 26184 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 26185 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 26186 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 26187 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 26188 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 26189 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 26190 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 26191 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 26192 cdb[10] = cdxa->cdxa_format; 26193 } 26194 com->uscsi_cdb = cdb; 26195 com->uscsi_cdblen = CDB_GROUP5; 26196 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 26197 com->uscsi_buflen = buflen; 26198 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26199 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26200 SD_PATH_STANDARD); 26201 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26202 kmem_free(com, sizeof (*com)); 26203 return (rval); 26204 } 26205 26206 26207 /* 26208 * Function: sr_eject() 26209 * 26210 * Description: This routine is the driver entry point for handling CD-ROM 26211 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 26212 * 26213 * Arguments: dev - the device 'dev_t' 26214 * 26215 * Return Code: the code returned by sd_send_scsi_cmd() 26216 */ 26217 26218 static int 26219 sr_eject(dev_t dev) 26220 { 26221 struct sd_lun *un; 26222 int rval; 26223 26224 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26225 (un->un_state == SD_STATE_OFFLINE)) { 26226 return (ENXIO); 26227 } 26228 26229 /* 26230 * To prevent race conditions with the eject 26231 * command, keep track of an eject command as 26232 * it progresses. If we are already handling 26233 * an eject command in the driver for the given 26234 * unit and another request to eject is received 26235 * immediately return EAGAIN so we don't lose 26236 * the command if the current eject command fails. 26237 */ 26238 mutex_enter(SD_MUTEX(un)); 26239 if (un->un_f_ejecting == TRUE) { 26240 mutex_exit(SD_MUTEX(un)); 26241 return (EAGAIN); 26242 } 26243 un->un_f_ejecting = TRUE; 26244 mutex_exit(SD_MUTEX(un)); 26245 26246 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 26247 SD_PATH_STANDARD)) != 0) { 26248 mutex_enter(SD_MUTEX(un)); 26249 un->un_f_ejecting = FALSE; 26250 mutex_exit(SD_MUTEX(un)); 26251 return (rval); 26252 } 26253 26254 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 26255 SD_PATH_STANDARD); 26256 26257 if (rval == 0) { 26258 mutex_enter(SD_MUTEX(un)); 26259 sr_ejected(un); 26260 un->un_mediastate = DKIO_EJECTED; 26261 un->un_f_ejecting = FALSE; 26262 cv_broadcast(&un->un_state_cv); 26263 mutex_exit(SD_MUTEX(un)); 26264 } else { 26265 mutex_enter(SD_MUTEX(un)); 26266 un->un_f_ejecting = FALSE; 26267 mutex_exit(SD_MUTEX(un)); 26268 } 26269 return (rval); 26270 } 26271 26272 26273 /* 26274 * Function: sr_ejected() 26275 * 26276 * Description: This routine updates the soft state structure to invalidate the 26277 * geometry information after the media has been ejected or a 26278 * media eject has been detected. 
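 *		Specifically, the cached capacity and target block size are
 *		flagged invalid, the cmlb label is invalidated, and the
 *		capacity kstat (if present) is reset to zero.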
26279 * 26280 * Arguments: un - driver soft state (unit) structure 26281 */ 26282 26283 static void 26284 sr_ejected(struct sd_lun *un) 26285 { 26286 struct sd_errstats *stp; 26287 26288 ASSERT(un != NULL); 26289 ASSERT(mutex_owned(SD_MUTEX(un))); 26290 26291 un->un_f_blockcount_is_valid = FALSE; 26292 un->un_f_tgt_blocksize_is_valid = FALSE; 26293 mutex_exit(SD_MUTEX(un)); 26294 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 26295 mutex_enter(SD_MUTEX(un)); 26296 26297 if (un->un_errstats != NULL) { 26298 stp = (struct sd_errstats *)un->un_errstats->ks_data; 26299 stp->sd_capacity.value.ui64 = 0; 26300 } 26301 } 26302 26303 26304 /* 26305 * Function: sr_check_wp() 26306 * 26307 * Description: This routine checks the write protection of a removable 26308 * media disk and hotpluggable devices via the write protect bit of 26309 * the Mode Page Header device specific field. Some devices choke 26310 * on unsupported mode page. In order to workaround this issue, 26311 * this routine has been implemented to use 0x3f mode page(request 26312 * for all pages) for all device types. 26313 * 26314 * Arguments: dev - the device 'dev_t' 26315 * 26316 * Return Code: int indicating if the device is write protected (1) or not (0) 26317 * 26318 * Context: Kernel thread. 26319 * 26320 */ 26321 26322 static int 26323 sr_check_wp(dev_t dev) 26324 { 26325 struct sd_lun *un; 26326 uchar_t device_specific; 26327 uchar_t *sense; 26328 int hdrlen; 26329 int rval = FALSE; 26330 26331 /* 26332 * Note: The return codes for this routine should be reworked to 26333 * properly handle the case of a NULL softstate. 26334 */ 26335 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26336 return (FALSE); 26337 } 26338 26339 if (un->un_f_cfg_is_atapi == TRUE) { 26340 /* 26341 * The mode page contents are not required; set the allocation 26342 * length for the mode page header only 26343 */ 26344 hdrlen = MODE_HEADER_LENGTH_GRP2; 26345 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26346 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 26347 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26348 goto err_exit; 26349 device_specific = 26350 ((struct mode_header_grp2 *)sense)->device_specific; 26351 } else { 26352 hdrlen = MODE_HEADER_LENGTH; 26353 sense = kmem_zalloc(hdrlen, KM_SLEEP); 26354 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 26355 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 26356 goto err_exit; 26357 device_specific = 26358 ((struct mode_header *)sense)->device_specific; 26359 } 26360 26361 /* 26362 * Write protect mode sense failed; not all disks 26363 * understand this query. Return FALSE assuming that 26364 * these devices are not writable. 26365 */ 26366 if (device_specific & WRITE_PROTECT) { 26367 rval = TRUE; 26368 } 26369 26370 err_exit: 26371 kmem_free(sense, hdrlen); 26372 return (rval); 26373 } 26374 26375 /* 26376 * Function: sr_volume_ctrl() 26377 * 26378 * Description: This routine is the driver entry point for handling CD-ROM 26379 * audio output volume ioctl requests. (CDROMVOLCTRL) 26380 * 26381 * Arguments: dev - the device 'dev_t' 26382 * data - pointer to user audio volume control structure 26383 * flag - this argument is a pass through to ddi_copyxxx() 26384 * directly from the mode argument of ioctl(). 
26385 * 26386 * Return Code: the code returned by sd_send_scsi_cmd() 26387 * EFAULT if ddi_copyxxx() fails 26388 * ENXIO if fail ddi_get_soft_state 26389 * EINVAL if data pointer is NULL 26390 * 26391 */ 26392 26393 static int 26394 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 26395 { 26396 struct sd_lun *un; 26397 struct cdrom_volctrl volume; 26398 struct cdrom_volctrl *vol = &volume; 26399 uchar_t *sense_page; 26400 uchar_t *select_page; 26401 uchar_t *sense; 26402 uchar_t *select; 26403 int sense_buflen; 26404 int select_buflen; 26405 int rval; 26406 26407 if (data == NULL) { 26408 return (EINVAL); 26409 } 26410 26411 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26412 (un->un_state == SD_STATE_OFFLINE)) { 26413 return (ENXIO); 26414 } 26415 26416 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 26417 return (EFAULT); 26418 } 26419 26420 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26421 struct mode_header_grp2 *sense_mhp; 26422 struct mode_header_grp2 *select_mhp; 26423 int bd_len; 26424 26425 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 26426 select_buflen = MODE_HEADER_LENGTH_GRP2 + 26427 MODEPAGE_AUDIO_CTRL_LEN; 26428 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26429 select = kmem_zalloc(select_buflen, KM_SLEEP); 26430 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26431 sense_buflen, MODEPAGE_AUDIO_CTRL, 26432 SD_PATH_STANDARD)) != 0) { 26433 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 26434 "sr_volume_ctrl: Mode Sense Failed\n"); 26435 kmem_free(sense, sense_buflen); 26436 kmem_free(select, select_buflen); 26437 return (rval); 26438 } 26439 sense_mhp = (struct mode_header_grp2 *)sense; 26440 select_mhp = (struct mode_header_grp2 *)select; 26441 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26442 sense_mhp->bdesc_length_lo; 26443 if (bd_len > MODE_BLK_DESC_LENGTH) { 26444 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26445 "sr_volume_ctrl: Mode Sense returned invalid " 26446 "block descriptor length\n"); 26447 kmem_free(sense, sense_buflen); 26448 kmem_free(select, select_buflen); 26449 return (EIO); 26450 } 26451 sense_page = (uchar_t *) 26452 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26453 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26454 select_mhp->length_msb = 0; 26455 select_mhp->length_lsb = 0; 26456 select_mhp->bdesc_length_hi = 0; 26457 select_mhp->bdesc_length_lo = 0; 26458 } else { 26459 struct mode_header *sense_mhp, *select_mhp; 26460 26461 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26462 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26463 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26464 select = kmem_zalloc(select_buflen, KM_SLEEP); 26465 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26466 sense_buflen, MODEPAGE_AUDIO_CTRL, 26467 SD_PATH_STANDARD)) != 0) { 26468 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26469 "sr_volume_ctrl: Mode Sense Failed\n"); 26470 kmem_free(sense, sense_buflen); 26471 kmem_free(select, select_buflen); 26472 return (rval); 26473 } 26474 sense_mhp = (struct mode_header *)sense; 26475 select_mhp = (struct mode_header *)select; 26476 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26477 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26478 "sr_volume_ctrl: Mode Sense returned invalid " 26479 "block descriptor length\n"); 26480 kmem_free(sense, sense_buflen); 26481 kmem_free(select, select_buflen); 26482 return (EIO); 26483 } 26484 sense_page = (uchar_t *) 26485 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26486 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26487 select_mhp->length = 0; 26488 select_mhp->bdesc_length = 0; 26489 } 26490 /* 26491 * Note: An audio control data structure could be created and overlayed 26492 * on the following in place of the array indexing method implemented. 26493 */ 26494 26495 /* Build the select data for the user volume data */ 26496 select_page[0] = MODEPAGE_AUDIO_CTRL; 26497 select_page[1] = 0xE; 26498 /* Set the immediate bit */ 26499 select_page[2] = 0x04; 26500 /* Zero out reserved fields */ 26501 select_page[3] = 0x00; 26502 select_page[4] = 0x00; 26503 /* Return sense data for fields not to be modified */ 26504 select_page[5] = sense_page[5]; 26505 select_page[6] = sense_page[6]; 26506 select_page[7] = sense_page[7]; 26507 /* Set the user specified volume levels for channel 0 and 1 */ 26508 select_page[8] = 0x01; 26509 select_page[9] = vol->channel0; 26510 select_page[10] = 0x02; 26511 select_page[11] = vol->channel1; 26512 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26513 select_page[12] = sense_page[12]; 26514 select_page[13] = sense_page[13]; 26515 select_page[14] = sense_page[14]; 26516 select_page[15] = sense_page[15]; 26517 26518 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26519 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26520 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26521 } else { 26522 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26523 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26524 } 26525 26526 kmem_free(sense, sense_buflen); 26527 kmem_free(select, select_buflen); 26528 return (rval); 26529 } 26530 26531 26532 /* 26533 * Function: sr_read_sony_session_offset() 26534 * 26535 * Description: This routine is the driver entry point for handling CD-ROM 26536 * ioctl requests for session offset information. (CDROMREADOFFSET) 26537 * The address of the first track in the last session of a 26538 * multi-session CD-ROM is returned 26539 * 26540 * Note: This routine uses a vendor specific key value in the 26541 * command control field without implementing any vendor check here 26542 * or in the ioctl routine. 26543 * 26544 * Arguments: dev - the device 'dev_t' 26545 * data - pointer to an int to hold the requested address 26546 * flag - this argument is a pass through to ddi_copyxxx() 26547 * directly from the mode argument of ioctl(). 26548 * 26549 * Return Code: the code returned by sd_send_scsi_cmd() 26550 * EFAULT if ddi_copyxxx() fails 26551 * ENXIO if fail ddi_get_soft_state 26552 * EINVAL if data pointer is NULL 26553 */ 26554 26555 static int 26556 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26557 { 26558 struct sd_lun *un; 26559 struct uscsi_cmd *com; 26560 caddr_t buffer; 26561 char cdb[CDB_GROUP1]; 26562 int session_offset = 0; 26563 int rval; 26564 26565 if (data == NULL) { 26566 return (EINVAL); 26567 } 26568 26569 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26570 (un->un_state == SD_STATE_OFFLINE)) { 26571 return (ENXIO); 26572 } 26573 26574 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26575 bzero(cdb, CDB_GROUP1); 26576 cdb[0] = SCMD_READ_TOC; 26577 /* 26578 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26579 * (4 byte TOC response header + 8 byte response data) 26580 */ 26581 cdb[8] = SONY_SESSION_OFFSET_LEN; 26582 /* Byte 9 is the control byte. 
A vendor specific value is used */ 26583 cdb[9] = SONY_SESSION_OFFSET_KEY; 26584 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26585 com->uscsi_cdb = cdb; 26586 com->uscsi_cdblen = CDB_GROUP1; 26587 com->uscsi_bufaddr = buffer; 26588 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 26589 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26590 26591 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26592 SD_PATH_STANDARD); 26593 if (rval != 0) { 26594 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26595 kmem_free(com, sizeof (*com)); 26596 return (rval); 26597 } 26598 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 26599 session_offset = 26600 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26601 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26602 /* 26603 * Offset returned offset in current lbasize block's. Convert to 26604 * 2k block's to return to the user 26605 */ 26606 if (un->un_tgt_blocksize == CDROM_BLK_512) { 26607 session_offset >>= 2; 26608 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 26609 session_offset >>= 1; 26610 } 26611 } 26612 26613 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 26614 rval = EFAULT; 26615 } 26616 26617 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26618 kmem_free(com, sizeof (*com)); 26619 return (rval); 26620 } 26621 26622 26623 /* 26624 * Function: sd_wm_cache_constructor() 26625 * 26626 * Description: Cache Constructor for the wmap cache for the read/modify/write 26627 * devices. 26628 * 26629 * Arguments: wm - A pointer to the sd_w_map to be initialized. 26630 * un - sd_lun structure for the device. 26631 * flag - the km flags passed to constructor 26632 * 26633 * Return Code: 0 on success. 26634 * -1 on failure. 26635 */ 26636 26637 /*ARGSUSED*/ 26638 static int 26639 sd_wm_cache_constructor(void *wm, void *un, int flags) 26640 { 26641 bzero(wm, sizeof (struct sd_w_map)); 26642 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 26643 return (0); 26644 } 26645 26646 26647 /* 26648 * Function: sd_wm_cache_destructor() 26649 * 26650 * Description: Cache destructor for the wmap cache for the read/modify/write 26651 * devices. 26652 * 26653 * Arguments: wm - A pointer to the sd_w_map to be initialized. 26654 * un - sd_lun structure for the device. 26655 */ 26656 /*ARGSUSED*/ 26657 static void 26658 sd_wm_cache_destructor(void *wm, void *un) 26659 { 26660 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 26661 } 26662 26663 26664 /* 26665 * Function: sd_range_lock() 26666 * 26667 * Description: Lock the range of blocks specified as parameter to ensure 26668 * that read, modify write is atomic and no other i/o writes 26669 * to the same location. The range is specified in terms 26670 * of start and end blocks. Block numbers are the actual 26671 * media block numbers and not system. 26672 * 26673 * Arguments: un - sd_lun structure for the device. 26674 * startb - The starting block number 26675 * endb - The end block number 26676 * typ - type of i/o - simple/read_modify_write 26677 * 26678 * Return Code: wm - pointer to the wmap structure. 26679 * 26680 * Context: This routine can sleep. 
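 *
 *		A caller typically brackets a read-modify-write as:
 *
 *			wm = sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
 *			(issue the read, merge, then issue the write)
 *			sd_range_unlock(un, wm);
 *
 *		Internally this is a small state machine: SD_WM_CHK_LIST
 *		looks for an overlapping busy range, SD_WM_LOCK_RANGE
 *		allocates and links a new wmap, and SD_WM_WAIT_MAP sleeps
 *		on the conflicting wmap's wm_avail until that range is
 *		released.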
26681 */ 26682 26683 static struct sd_w_map * 26684 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 26685 { 26686 struct sd_w_map *wmp = NULL; 26687 struct sd_w_map *sl_wmp = NULL; 26688 struct sd_w_map *tmp_wmp; 26689 wm_state state = SD_WM_CHK_LIST; 26690 26691 26692 ASSERT(un != NULL); 26693 ASSERT(!mutex_owned(SD_MUTEX(un))); 26694 26695 mutex_enter(SD_MUTEX(un)); 26696 26697 while (state != SD_WM_DONE) { 26698 26699 switch (state) { 26700 case SD_WM_CHK_LIST: 26701 /* 26702 * This is the starting state. Check the wmap list 26703 * to see if the range is currently available. 26704 */ 26705 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 26706 /* 26707 * If this is a simple write and no rmw 26708 * i/o is pending then try to lock the 26709 * range as the range should be available. 26710 */ 26711 state = SD_WM_LOCK_RANGE; 26712 } else { 26713 tmp_wmp = sd_get_range(un, startb, endb); 26714 if (tmp_wmp != NULL) { 26715 if ((wmp != NULL) && ONLIST(un, wmp)) { 26716 /* 26717 * Should not keep onlist wmps 26718 * while waiting this macro 26719 * will also do wmp = NULL; 26720 */ 26721 FREE_ONLIST_WMAP(un, wmp); 26722 } 26723 /* 26724 * sl_wmp is the wmap on which wait 26725 * is done, since the tmp_wmp points 26726 * to the inuse wmap, set sl_wmp to 26727 * tmp_wmp and change the state to sleep 26728 */ 26729 sl_wmp = tmp_wmp; 26730 state = SD_WM_WAIT_MAP; 26731 } else { 26732 state = SD_WM_LOCK_RANGE; 26733 } 26734 26735 } 26736 break; 26737 26738 case SD_WM_LOCK_RANGE: 26739 ASSERT(un->un_wm_cache); 26740 /* 26741 * The range need to be locked, try to get a wmap. 26742 * First attempt it with NO_SLEEP, want to avoid a sleep 26743 * if possible as we will have to release the sd mutex 26744 * if we have to sleep. 26745 */ 26746 if (wmp == NULL) 26747 wmp = kmem_cache_alloc(un->un_wm_cache, 26748 KM_NOSLEEP); 26749 if (wmp == NULL) { 26750 mutex_exit(SD_MUTEX(un)); 26751 _NOTE(DATA_READABLE_WITHOUT_LOCK 26752 (sd_lun::un_wm_cache)) 26753 wmp = kmem_cache_alloc(un->un_wm_cache, 26754 KM_SLEEP); 26755 mutex_enter(SD_MUTEX(un)); 26756 /* 26757 * we released the mutex so recheck and go to 26758 * check list state. 26759 */ 26760 state = SD_WM_CHK_LIST; 26761 } else { 26762 /* 26763 * We exit out of state machine since we 26764 * have the wmap. Do the housekeeping first. 26765 * place the wmap on the wmap list if it is not 26766 * on it already and then set the state to done. 26767 */ 26768 wmp->wm_start = startb; 26769 wmp->wm_end = endb; 26770 wmp->wm_flags = typ | SD_WM_BUSY; 26771 if (typ & SD_WTYPE_RMW) { 26772 un->un_rmw_count++; 26773 } 26774 /* 26775 * If not already on the list then link 26776 */ 26777 if (!ONLIST(un, wmp)) { 26778 wmp->wm_next = un->un_wm; 26779 wmp->wm_prev = NULL; 26780 if (wmp->wm_next) 26781 wmp->wm_next->wm_prev = wmp; 26782 un->un_wm = wmp; 26783 } 26784 state = SD_WM_DONE; 26785 } 26786 break; 26787 26788 case SD_WM_WAIT_MAP: 26789 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 26790 /* 26791 * Wait is done on sl_wmp, which is set in the 26792 * check_list state. 26793 */ 26794 sl_wmp->wm_wanted_count++; 26795 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 26796 sl_wmp->wm_wanted_count--; 26797 /* 26798 * We can reuse the memory from the completed sl_wmp 26799 * lock range for our new lock, but only if noone is 26800 * waiting for it. 
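			 * The ASSERT that follows relies on sd_range_unlock()
			 * clearing SD_WM_BUSY before it broadcasts wm_avail.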
26801 */ 26802 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26803 if (sl_wmp->wm_wanted_count == 0) { 26804 if (wmp != NULL) 26805 CHK_N_FREEWMP(un, wmp); 26806 wmp = sl_wmp; 26807 } 26808 sl_wmp = NULL; 26809 /* 26810 * After waking up, need to recheck for availability of 26811 * range. 26812 */ 26813 state = SD_WM_CHK_LIST; 26814 break; 26815 26816 default: 26817 panic("sd_range_lock: " 26818 "Unknown state %d in sd_range_lock", state); 26819 /*NOTREACHED*/ 26820 } /* switch(state) */ 26821 26822 } /* while(state != SD_WM_DONE) */ 26823 26824 mutex_exit(SD_MUTEX(un)); 26825 26826 ASSERT(wmp != NULL); 26827 26828 return (wmp); 26829 } 26830 26831 26832 /* 26833 * Function: sd_get_range() 26834 * 26835 * Description: Find if there any overlapping I/O to this one 26836 * Returns the write-map of 1st such I/O, NULL otherwise. 26837 * 26838 * Arguments: un - sd_lun structure for the device. 26839 * startb - The starting block number 26840 * endb - The end block number 26841 * 26842 * Return Code: wm - pointer to the wmap structure. 26843 */ 26844 26845 static struct sd_w_map * 26846 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26847 { 26848 struct sd_w_map *wmp; 26849 26850 ASSERT(un != NULL); 26851 26852 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26853 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26854 continue; 26855 } 26856 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26857 break; 26858 } 26859 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26860 break; 26861 } 26862 } 26863 26864 return (wmp); 26865 } 26866 26867 26868 /* 26869 * Function: sd_free_inlist_wmap() 26870 * 26871 * Description: Unlink and free a write map struct. 26872 * 26873 * Arguments: un - sd_lun structure for the device. 26874 * wmp - sd_w_map which needs to be unlinked. 26875 */ 26876 26877 static void 26878 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26879 { 26880 ASSERT(un != NULL); 26881 26882 if (un->un_wm == wmp) { 26883 un->un_wm = wmp->wm_next; 26884 } else { 26885 wmp->wm_prev->wm_next = wmp->wm_next; 26886 } 26887 26888 if (wmp->wm_next) { 26889 wmp->wm_next->wm_prev = wmp->wm_prev; 26890 } 26891 26892 wmp->wm_next = wmp->wm_prev = NULL; 26893 26894 kmem_cache_free(un->un_wm_cache, wmp); 26895 } 26896 26897 26898 /* 26899 * Function: sd_range_unlock() 26900 * 26901 * Description: Unlock the range locked by wm. 26902 * Free write map if nobody else is waiting on it. 26903 * 26904 * Arguments: un - sd_lun structure for the device. 26905 * wmp - sd_w_map which needs to be unlinked. 26906 */ 26907 26908 static void 26909 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26910 { 26911 ASSERT(un != NULL); 26912 ASSERT(wm != NULL); 26913 ASSERT(!mutex_owned(SD_MUTEX(un))); 26914 26915 mutex_enter(SD_MUTEX(un)); 26916 26917 if (wm->wm_flags & SD_WTYPE_RMW) { 26918 un->un_rmw_count--; 26919 } 26920 26921 if (wm->wm_wanted_count) { 26922 wm->wm_flags = 0; 26923 /* 26924 * Broadcast that the wmap is available now. 26925 */ 26926 cv_broadcast(&wm->wm_avail); 26927 } else { 26928 /* 26929 * If no one is waiting on the map, it should be free'ed. 26930 */ 26931 sd_free_inlist_wmap(un, wm); 26932 } 26933 26934 mutex_exit(SD_MUTEX(un)); 26935 } 26936 26937 26938 /* 26939 * Function: sd_read_modify_write_task 26940 * 26941 * Description: Called from a taskq thread to initiate the write phase of 26942 * a read-modify-write request. This is used for targets where 26943 * un->un_sys_blocksize != un->un_tgt_blocksize. 
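 *		For example, a 512 byte write to a device with 2048 byte
 *		target blocks is first turned into a read of the enclosing
 *		target block; once that read completes (in interrupt
 *		context), this routine sends the write half of the request
 *		back down the iostart chain.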
26944 * 26945 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26946 * 26947 * Context: Called under taskq thread context. 26948 */ 26949 26950 static void 26951 sd_read_modify_write_task(void *arg) 26952 { 26953 struct sd_mapblocksize_info *bsp; 26954 struct buf *bp; 26955 struct sd_xbuf *xp; 26956 struct sd_lun *un; 26957 26958 bp = arg; /* The bp is given in arg */ 26959 ASSERT(bp != NULL); 26960 26961 /* Get the pointer to the layer-private data struct */ 26962 xp = SD_GET_XBUF(bp); 26963 ASSERT(xp != NULL); 26964 bsp = xp->xb_private; 26965 ASSERT(bsp != NULL); 26966 26967 un = SD_GET_UN(bp); 26968 ASSERT(un != NULL); 26969 ASSERT(!mutex_owned(SD_MUTEX(un))); 26970 26971 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26972 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26973 26974 /* 26975 * This is the write phase of a read-modify-write request, called 26976 * under the context of a taskq thread in response to the completion 26977 * of the read portion of the rmw request completing under interrupt 26978 * context. The write request must be sent from here down the iostart 26979 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26980 * we use the layer index saved in the layer-private data area. 26981 */ 26982 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26983 26984 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26985 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26986 } 26987 26988 26989 /* 26990 * Function: sddump_do_read_of_rmw() 26991 * 26992 * Description: This routine will be called from sddump, If sddump is called 26993 * with an I/O which not aligned on device blocksize boundary 26994 * then the write has to be converted to read-modify-write. 26995 * Do the read part here in order to keep sddump simple. 26996 * Note - That the sd_mutex is held across the call to this 26997 * routine. 26998 * 26999 * Arguments: un - sd_lun 27000 * blkno - block number in terms of media block size. 27001 * nblk - number of blocks. 27002 * bpp - pointer to pointer to the buf structure. On return 27003 * from this function, *bpp points to the valid buffer 27004 * to which the write has to be done. 27005 * 27006 * Return Code: 0 for success or errno-type return code 27007 */ 27008 27009 static int 27010 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 27011 struct buf **bpp) 27012 { 27013 int err; 27014 int i; 27015 int rval; 27016 struct buf *bp; 27017 struct scsi_pkt *pkt = NULL; 27018 uint32_t target_blocksize; 27019 27020 ASSERT(un != NULL); 27021 ASSERT(mutex_owned(SD_MUTEX(un))); 27022 27023 target_blocksize = un->un_tgt_blocksize; 27024 27025 mutex_exit(SD_MUTEX(un)); 27026 27027 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 27028 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 27029 if (bp == NULL) { 27030 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27031 "no resources for dumping; giving up"); 27032 err = ENOMEM; 27033 goto done; 27034 } 27035 27036 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 27037 blkno, nblk); 27038 if (rval != 0) { 27039 scsi_free_consistent_buf(bp); 27040 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27041 "no resources for dumping; giving up"); 27042 err = ENOMEM; 27043 goto done; 27044 } 27045 27046 pkt->pkt_flags |= FLAG_NOINTR; 27047 27048 err = EIO; 27049 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 27050 27051 /* 27052 * Scsi_poll returns 0 (success) if the command completes and 27053 * the status block is STATUS_GOOD. 
We should only check 27054 * errors if this condition is not true. Even then we should 27055 * send our own request sense packet only if we have a check 27056 * condition and auto request sense has not been performed by 27057 * the hba. 27058 */ 27059 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 27060 27061 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 27062 err = 0; 27063 break; 27064 } 27065 27066 /* 27067 * Check CMD_DEV_GONE 1st, give up if device is gone, 27068 * no need to read RQS data. 27069 */ 27070 if (pkt->pkt_reason == CMD_DEV_GONE) { 27071 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27072 "Error while dumping state with rmw..." 27073 "Device is gone\n"); 27074 break; 27075 } 27076 27077 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 27078 SD_INFO(SD_LOG_DUMP, un, 27079 "sddump: read failed with CHECK, try # %d\n", i); 27080 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 27081 (void) sd_send_polled_RQS(un); 27082 } 27083 27084 continue; 27085 } 27086 27087 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 27088 int reset_retval = 0; 27089 27090 SD_INFO(SD_LOG_DUMP, un, 27091 "sddump: read failed with BUSY, try # %d\n", i); 27092 27093 if (un->un_f_lun_reset_enabled == TRUE) { 27094 reset_retval = scsi_reset(SD_ADDRESS(un), 27095 RESET_LUN); 27096 } 27097 if (reset_retval == 0) { 27098 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 27099 } 27100 (void) sd_send_polled_RQS(un); 27101 27102 } else { 27103 SD_INFO(SD_LOG_DUMP, un, 27104 "sddump: read failed with 0x%x, try # %d\n", 27105 SD_GET_PKT_STATUS(pkt), i); 27106 mutex_enter(SD_MUTEX(un)); 27107 sd_reset_target(un, pkt); 27108 mutex_exit(SD_MUTEX(un)); 27109 } 27110 27111 /* 27112 * If we are not getting anywhere with lun/target resets, 27113 * let's reset the bus. 27114 */ 27115 if (i > SD_NDUMP_RETRIES/2) { 27116 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 27117 (void) sd_send_polled_RQS(un); 27118 } 27119 27120 } 27121 scsi_destroy_pkt(pkt); 27122 27123 if (err != 0) { 27124 scsi_free_consistent_buf(bp); 27125 *bpp = NULL; 27126 } else { 27127 *bpp = bp; 27128 } 27129 27130 done: 27131 mutex_enter(SD_MUTEX(un)); 27132 return (err); 27133 } 27134 27135 27136 /* 27137 * Function: sd_failfast_flushq 27138 * 27139 * Description: Take all bp's on the wait queue that have B_FAILFAST set 27140 * in b_flags and move them onto the failfast queue, then kick 27141 * off a thread to return all bp's on the failfast queue to 27142 * their owners with an error set. 27143 * 27144 * Arguments: un - pointer to the soft state struct for the instance. 27145 * 27146 * Context: may execute in interrupt context. 27147 */ 27148 27149 static void 27150 sd_failfast_flushq(struct sd_lun *un) 27151 { 27152 struct buf *bp; 27153 struct buf *next_waitq_bp; 27154 struct buf *prev_waitq_bp = NULL; 27155 27156 ASSERT(un != NULL); 27157 ASSERT(mutex_owned(SD_MUTEX(un))); 27158 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 27159 ASSERT(un->un_failfast_bp == NULL); 27160 27161 SD_TRACE(SD_LOG_IO_FAILFAST, un, 27162 "sd_failfast_flushq: entry: un:0x%p\n", un); 27163 27164 /* 27165 * Check if we should flush all bufs when entering failfast state, or 27166 * just those with B_FAILFAST set. 27167 */ 27168 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 27169 /* 27170 * Move *all* bp's on the wait queue to the failfast flush 27171 * queue, including those that do NOT have B_FAILFAST set. 
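		 * The wait queue is spliced onto the tail of the failfast
		 * queue in one step, so the original ordering of the
		 * requests is preserved.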
27172 */ 27173 if (un->un_failfast_headp == NULL) { 27174 ASSERT(un->un_failfast_tailp == NULL); 27175 un->un_failfast_headp = un->un_waitq_headp; 27176 } else { 27177 ASSERT(un->un_failfast_tailp != NULL); 27178 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 27179 } 27180 27181 un->un_failfast_tailp = un->un_waitq_tailp; 27182 27183 /* update kstat for each bp moved out of the waitq */ 27184 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 27185 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27186 } 27187 27188 /* empty the waitq */ 27189 un->un_waitq_headp = un->un_waitq_tailp = NULL; 27190 27191 } else { 27192 /* 27193 * Go thru the wait queue, pick off all entries with 27194 * B_FAILFAST set, and move these onto the failfast queue. 27195 */ 27196 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 27197 /* 27198 * Save the pointer to the next bp on the wait queue, 27199 * so we get to it on the next iteration of this loop. 27200 */ 27201 next_waitq_bp = bp->av_forw; 27202 27203 /* 27204 * If this bp from the wait queue does NOT have 27205 * B_FAILFAST set, just move on to the next element 27206 * in the wait queue. Note, this is the only place 27207 * where it is correct to set prev_waitq_bp. 27208 */ 27209 if ((bp->b_flags & B_FAILFAST) == 0) { 27210 prev_waitq_bp = bp; 27211 continue; 27212 } 27213 27214 /* 27215 * Remove the bp from the wait queue. 27216 */ 27217 if (bp == un->un_waitq_headp) { 27218 /* The bp is the first element of the waitq. */ 27219 un->un_waitq_headp = next_waitq_bp; 27220 if (un->un_waitq_headp == NULL) { 27221 /* The wait queue is now empty */ 27222 un->un_waitq_tailp = NULL; 27223 } 27224 } else { 27225 /* 27226 * The bp is either somewhere in the middle 27227 * or at the end of the wait queue. 27228 */ 27229 ASSERT(un->un_waitq_headp != NULL); 27230 ASSERT(prev_waitq_bp != NULL); 27231 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 27232 == 0); 27233 if (bp == un->un_waitq_tailp) { 27234 /* bp is the last entry on the waitq. */ 27235 ASSERT(next_waitq_bp == NULL); 27236 un->un_waitq_tailp = prev_waitq_bp; 27237 } 27238 prev_waitq_bp->av_forw = next_waitq_bp; 27239 } 27240 bp->av_forw = NULL; 27241 27242 /* 27243 * update kstat since the bp is moved out of 27244 * the waitq 27245 */ 27246 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27247 27248 /* 27249 * Now put the bp onto the failfast queue. 27250 */ 27251 if (un->un_failfast_headp == NULL) { 27252 /* failfast queue is currently empty */ 27253 ASSERT(un->un_failfast_tailp == NULL); 27254 un->un_failfast_headp = 27255 un->un_failfast_tailp = bp; 27256 } else { 27257 /* Add the bp to the end of the failfast q */ 27258 ASSERT(un->un_failfast_tailp != NULL); 27259 ASSERT(un->un_failfast_tailp->b_flags & 27260 B_FAILFAST); 27261 un->un_failfast_tailp->av_forw = bp; 27262 un->un_failfast_tailp = bp; 27263 } 27264 } 27265 } 27266 27267 /* 27268 * Now return all bp's on the failfast queue to their owners. 27269 */ 27270 while ((bp = un->un_failfast_headp) != NULL) { 27271 27272 un->un_failfast_headp = bp->av_forw; 27273 if (un->un_failfast_headp == NULL) { 27274 un->un_failfast_tailp = NULL; 27275 } 27276 27277 /* 27278 * We want to return the bp with a failure error code, but 27279 * we do not want a call to sd_start_cmds() to occur here, 27280 * so use sd_return_failed_command_no_restart() instead of 27281 * sd_return_failed_command(). 27282 */ 27283 sd_return_failed_command_no_restart(un, bp, EIO); 27284 } 27285 27286 /* Flush the xbuf queues if required. 
*/ 27287 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 27288 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 27289 } 27290 27291 SD_TRACE(SD_LOG_IO_FAILFAST, un, 27292 "sd_failfast_flushq: exit: un:0x%p\n", un); 27293 } 27294 27295 27296 /* 27297 * Function: sd_failfast_flushq_callback 27298 * 27299 * Description: Return TRUE if the given bp meets the criteria for failfast 27300 * flushing. Used with ddi_xbuf_flushq(9F). 27301 * 27302 * Arguments: bp - ptr to buf struct to be examined. 27303 * 27304 * Context: Any 27305 */ 27306 27307 static int 27308 sd_failfast_flushq_callback(struct buf *bp) 27309 { 27310 /* 27311 * Return TRUE if (1) we want to flush ALL bufs when the failfast 27312 * state is entered; OR (2) the given bp has B_FAILFAST set. 27313 */ 27314 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 27315 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 27316 } 27317 27318 27319 27320 /* 27321 * Function: sd_setup_next_xfer 27322 * 27323 * Description: Prepare next I/O operation using DMA_PARTIAL 27324 * 27325 */ 27326 27327 static int 27328 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 27329 struct scsi_pkt *pkt, struct sd_xbuf *xp) 27330 { 27331 ssize_t num_blks_not_xfered; 27332 daddr_t strt_blk_num; 27333 ssize_t bytes_not_xfered; 27334 int rval; 27335 27336 ASSERT(pkt->pkt_resid == 0); 27337 27338 /* 27339 * Calculate next block number and amount to be transferred. 27340 * 27341 * How much data NOT transfered to the HBA yet. 27342 */ 27343 bytes_not_xfered = xp->xb_dma_resid; 27344 27345 /* 27346 * figure how many blocks NOT transfered to the HBA yet. 27347 */ 27348 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 27349 27350 /* 27351 * set starting block number to the end of what WAS transfered. 27352 */ 27353 strt_blk_num = xp->xb_blkno + 27354 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 27355 27356 /* 27357 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 27358 * will call scsi_initpkt with NULL_FUNC so we do not have to release 27359 * the disk mutex here. 27360 */ 27361 rval = sd_setup_next_rw_pkt(un, pkt, bp, 27362 strt_blk_num, num_blks_not_xfered); 27363 27364 if (rval == 0) { 27365 27366 /* 27367 * Success. 27368 * 27369 * Adjust things if there are still more blocks to be 27370 * transfered. 27371 */ 27372 xp->xb_dma_resid = pkt->pkt_resid; 27373 pkt->pkt_resid = 0; 27374 27375 return (1); 27376 } 27377 27378 /* 27379 * There's really only one possible return value from 27380 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 27381 * returns NULL. 27382 */ 27383 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 27384 27385 bp->b_resid = bp->b_bcount; 27386 bp->b_flags |= B_ERROR; 27387 27388 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27389 "Error setting up next portion of DMA transfer\n"); 27390 27391 return (0); 27392 } 27393 27394 /* 27395 * Function: sd_panic_for_res_conflict 27396 * 27397 * Description: Call panic with a string formatted with "Reservation Conflict" 27398 * and a human readable identifier indicating the SD instance 27399 * that experienced the reservation conflict. 27400 * 27401 * Arguments: un - pointer to the soft state struct for the instance. 27402 * 27403 * Context: may execute in interrupt context. 
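*
* Note: panic_str below is sized SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN so
* it can hold the fixed format text plus the device path returned by
* ddi_pathname().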
27404 */ 27405 27406 #define SD_RESV_CONFLICT_FMT_LEN 40 27407 void 27408 sd_panic_for_res_conflict(struct sd_lun *un) 27409 { 27410 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 27411 char path_str[MAXPATHLEN]; 27412 27413 (void) snprintf(panic_str, sizeof (panic_str), 27414 "Reservation Conflict\nDisk: %s", 27415 ddi_pathname(SD_DEVINFO(un), path_str)); 27416 27417 panic(panic_str); 27418 } 27419 27420 /* 27421 * Note: The following sd_faultinjection_ioctl( ) routines implement 27422 * driver support for handling fault injection for error analysis 27423 * causing faults in multiple layers of the driver. 27424 * 27425 */ 27426 27427 #ifdef SD_FAULT_INJECTION 27428 static uint_t sd_fault_injection_on = 0; 27429 27430 /* 27431 * Function: sd_faultinjection_ioctl() 27432 * 27433 * Description: This routine is the driver entry point for handling 27434 * faultinjection ioctls to inject errors into the 27435 * layer model 27436 * 27437 * Arguments: cmd - the ioctl cmd received 27438 * arg - the arguments from user and returns 27439 */ 27440 27441 static void 27442 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 27443 27444 uint_t i; 27445 uint_t rval; 27446 27447 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 27448 27449 mutex_enter(SD_MUTEX(un)); 27450 27451 switch (cmd) { 27452 case SDIOCRUN: 27453 /* Allow pushed faults to be injected */ 27454 SD_INFO(SD_LOG_SDTEST, un, 27455 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 27456 27457 sd_fault_injection_on = 1; 27458 27459 SD_INFO(SD_LOG_IOERR, un, 27460 "sd_faultinjection_ioctl: run finished\n"); 27461 break; 27462 27463 case SDIOCSTART: 27464 /* Start Injection Session */ 27465 SD_INFO(SD_LOG_SDTEST, un, 27466 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 27467 27468 sd_fault_injection_on = 0; 27469 un->sd_injection_mask = 0xFFFFFFFF; 27470 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27471 un->sd_fi_fifo_pkt[i] = NULL; 27472 un->sd_fi_fifo_xb[i] = NULL; 27473 un->sd_fi_fifo_un[i] = NULL; 27474 un->sd_fi_fifo_arq[i] = NULL; 27475 } 27476 un->sd_fi_fifo_start = 0; 27477 un->sd_fi_fifo_end = 0; 27478 27479 mutex_enter(&(un->un_fi_mutex)); 27480 un->sd_fi_log[0] = '\0'; 27481 un->sd_fi_buf_len = 0; 27482 mutex_exit(&(un->un_fi_mutex)); 27483 27484 SD_INFO(SD_LOG_IOERR, un, 27485 "sd_faultinjection_ioctl: start finished\n"); 27486 break; 27487 27488 case SDIOCSTOP: 27489 /* Stop Injection Session */ 27490 SD_INFO(SD_LOG_SDTEST, un, 27491 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 27492 sd_fault_injection_on = 0; 27493 un->sd_injection_mask = 0x0; 27494 27495 /* Empty stray or unuseds structs from fifo */ 27496 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27497 if (un->sd_fi_fifo_pkt[i] != NULL) { 27498 kmem_free(un->sd_fi_fifo_pkt[i], 27499 sizeof (struct sd_fi_pkt)); 27500 } 27501 if (un->sd_fi_fifo_xb[i] != NULL) { 27502 kmem_free(un->sd_fi_fifo_xb[i], 27503 sizeof (struct sd_fi_xb)); 27504 } 27505 if (un->sd_fi_fifo_un[i] != NULL) { 27506 kmem_free(un->sd_fi_fifo_un[i], 27507 sizeof (struct sd_fi_un)); 27508 } 27509 if (un->sd_fi_fifo_arq[i] != NULL) { 27510 kmem_free(un->sd_fi_fifo_arq[i], 27511 sizeof (struct sd_fi_arq)); 27512 } 27513 un->sd_fi_fifo_pkt[i] = NULL; 27514 un->sd_fi_fifo_un[i] = NULL; 27515 un->sd_fi_fifo_xb[i] = NULL; 27516 un->sd_fi_fifo_arq[i] = NULL; 27517 } 27518 un->sd_fi_fifo_start = 0; 27519 un->sd_fi_fifo_end = 0; 27520 27521 SD_INFO(SD_LOG_IOERR, un, 27522 "sd_faultinjection_ioctl: stop finished\n"); 27523 break; 27524 27525 case SDIOCINSERTPKT: 27526 /* Store a 
packet struct to be pushed onto fifo */ 27527 SD_INFO(SD_LOG_SDTEST, un, 27528 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 27529 27530 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27531 27532 sd_fault_injection_on = 0; 27533 27534 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 27535 if (un->sd_fi_fifo_pkt[i] != NULL) { 27536 kmem_free(un->sd_fi_fifo_pkt[i], 27537 sizeof (struct sd_fi_pkt)); 27538 } 27539 if (arg != NULL) { 27540 un->sd_fi_fifo_pkt[i] = 27541 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 27542 if (un->sd_fi_fifo_pkt[i] == NULL) { 27543 /* Alloc failed don't store anything */ 27544 break; 27545 } 27546 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 27547 sizeof (struct sd_fi_pkt), 0); 27548 if (rval == -1) { 27549 kmem_free(un->sd_fi_fifo_pkt[i], 27550 sizeof (struct sd_fi_pkt)); 27551 un->sd_fi_fifo_pkt[i] = NULL; 27552 } 27553 } else { 27554 SD_INFO(SD_LOG_IOERR, un, 27555 "sd_faultinjection_ioctl: pkt null\n"); 27556 } 27557 break; 27558 27559 case SDIOCINSERTXB: 27560 /* Store a xb struct to be pushed onto fifo */ 27561 SD_INFO(SD_LOG_SDTEST, un, 27562 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 27563 27564 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27565 27566 sd_fault_injection_on = 0; 27567 27568 if (un->sd_fi_fifo_xb[i] != NULL) { 27569 kmem_free(un->sd_fi_fifo_xb[i], 27570 sizeof (struct sd_fi_xb)); 27571 un->sd_fi_fifo_xb[i] = NULL; 27572 } 27573 if (arg != NULL) { 27574 un->sd_fi_fifo_xb[i] = 27575 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 27576 if (un->sd_fi_fifo_xb[i] == NULL) { 27577 /* Alloc failed don't store anything */ 27578 break; 27579 } 27580 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 27581 sizeof (struct sd_fi_xb), 0); 27582 27583 if (rval == -1) { 27584 kmem_free(un->sd_fi_fifo_xb[i], 27585 sizeof (struct sd_fi_xb)); 27586 un->sd_fi_fifo_xb[i] = NULL; 27587 } 27588 } else { 27589 SD_INFO(SD_LOG_IOERR, un, 27590 "sd_faultinjection_ioctl: xb null\n"); 27591 } 27592 break; 27593 27594 case SDIOCINSERTUN: 27595 /* Store a un struct to be pushed onto fifo */ 27596 SD_INFO(SD_LOG_SDTEST, un, 27597 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 27598 27599 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27600 27601 sd_fault_injection_on = 0; 27602 27603 if (un->sd_fi_fifo_un[i] != NULL) { 27604 kmem_free(un->sd_fi_fifo_un[i], 27605 sizeof (struct sd_fi_un)); 27606 un->sd_fi_fifo_un[i] = NULL; 27607 } 27608 if (arg != NULL) { 27609 un->sd_fi_fifo_un[i] = 27610 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 27611 if (un->sd_fi_fifo_un[i] == NULL) { 27612 /* Alloc failed don't store anything */ 27613 break; 27614 } 27615 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 27616 sizeof (struct sd_fi_un), 0); 27617 if (rval == -1) { 27618 kmem_free(un->sd_fi_fifo_un[i], 27619 sizeof (struct sd_fi_un)); 27620 un->sd_fi_fifo_un[i] = NULL; 27621 } 27622 27623 } else { 27624 SD_INFO(SD_LOG_IOERR, un, 27625 "sd_faultinjection_ioctl: un null\n"); 27626 } 27627 27628 break; 27629 27630 case SDIOCINSERTARQ: 27631 /* Store a arq struct to be pushed onto fifo */ 27632 SD_INFO(SD_LOG_SDTEST, un, 27633 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 27634 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27635 27636 sd_fault_injection_on = 0; 27637 27638 if (un->sd_fi_fifo_arq[i] != NULL) { 27639 kmem_free(un->sd_fi_fifo_arq[i], 27640 sizeof (struct sd_fi_arq)); 27641 un->sd_fi_fifo_arq[i] = NULL; 27642 } 27643 if (arg != NULL) { 27644 un->sd_fi_fifo_arq[i] = 27645 kmem_alloc(sizeof (struct sd_fi_arq), 
KM_NOSLEEP); 27646 if (un->sd_fi_fifo_arq[i] == NULL) { 27647 /* Alloc failed don't store anything */ 27648 break; 27649 } 27650 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 27651 sizeof (struct sd_fi_arq), 0); 27652 if (rval == -1) { 27653 kmem_free(un->sd_fi_fifo_arq[i], 27654 sizeof (struct sd_fi_arq)); 27655 un->sd_fi_fifo_arq[i] = NULL; 27656 } 27657 27658 } else { 27659 SD_INFO(SD_LOG_IOERR, un, 27660 "sd_faultinjection_ioctl: arq null\n"); 27661 } 27662 27663 break; 27664 27665 case SDIOCPUSH: 27666 /* Push stored xb, pkt, un, and arq onto fifo */ 27667 sd_fault_injection_on = 0; 27668 27669 if (arg != NULL) { 27670 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 27671 if (rval != -1 && 27672 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 27673 un->sd_fi_fifo_end += i; 27674 } 27675 } else { 27676 SD_INFO(SD_LOG_IOERR, un, 27677 "sd_faultinjection_ioctl: push arg null\n"); 27678 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 27679 un->sd_fi_fifo_end++; 27680 } 27681 } 27682 SD_INFO(SD_LOG_IOERR, un, 27683 "sd_faultinjection_ioctl: push to end=%d\n", 27684 un->sd_fi_fifo_end); 27685 break; 27686 27687 case SDIOCRETRIEVE: 27688 /* Return buffer of log from Injection session */ 27689 SD_INFO(SD_LOG_SDTEST, un, 27690 "sd_faultinjection_ioctl: Injecting Fault Retreive"); 27691 27692 sd_fault_injection_on = 0; 27693 27694 mutex_enter(&(un->un_fi_mutex)); 27695 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 27696 un->sd_fi_buf_len+1, 0); 27697 mutex_exit(&(un->un_fi_mutex)); 27698 27699 if (rval == -1) { 27700 /* 27701 * arg is possibly invalid setting 27702 * it to NULL for return 27703 */ 27704 arg = NULL; 27705 } 27706 break; 27707 } 27708 27709 mutex_exit(SD_MUTEX(un)); 27710 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 27711 " exit\n"); 27712 } 27713 27714 27715 /* 27716 * Function: sd_injection_log() 27717 * 27718 * Description: This routine adds buff to the already existing injection log 27719 * for retrieval via faultinjection_ioctl for use in fault 27720 * detection and recovery 27721 * 27722 * Arguments: buf - the string to add to the log 27723 */ 27724 27725 static void 27726 sd_injection_log(char *buf, struct sd_lun *un) 27727 { 27728 uint_t len; 27729 27730 ASSERT(un != NULL); 27731 ASSERT(buf != NULL); 27732 27733 mutex_enter(&(un->un_fi_mutex)); 27734 27735 len = min(strlen(buf), 255); 27736 /* Add logged value to Injection log to be returned later */ 27737 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 27738 uint_t offset = strlen((char *)un->sd_fi_log); 27739 char *destp = (char *)un->sd_fi_log + offset; 27740 int i; 27741 for (i = 0; i < len; i++) { 27742 *destp++ = *buf++; 27743 } 27744 un->sd_fi_buf_len += len; 27745 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 27746 } 27747 27748 mutex_exit(&(un->un_fi_mutex)); 27749 } 27750 27751 27752 /* 27753 * Function: sd_faultinjection() 27754 * 27755 * Description: This routine takes the pkt and changes its 27756 * content based on error injection scenerio. 
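* The next queued sd_fi_pkt, sd_fi_xb, sd_fi_un, and sd_fi_arq entries
* are pulled off the injection fifo, selected fields are overlaid onto
* the active pkt, xbuf, and soft state, and the entries are then freed.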
27757 * 27758 * Arguments: pktp - packet to be changed 27759 */ 27760 27761 static void 27762 sd_faultinjection(struct scsi_pkt *pktp) 27763 { 27764 uint_t i; 27765 struct sd_fi_pkt *fi_pkt; 27766 struct sd_fi_xb *fi_xb; 27767 struct sd_fi_un *fi_un; 27768 struct sd_fi_arq *fi_arq; 27769 struct buf *bp; 27770 struct sd_xbuf *xb; 27771 struct sd_lun *un; 27772 27773 ASSERT(pktp != NULL); 27774 27775 /* pull bp xb and un from pktp */ 27776 bp = (struct buf *)pktp->pkt_private; 27777 xb = SD_GET_XBUF(bp); 27778 un = SD_GET_UN(bp); 27779 27780 ASSERT(un != NULL); 27781 27782 mutex_enter(SD_MUTEX(un)); 27783 27784 SD_TRACE(SD_LOG_SDTEST, un, 27785 "sd_faultinjection: entry Injection from sdintr\n"); 27786 27787 /* if injection is off return */ 27788 if (sd_fault_injection_on == 0 || 27789 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 27790 mutex_exit(SD_MUTEX(un)); 27791 return; 27792 } 27793 27794 27795 /* take next set off fifo */ 27796 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 27797 27798 fi_pkt = un->sd_fi_fifo_pkt[i]; 27799 fi_xb = un->sd_fi_fifo_xb[i]; 27800 fi_un = un->sd_fi_fifo_un[i]; 27801 fi_arq = un->sd_fi_fifo_arq[i]; 27802 27803 27804 /* set variables accordingly */ 27805 /* set pkt if it was on fifo */ 27806 if (fi_pkt != NULL) { 27807 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 27808 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 27809 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 27810 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 27811 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 27812 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 27813 27814 } 27815 27816 /* set xb if it was on fifo */ 27817 if (fi_xb != NULL) { 27818 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 27819 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 27820 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 27821 SD_CONDSET(xb, xb, xb_victim_retry_count, 27822 "xb_victim_retry_count"); 27823 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 27824 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 27825 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 27826 27827 /* copy in block data from sense */ 27828 if (fi_xb->xb_sense_data[0] != -1) { 27829 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 27830 SENSE_LENGTH); 27831 } 27832 27833 /* copy in extended sense codes */ 27834 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 27835 "es_code"); 27836 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 27837 "es_key"); 27838 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 27839 "es_add_code"); 27840 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 27841 es_qual_code, "es_qual_code"); 27842 } 27843 27844 /* set un if it was on fifo */ 27845 if (fi_un != NULL) { 27846 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 27847 SD_CONDSET(un, un, un_ctype, "un_ctype"); 27848 SD_CONDSET(un, un, un_reset_retry_count, 27849 "un_reset_retry_count"); 27850 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 27851 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 27852 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 27853 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 27854 "un_f_allow_bus_device_reset"); 27855 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 27856 27857 } 27858 27859 /* copy in auto request sense if it was on fifo */ 27860 if (fi_arq != NULL) { 27861 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 27862 } 27863 27864 /* free structs */ 27865 if (un->sd_fi_fifo_pkt[i] != NULL) { 27866 
kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 27867 } 27868 if (un->sd_fi_fifo_xb[i] != NULL) { 27869 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 27870 } 27871 if (un->sd_fi_fifo_un[i] != NULL) { 27872 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 27873 } 27874 if (un->sd_fi_fifo_arq[i] != NULL) { 27875 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 27876 } 27877 27878 /* 27879 * kmem_free does not guarantee that the pointer is set to NULL. 27880 * Since we use these pointers to determine whether values were 27881 * injected, confirm that they are always 27882 * NULL after the free. 27883 */ 27884 un->sd_fi_fifo_pkt[i] = NULL; 27885 un->sd_fi_fifo_un[i] = NULL; 27886 un->sd_fi_fifo_xb[i] = NULL; 27887 un->sd_fi_fifo_arq[i] = NULL; 27888 27889 un->sd_fi_fifo_start++; 27890 27891 mutex_exit(SD_MUTEX(un)); 27892 27893 SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 27894 } 27895 27896 #endif /* SD_FAULT_INJECTION */ 27897 27898 /* 27899 * This routine is invoked from sd_unit_attach(). Before it is called, the 27900 * properties in the conf file, including the "hotpluggable" property, must 27901 * already have been processed. 27902 * 27903 * The sd driver distinguishes three different types of devices: removable media, 27904 * non-removable media, and hotpluggable. The differences are defined below: 27905 * 27906 * 1. Device ID 27907 * 27908 * The device ID of a device is used to identify this device. Refer to 27909 * ddi_devid_register(9F). 27910 * 27911 * For a non-removable media disk device that can provide the 0x80 or 0x83 27912 * VPD page (refer to the INQUIRY command in the SCSI SPC specification), a unique 27913 * device ID is created to identify this device. For other non-removable 27914 * media devices, a default device ID is created only if this device has 27915 * at least 2 alternate cylinders. Otherwise, this device has no devid. 27916 * 27917 * ------------------------------------------------------- 27918 * removable media hotpluggable | Can Have Device ID 27919 * ------------------------------------------------------- 27920 * false false | Yes 27921 * false true | Yes 27922 * true x | No 27923 * ------------------------------------------------------ 27924 * 27925 * 27926 * 2. SCSI group 4 commands 27927 * 27928 * In the SCSI specs, only some commands in the group 4 command set can use 27929 * 8-byte addresses to access storage beyond 2TB. 27930 * Other commands have no such capability. Without group 4 support, 27931 * it is impossible to make full use of the storage space of a disk with 27932 * a capacity larger than 2TB. 27933 * 27934 * ----------------------------------------------- 27935 * removable media hotpluggable LP64 | Group 27936 * ----------------------------------------------- 27937 * false false false | 1 27938 * false false true | 4 27939 * false true false | 1 27940 * false true true | 4 27941 * true x x | 5 27942 * ----------------------------------------------- 27943 * 27944 * 27945 * 3. Check for VTOC Label 27946 * 27947 * If a direct-access disk has no EFI label, sd will check whether it has a 27948 * valid VTOC label. Now, sd also does that check for removable media 27949 * and hotpluggable devices.
27950 * 27951 * -------------------------------------------------------------- 27952 * Direct-Access removable media hotpluggable | Check Label 27953 * ------------------------------------------------------------- 27954 * false false false | No 27955 * false false true | No 27956 * false true false | Yes 27957 * false true true | Yes 27958 * true x x | Yes 27959 * -------------------------------------------------------------- 27960 * 27961 * 27962 * 4. Building default VTOC label 27963 * 27964 * As section 3 says, sd checks whether some kinds of devices have a VTOC label. 27965 * If those devices have no valid VTOC label, sd(7d) will attempt to 27966 * create a default VTOC label for them. Currently sd creates a default VTOC label 27967 * for all devices on the x86 platform (VTOC_16), but only for removable 27968 * media devices on SPARC (VTOC_8). 27969 * 27970 * ----------------------------------------------------------- 27971 * removable media hotpluggable platform | Default Label 27972 * ----------------------------------------------------------- 27973 * false false sparc | No 27974 * false true x86 | Yes 27975 * false true sparc | Yes 27976 * true x x | Yes 27977 * ---------------------------------------------------------- 27978 * 27979 * 27980 * 5. Supported blocksizes of target devices 27981 * 27982 * Sd supports a non-512-byte blocksize for removable media devices only. 27983 * For other devices, only the 512-byte blocksize is supported. This may 27984 * change in the near future because some RAID devices require a non-512-byte 27985 * blocksize. 27986 * 27987 * ----------------------------------------------------------- 27988 * removable media hotpluggable | non-512-byte blocksize 27989 * ----------------------------------------------------------- 27990 * false false | No 27991 * false true | No 27992 * true x | Yes 27993 * ----------------------------------------------------------- 27994 * 27995 * 27996 * 6. Automatic mount & unmount 27997 * 27998 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to query 27999 * whether a device is a removable media device. It returns 1 for removable media 28000 * devices, and 0 for others. (An illustrative userland sketch of this query appears in a comment at the end of this file.) 28001 * 28002 * The automatic mounting subsystem should distinguish between the types 28003 * of devices and apply automounting policies to each. 28004 * 28005 * 28006 * 7. fdisk partition management 28007 * 28008 * Fdisk is the traditional partitioning method on the x86 platform. The sd(7d) driver 28009 * supports fdisk partitions only on the x86 platform. On the SPARC platform, sd 28010 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 28011 * fdisk partitions on both the x86 and SPARC platforms. 28012 * 28013 * ----------------------------------------------------------- 28014 * platform removable media USB/1394 | fdisk supported 28015 * ----------------------------------------------------------- 28016 * x86 X X | true 28017 * ------------------------------------------------------------ 28018 * sparc X X | false 28019 * ------------------------------------------------------------ 28020 * 28021 * 28022 * 8. MBOOT/MBR 28023 * 28024 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does support 28025 * reading/writing the mboot for removable media devices on SPARC.
28026 * 28027 * ----------------------------------------------------------- 28028 * platform removable media USB/1394 | mboot supported 28029 * ----------------------------------------------------------- 28030 * x86 X X | true 28031 * ------------------------------------------------------------ 28032 * sparc false false | false 28033 * sparc false true | true 28034 * sparc true false | true 28035 * sparc true true | true 28036 * ------------------------------------------------------------ 28037 * 28038 * 28039 * 9. Error handling when opening a device 28040 * 28041 * If a disk device fails to open, an errno is returned. For some kinds 28042 * of errors, a different errno is returned depending on whether the device is 28043 * a removable media device. This brings USB/1394 hard disks in line with 28044 * expected hard disk behavior. It is not expected that this breaks any 28045 * application. 28046 * 28047 * ------------------------------------------------------ 28048 * removable media hotpluggable | errno 28049 * ------------------------------------------------------ 28050 * false false | EIO 28051 * false true | EIO 28052 * true x | ENXIO 28053 * ------------------------------------------------------ 28054 * 28055 * 28056 * 11. ioctls: DKIOCEJECT, CDROMEJECT 28057 * 28058 * These IOCTLs are applicable only to removable media devices. 28059 * 28060 * ----------------------------------------------------------- 28061 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 28062 * ----------------------------------------------------------- 28063 * false false | No 28064 * false true | No 28065 * true x | Yes 28066 * ----------------------------------------------------------- 28067 * 28068 * 28069 * 12. Kstats for partitions 28070 * 28071 * sd creates partition kstats for non-removable media devices. USB and 28072 * Firewire hard disks now have partition kstats as well. 28073 * 28074 * ------------------------------------------------------ 28075 * removable media hotpluggable | kstat 28076 * ------------------------------------------------------ 28077 * false false | Yes 28078 * false true | Yes 28079 * true x | No 28080 * ------------------------------------------------------ 28081 * 28082 * 28083 * 13. Removable media & hotpluggable properties 28084 * 28085 * The sd driver creates a "removable-media" property for removable media 28086 * devices. Parent nexus drivers create a "hotpluggable" property if 28087 * they support hotplugging. 28088 * 28089 * --------------------------------------------------------------------- 28090 * removable media hotpluggable | "removable-media" "hotpluggable" 28091 * --------------------------------------------------------------------- 28092 * false false | No No 28093 * false true | No Yes 28094 * true false | Yes No 28095 * true true | Yes Yes 28096 * --------------------------------------------------------------------- 28097 * 28098 * 28099 * 14. Power Management 28100 * 28101 * sd only power manages removable media devices or devices that support 28102 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250). 28103 * 28104 * A parent nexus that supports hotplugging can also set "pm-capable" 28105 * if the disk can be power managed.
28106 * 28107 * ------------------------------------------------------------ 28108 * removable media hotpluggable pm-capable | power manage 28109 * ------------------------------------------------------------ 28110 * false false false | No 28111 * false false true | Yes 28112 * false true false | No 28113 * false true true | Yes 28114 * true x x | Yes 28115 * ------------------------------------------------------------ 28116 * 28117 * USB and Firewire hard disks can now be power managed independently 28118 * of the framebuffer. 28119 * 28120 * 28121 * 15. Support for USB disks with capacity larger than 1TB 28122 * 28123 * Currently, sd doesn't permit a fixed disk device with a capacity 28124 * larger than 1TB to be used in a 32-bit operating system environment. 28125 * However, sd doesn't apply that restriction to removable media devices. Instead, it 28126 * assumes that removable media devices cannot have a capacity larger 28127 * than 1TB. Therefore, using those devices on a 32-bit system is only partially 28128 * supported, which can cause unexpected results. 28129 * 28130 * --------------------------------------------------------------------- 28131 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 28132 * --------------------------------------------------------------------- 28133 * false false | true | no 28134 * false true | true | no 28135 * true false | true | Yes 28136 * true true | true | Yes 28137 * --------------------------------------------------------------------- 28138 * 28139 * 28140 * 16. Check write-protection at open time 28141 * 28142 * When a removable media device is opened for writing without the NDELAY 28143 * flag, sd will check whether the device is writable. An attempt to open 28144 * a write-protected device for writing without the NDELAY flag will fail. 28145 * 28146 * ------------------------------------------------------------ 28147 * removable media USB/1394 | WP Check 28148 * ------------------------------------------------------------ 28149 * false false | No 28150 * false true | No 28151 * true false | Yes 28152 * true true | Yes 28153 * ------------------------------------------------------------ 28154 * 28155 * 28156 * 17. Syslog when a corrupted VTOC is encountered 28157 * 28158 * Currently, if an invalid VTOC is encountered, sd only prints a syslog message 28159 * for fixed SCSI disks. 28160 * ------------------------------------------------------------ 28161 * removable media USB/1394 | print syslog 28162 * ------------------------------------------------------------ 28163 * false false | Yes 28164 * false true | No 28165 * true false | No 28166 * true true | No 28167 * ------------------------------------------------------------ 28168 */ 28169 static void 28170 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 28171 { 28172 int pm_capable_prop; 28173 28174 ASSERT(un->un_sd); 28175 ASSERT(un->un_sd->sd_inq); 28176 28177 /* 28178 * Enable SYNC CACHE support for all devices. 28179 */ 28180 un->un_f_sync_cache_supported = TRUE; 28181 28182 /* 28183 * Set the sync cache required flag to false. 28184 * This ensures that no SYNC CACHE command is 28185 * sent when there have been no writes. 28186 */ 28187 un->un_f_sync_cache_required = FALSE; 28188 28189 if (un->un_sd->sd_inq->inq_rmb) { 28190 /* 28191 * The media of this device is removable. For this kind of 28192 * device it is possible to change the medium after the device 28193 * has been opened, so we should support that operation.
28194 */ 28195 un->un_f_has_removable_media = TRUE; 28196 28197 /* 28198 * support non-512-byte blocksize of removable media devices 28199 */ 28200 un->un_f_non_devbsize_supported = TRUE; 28201 28202 /* 28203 * Assume that all removable media devices support DOOR_LOCK 28204 */ 28205 un->un_f_doorlock_supported = TRUE; 28206 28207 /* 28208 * For a removable media device, it is possible to be opened 28209 * with NDELAY flag when there is no media in drive, in this 28210 * case we don't care if device is writable. But if without 28211 * NDELAY flag, we need to check if media is write-protected. 28212 */ 28213 un->un_f_chk_wp_open = TRUE; 28214 28215 /* 28216 * need to start a SCSI watch thread to monitor media state, 28217 * when media is being inserted or ejected, notify syseventd. 28218 */ 28219 un->un_f_monitor_media_state = TRUE; 28220 28221 /* 28222 * Some devices don't support START_STOP_UNIT command. 28223 * Therefore, we'd better check if a device supports it 28224 * before sending it. 28225 */ 28226 un->un_f_check_start_stop = TRUE; 28227 28228 /* 28229 * support eject media ioctl: 28230 * FDEJECT, DKIOCEJECT, CDROMEJECT 28231 */ 28232 un->un_f_eject_media_supported = TRUE; 28233 28234 /* 28235 * Because many removable-media devices don't support 28236 * LOG_SENSE, we couldn't use this command to check if 28237 * a removable media device support power-management. 28238 * We assume that they support power-management via 28239 * START_STOP_UNIT command and can be spun up and down 28240 * without limitations. 28241 */ 28242 un->un_f_pm_supported = TRUE; 28243 28244 /* 28245 * Need to create a zero length (Boolean) property 28246 * removable-media for the removable media devices. 28247 * Note that the return value of the property is not being 28248 * checked, since if unable to create the property 28249 * then do not want the attach to fail altogether. Consistent 28250 * with other property creation in attach. 28251 */ 28252 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 28253 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 28254 28255 } else { 28256 /* 28257 * create device ID for device 28258 */ 28259 un->un_f_devid_supported = TRUE; 28260 28261 /* 28262 * Spin up non-removable-media devices once it is attached 28263 */ 28264 un->un_f_attach_spinup = TRUE; 28265 28266 /* 28267 * According to SCSI specification, Sense data has two kinds of 28268 * format: fixed format, and descriptor format. At present, we 28269 * don't support descriptor format sense data for removable 28270 * media. 28271 */ 28272 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 28273 un->un_f_descr_format_supported = TRUE; 28274 } 28275 28276 /* 28277 * kstats are created only for non-removable media devices. 28278 * 28279 * Set this in sd.conf to 0 in order to disable kstats. The 28280 * default is 1, so they are enabled by default. 28281 */ 28282 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 28283 SD_DEVINFO(un), DDI_PROP_DONTPASS, 28284 "enable-partition-kstats", 1)); 28285 28286 /* 28287 * Check if HBA has set the "pm-capable" property. 28288 * If "pm-capable" exists and is non-zero then we can 28289 * power manage the device without checking the start/stop 28290 * cycle count log sense page. 28291 * 28292 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 28293 * then we should not power manage the device. 28294 * 28295 * If "pm-capable" doesn't exist then pm_capable_prop will 28296 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
In this case, 28297 * sd will check the start/stop cycle count log sense page 28298 * and power manage the device if the cycle count limit has 28299 * not been exceeded. 28300 */ 28301 pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 28302 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 28303 if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 28304 un->un_f_log_sense_supported = TRUE; 28305 } else { 28306 /* 28307 * pm-capable property exists. 28308 * 28309 * Convert "TRUE" values for pm_capable_prop to 28310 * SD_PM_CAPABLE_TRUE (1) to make it easier to check 28311 * later. "TRUE" values are any values except 28312 * SD_PM_CAPABLE_FALSE (0) and 28313 * SD_PM_CAPABLE_UNDEFINED (-1) 28314 */ 28315 if (pm_capable_prop == SD_PM_CAPABLE_FALSE) { 28316 un->un_f_log_sense_supported = FALSE; 28317 } else { 28318 un->un_f_pm_supported = TRUE; 28319 } 28320 28321 SD_INFO(SD_LOG_ATTACH_DETACH, un, 28322 "sd_unit_attach: un:0x%p pm-capable " 28323 "property set to %d.\n", un, un->un_f_pm_supported); 28324 } 28325 } 28326 28327 if (un->un_f_is_hotpluggable) { 28328 28329 /* 28330 * Have to watch hotpluggable devices as well, since 28331 * that's the only way for userland applications to 28332 * detect hot removal while device is busy/mounted. 28333 */ 28334 un->un_f_monitor_media_state = TRUE; 28335 28336 un->un_f_check_start_stop = TRUE; 28337 28338 } 28339 } 28340 28341 /* 28342 * sd_tg_rdwr: 28343 * Provides rdwr access for cmlb via sd_tgops. The start_block is 28344 * in sys block size, req_length in bytes. 28345 * 28346 */ 28347 static int 28348 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 28349 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 28350 { 28351 struct sd_lun *un; 28352 int path_flag = (int)(uintptr_t)tg_cookie; 28353 char *dkl = NULL; 28354 diskaddr_t real_addr = start_block; 28355 diskaddr_t first_byte, end_block; 28356 28357 size_t buffer_size = reqlength; 28358 int rval; 28359 diskaddr_t cap; 28360 uint32_t lbasize; 28361 28362 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 28363 if (un == NULL) 28364 return (ENXIO); 28365 28366 if (cmd != TG_READ && cmd != TG_WRITE) 28367 return (EINVAL); 28368 28369 mutex_enter(SD_MUTEX(un)); 28370 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 28371 mutex_exit(SD_MUTEX(un)); 28372 rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap, 28373 &lbasize, path_flag); 28374 if (rval != 0) 28375 return (rval); 28376 mutex_enter(SD_MUTEX(un)); 28377 sd_update_block_info(un, lbasize, cap); 28378 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 28379 mutex_exit(SD_MUTEX(un)); 28380 return (EIO); 28381 } 28382 } 28383 28384 if (NOT_DEVBSIZE(un)) { 28385 /* 28386 * sys_blocksize != tgt_blocksize, need to re-adjust 28387 * blkno and save the index to beginning of dk_label 28388 */ 28389 first_byte = SD_SYSBLOCKS2BYTES(un, start_block); 28390 real_addr = first_byte / un->un_tgt_blocksize; 28391 28392 end_block = (first_byte + reqlength + 28393 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 28394 28395 /* round up buffer size to multiple of target block size */ 28396 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 28397 28398 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 28399 "label_addr: 0x%x allocation size: 0x%x\n", 28400 real_addr, buffer_size); 28401 28402 if (((first_byte % un->un_tgt_blocksize) != 0) || 28403 (reqlength % un->un_tgt_blocksize) != 0) 28404 /* the request is not aligned */ 28405 dkl = kmem_zalloc(buffer_size, KM_SLEEP); 28406 } 28407 28408 /* 28409 * The MMC 
standard allows READ CAPACITY to be 28410 * inaccurate by a bounded amount (in the interest of 28411 * response latency). As a result, failed READs are 28412 * commonplace (due to the reading of metadata and not 28413 * data). Depending on the per-Vendor/drive Sense data, 28414 * the failed READ can cause many (unnecessary) retries. 28415 */ 28416 28417 if (ISCD(un) && (cmd == TG_READ) && 28418 (un->un_f_blockcount_is_valid == TRUE) && 28419 ((start_block == (un->un_blockcount - 1))|| 28420 (start_block == (un->un_blockcount - 2)))) { 28421 path_flag = SD_PATH_DIRECT_PRIORITY; 28422 } 28423 28424 mutex_exit(SD_MUTEX(un)); 28425 if (cmd == TG_READ) { 28426 rval = sd_send_scsi_READ(un, (dkl != NULL)? dkl: bufaddr, 28427 buffer_size, real_addr, path_flag); 28428 if (dkl != NULL) 28429 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 28430 real_addr), bufaddr, reqlength); 28431 } else { 28432 if (dkl) { 28433 rval = sd_send_scsi_READ(un, dkl, buffer_size, 28434 real_addr, path_flag); 28435 if (rval) { 28436 kmem_free(dkl, buffer_size); 28437 return (rval); 28438 } 28439 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 28440 real_addr), reqlength); 28441 } 28442 rval = sd_send_scsi_WRITE(un, (dkl != NULL)? dkl: bufaddr, 28443 buffer_size, real_addr, path_flag); 28444 } 28445 28446 if (dkl != NULL) 28447 kmem_free(dkl, buffer_size); 28448 28449 return (rval); 28450 } 28451 28452 28453 static int 28454 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 28455 { 28456 28457 struct sd_lun *un; 28458 diskaddr_t cap; 28459 uint32_t lbasize; 28460 int path_flag = (int)(uintptr_t)tg_cookie; 28461 int ret = 0; 28462 28463 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 28464 if (un == NULL) 28465 return (ENXIO); 28466 28467 switch (cmd) { 28468 case TG_GETPHYGEOM: 28469 case TG_GETVIRTGEOM: 28470 case TG_GETCAPACITY: 28471 case TG_GETBLOCKSIZE: 28472 mutex_enter(SD_MUTEX(un)); 28473 28474 if ((un->un_f_blockcount_is_valid == TRUE) && 28475 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 28476 cap = un->un_blockcount; 28477 lbasize = un->un_tgt_blocksize; 28478 mutex_exit(SD_MUTEX(un)); 28479 } else { 28480 mutex_exit(SD_MUTEX(un)); 28481 ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap, 28482 &lbasize, path_flag); 28483 if (ret != 0) 28484 return (ret); 28485 mutex_enter(SD_MUTEX(un)); 28486 sd_update_block_info(un, lbasize, cap); 28487 if ((un->un_f_blockcount_is_valid == FALSE) || 28488 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 28489 mutex_exit(SD_MUTEX(un)); 28490 return (EIO); 28491 } 28492 mutex_exit(SD_MUTEX(un)); 28493 } 28494 28495 if (cmd == TG_GETCAPACITY) { 28496 *(diskaddr_t *)arg = cap; 28497 return (0); 28498 } 28499 28500 if (cmd == TG_GETBLOCKSIZE) { 28501 *(uint32_t *)arg = lbasize; 28502 return (0); 28503 } 28504 28505 if (cmd == TG_GETPHYGEOM) 28506 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 28507 cap, lbasize, path_flag); 28508 else 28509 /* TG_GETVIRTGEOM */ 28510 ret = sd_get_virtual_geometry(un, 28511 (cmlb_geom_t *)arg, cap, lbasize); 28512 28513 return (ret); 28514 28515 case TG_GETATTR: 28516 mutex_enter(SD_MUTEX(un)); 28517 ((tg_attribute_t *)arg)->media_is_writable = 28518 un->un_f_mmc_writable_media; 28519 mutex_exit(SD_MUTEX(un)); 28520 return (0); 28521 default: 28522 return (ENOTTY); 28523 28524 } 28525 28526 } 28527
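/*
 * Illustrative userland sketch (not part of this driver): how an application
 * can observe the removable-media and capacity attributes described in the
 * device-attributes comment above, using the dkio(7I) ioctls DKIOCREMOVABLE
 * and DKIOCGMEDIAINFO.  The device path is hypothetical and error handling is
 * minimal; this is a sketch of the interface only, not a supported utility.
 *
 *	#include <sys/types.h>
 *	#include <sys/dkio.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stropts.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct dk_minfo	minfo;
 *		int		removable = 0;
 *		int		fd;
 *
 *		// O_NDELAY allows the open to succeed even if no media is
 *		// currently present (see section 16 above).
 *		fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *		if (fd < 0)
 *			return (1);
 *
 *		// Non-zero means the target reported removable media.
 *		if (ioctl(fd, DKIOCREMOVABLE, &removable) == 0)
 *			(void) printf("removable: %d\n", removable);
 *
 *		// Logical block size and capacity as exported by the driver.
 *		if (ioctl(fd, DKIOCGMEDIAINFO, &minfo) == 0)
 *			(void) printf("lbsize: %u capacity: %llu\n",
 *			    minfo.dki_lbsize,
 *			    (unsigned long long)minfo.dki_capacity);
 *
 *		(void) close(fd);
 *		return (0);
 *	}
 */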