// SPDX-License-Identifier: GPL-2.0-only
/*
 * hosts.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995 Eric Youngdale
 *         Copyright (C) 2002-2003 Christoph Hellwig
 *
 * mid to lowlevel SCSI driver interface
 *     Initial versions: Drew Eckhardt
 *     Subsequent revisions: Eric Youngdale
 *
 * <drew@colorado.edu>
 *
 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 * Added QLOGIC QLA1280 SCSI controller kernel host support.
 *     August 4, 1999 Fred Lewis, Intel DuPont
 *
 * Updated to reflect the new initialization scheme for the higher
 * level of scsi drivers (sd/sr/st)
 * September 17, 2000 Torben Mathiasen <tmm@image.dk>
 *
 * Restructured scsi_host lists and associated functions.
 * September 04, 2002 Mike Anderson (andmike@us.ibm.com)
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


static int shost_eh_deadline = -1;

module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline,
		 "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");

static DEFINE_IDA(host_index_ida);


static void scsi_host_cls_release(struct device *dev)
{
	put_device(&class_to_shost(dev)->shost_gendev);
}

static struct class shost_class = {
	.name		= "scsi_host",
	.dev_release	= scsi_host_cls_release,
	.dev_groups	= scsi_shost_groups,
};

/**
 * scsi_host_set_state - Take the given host through the host state model.
 * @shost:	scsi host to change the state of.
 * @state:	state to change to.
 *
 * Returns zero if successful or an error if the requested
 * transition is illegal.
 **/
int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{
	enum scsi_host_state oldstate = shost->shost_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SHOST_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SHOST_RUNNING:
		switch (oldstate) {
		case SHOST_CREATED:
		case SHOST_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_RECOVERY:
		switch (oldstate) {
		case SHOST_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_CANCEL:
		switch (oldstate) {
		case SHOST_CREATED:
		case SHOST_RUNNING:
		case SHOST_CANCEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_DEL:
		switch (oldstate) {
		case SHOST_CANCEL:
		case SHOST_DEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_CANCEL_RECOVERY:
		switch (oldstate) {
		case SHOST_CANCEL:
		case SHOST_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_DEL_RECOVERY:
		switch (oldstate) {
		case SHOST_CANCEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;
	}
	shost->shost_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_ERR, shost,
					     "Illegal host state transition"
					     " %s->%s\n",
					     scsi_host_state_name(oldstate),
					     scsi_host_state_name(state)));
	return -EINVAL;
}
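
/*
 * Example (illustrative sketch, not taken from any particular caller):
 * state changes are made under host_lock, and callers typically fall back
 * to the matching *_RECOVERY state when the host is busy with error
 * recovery, mirroring the pattern scsi_remove_host() uses below.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	if (scsi_host_set_state(shost, SHOST_CANCEL))
 *		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
 *			shost_printk(KERN_WARNING, shost,
 *				     "could not start host removal\n");
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */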

/**
 * scsi_remove_host - remove a scsi host
 * @shost:	a pointer to a scsi host to remove
 **/
void scsi_remove_host(struct Scsi_Host *shost)
{
	unsigned long flags;

	mutex_lock(&shost->scan_mutex);
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_CANCEL))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
			spin_unlock_irqrestore(shost->host_lock, flags);
			mutex_unlock(&shost->scan_mutex);
			return;
		}
	spin_unlock_irqrestore(shost->host_lock, flags);

	scsi_autopm_get_host(shost);
	flush_workqueue(shost->tmf_work_q);
	scsi_forget_host(shost);
	mutex_unlock(&shost->scan_mutex);
	scsi_proc_host_rm(shost);
	scsi_proc_hostdir_rm(shost->hostt);

	/*
	 * New SCSI devices cannot be attached anymore because of the SCSI host
	 * state so drop the tag set refcnt. Wait until the tag set refcnt drops
	 * to zero because .exit_cmd_priv implementations may need the host
	 * pointer.
	 */
	kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
	wait_for_completion(&shost->tagset_freed);

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_DEL))
		BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);

	transport_unregister_device(&shost->shost_gendev);
	device_unregister(&shost->shost_dev);
	device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);
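
/*
 * Example (illustrative sketch): a low-level driver's remove path pairs
 * scsi_remove_host() with scsi_host_put() to drop the reference taken by
 * scsi_host_alloc(). The foo_* names are hypothetical.
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		struct Scsi_Host *shost = pci_get_drvdata(pdev);
 *
 *		scsi_remove_host(shost);	// unbind from the midlayer
 *		// ... quiesce and release HBA resources here ...
 *		scsi_host_put(shost);		// drop the final reference
 *	}
 */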

/**
 * scsi_add_host_with_dma - add a scsi host with dma device
 * @shost:	scsi host pointer to add
 * @dev:	a struct device of type scsi class
 * @dma_dev:	dma device for the host
 *
 * Note: You rarely need to worry about this unless you're in a
 * virtualised host environment, so use the simpler scsi_add_host()
 * function instead.
 *
 * Return value:
 * 	0 on success / != 0 for error
 **/
int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
			   struct device *dma_dev)
{
	const struct scsi_host_template *sht = shost->hostt;
	int error = -EINVAL;

	shost_printk(KERN_INFO, shost, "%s\n",
			sht->info ? sht->info(shost) : sht->name);

	if (!shost->can_queue) {
		shost_printk(KERN_ERR, shost,
			     "can_queue = 0 no longer supported\n");
		goto fail;
	}

	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
				   shost->can_queue);

	error = scsi_init_sense_cache(shost);
	if (error)
		goto fail;

	if (!shost->shost_gendev.parent)
		shost->shost_gendev.parent = dev ? dev : &platform_bus;
	if (!dma_dev)
		dma_dev = shost->shost_gendev.parent;

	shost->dma_dev = dma_dev;

	if (dma_dev->dma_mask) {
		shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
	}

	error = scsi_mq_setup_tags(shost);
	if (error)
		goto fail;

	kref_init(&shost->tagset_refcnt);
	init_completion(&shost->tagset_freed);

	/*
	 * Increase usage count temporarily here so that calling
	 * scsi_autopm_put_host() will trigger runtime idle if there is
	 * nothing else preventing suspending the device.
	 */
	pm_runtime_get_noresume(&shost->shost_gendev);
	pm_runtime_set_active(&shost->shost_gendev);
	pm_runtime_enable(&shost->shost_gendev);
	device_enable_async_suspend(&shost->shost_gendev);

	error = device_add(&shost->shost_gendev);
	if (error)
		goto out_disable_runtime_pm;

	scsi_host_set_state(shost, SHOST_RUNNING);
	get_device(shost->shost_gendev.parent);

	device_enable_async_suspend(&shost->shost_dev);

	get_device(&shost->shost_gendev);
	error = device_add(&shost->shost_dev);
	if (error)
		goto out_del_gendev;

	if (shost->transportt->host_size) {
		shost->shost_data = kzalloc(shost->transportt->host_size,
					 GFP_KERNEL);
		if (shost->shost_data == NULL) {
			error = -ENOMEM;
			goto out_del_dev;
		}
	}

	if (shost->transportt->create_work_queue) {
		shost->work_q = alloc_workqueue(
			"scsi_wq_%d",
			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
			shost->host_no);

		if (!shost->work_q) {
			error = -EINVAL;
			goto out_del_dev;
		}
	}

	error = scsi_sysfs_add_host(shost);
	if (error)
		goto out_del_dev;

	scsi_proc_host_add(shost);
	scsi_autopm_put_host(shost);
	return error;

	/*
	 * Any host allocation in this function will be freed in
	 * scsi_host_dev_release().
	 */
 out_del_dev:
	device_del(&shost->shost_dev);
 out_del_gendev:
	/*
	 * Host state is SHOST_RUNNING so we have to explicitly release
	 * ->shost_dev.
	 */
	put_device(&shost->shost_dev);
	device_del(&shost->shost_gendev);
 out_disable_runtime_pm:
	device_disable_async_suspend(&shost->shost_gendev);
	pm_runtime_disable(&shost->shost_gendev);
	pm_runtime_set_suspended(&shost->shost_gendev);
	pm_runtime_put_noidle(&shost->shost_gendev);
	kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
 fail:
	return error;
}
EXPORT_SYMBOL(scsi_add_host_with_dma);
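
/*
 * Example (illustrative sketch): a typical probe-time sequence in a
 * low-level driver. scsi_add_host() from <scsi/scsi_host.h> is the common
 * wrapper that uses the parent device as the DMA device; call
 * scsi_add_host_with_dma() directly only when the two differ. The foo_*
 * names and FOO_CAN_QUEUE are hypothetical.
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	shost->max_id = 16;
 *	shost->can_queue = FOO_CAN_QUEUE;
 *
 *	ret = scsi_add_host(shost, &pdev->dev);
 *	if (ret) {
 *		scsi_host_put(shost);	// releases the SHOST_CREATED host
 *		return ret;
 *	}
 *	scsi_scan_host(shost);
 */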

static void scsi_host_dev_release(struct device *dev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct device *parent = dev->parent;

	/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
	rcu_barrier();

	if (shost->tmf_work_q)
		destroy_workqueue(shost->tmf_work_q);
	if (shost->ehandler)
		kthread_stop(shost->ehandler);
	if (shost->work_q)
		destroy_workqueue(shost->work_q);

	if (shost->shost_state == SHOST_CREATED) {
		/*
		 * Free the shost_dev device name and remove the proc host dir
		 * here if scsi_host_{alloc,put}() have been called but neither
		 * scsi_host_add() nor scsi_remove_host() has been called.
		 * This avoids that the memory allocated for the shost_dev
		 * name as well as the proc dir structure are leaked.
		 */
		scsi_proc_hostdir_rm(shost->hostt);
		kfree(dev_name(&shost->shost_dev));
	}

	kfree(shost->shost_data);

	ida_free(&host_index_ida, shost->host_no);

	if (shost->shost_state != SHOST_CREATED)
		put_device(parent);
	kfree(shost);
}

static const struct device_type scsi_host_type = {
	.name =		"scsi_host",
	.release =	scsi_host_dev_release,
};

/**
 * scsi_host_alloc - allocate a scsi host adapter instance.
 * @sht:	pointer to scsi host template
 * @privsize:	extra bytes to allocate for driver
 *
 * Note:
 * 	Allocate a new Scsi_Host and perform basic initialization.
 * 	The host is not published to the scsi midlayer until scsi_add_host
 * 	is called.
 *
 * Return value:
 * 	Pointer to a new Scsi_Host
 **/
struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int privsize)
{
	struct Scsi_Host *shost;
	int index;

	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
	if (!shost)
		return NULL;

	shost->host_lock = &shost->default_lock;
	spin_lock_init(shost->host_lock);
	shost->shost_state = SHOST_CREATED;
	INIT_LIST_HEAD(&shost->__devices);
	INIT_LIST_HEAD(&shost->__targets);
	INIT_LIST_HEAD(&shost->eh_abort_list);
	INIT_LIST_HEAD(&shost->eh_cmd_q);
	INIT_LIST_HEAD(&shost->starved_list);
	init_waitqueue_head(&shost->host_wait);
	mutex_init(&shost->scan_mutex);

	index = ida_alloc(&host_index_ida, GFP_KERNEL);
	if (index < 0) {
		kfree(shost);
		return NULL;
	}
	shost->host_no = index;

	shost->dma_channel = 0xff;

	/* These three are default values which can be overridden */
	shost->max_channel = 0;
	shost->max_id = 8;
	shost->max_lun = 8;

	/* Give each shost a default transportt */
	shost->transportt = &blank_transport_template;

	/*
	 * All drivers right now should be able to handle 12 byte
	 * commands.  Every so often there are requests for 16 byte
	 * commands, but individual low-level drivers need to certify that
	 * they actually do something sensible with such commands.
	 */
	shost->max_cmd_len = 12;
	shost->hostt = sht;
	shost->this_id = sht->this_id;
	shost->can_queue = sht->can_queue;
	shost->sg_tablesize = sht->sg_tablesize;
	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
	shost->cmd_per_lun = sht->cmd_per_lun;
	shost->no_write_same = sht->no_write_same;
	shost->host_tagset = sht->host_tagset;
	shost->queuecommand_may_block = sht->queuecommand_may_block;

	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
		shost->eh_deadline = -1;
	else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
		shost_printk(KERN_WARNING, shost,
			     "eh_deadline %u too large, setting to %u\n",
			     shost_eh_deadline, INT_MAX / HZ);
		shost->eh_deadline = INT_MAX;
	} else
		shost->eh_deadline = shost_eh_deadline * HZ;

	if (sht->supported_mode == MODE_UNKNOWN)
		/* means we didn't set it ... default to INITIATOR */
		shost->active_mode = MODE_INITIATOR;
	else
		shost->active_mode = sht->supported_mode;

	if (sht->max_host_blocked)
		shost->max_host_blocked = sht->max_host_blocked;
	else
		shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;

	/*
	 * If the driver imposes no hard sector transfer limit, start at
	 * machine infinity initially.
	 */
	if (sht->max_sectors)
		shost->max_sectors = sht->max_sectors;
	else
		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

	if (sht->max_segment_size)
		shost->max_segment_size = sht->max_segment_size;
	else
		shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;

	/* A 4-byte (dword) alignment is a common minimum for HBAs. */
	if (sht->dma_alignment)
		shost->dma_alignment = sht->dma_alignment;
	else
		shost->dma_alignment = 3;

	/*
	 * assume a 4GB boundary, if not set
	 */
	if (sht->dma_boundary)
		shost->dma_boundary = sht->dma_boundary;
	else
		shost->dma_boundary = 0xffffffff;

	if (sht->virt_boundary_mask)
		shost->virt_boundary_mask = sht->virt_boundary_mask;

	device_initialize(&shost->shost_gendev);
	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
	shost->shost_gendev.bus = &scsi_bus_type;
	shost->shost_gendev.type = &scsi_host_type;
	scsi_enable_async_suspend(&shost->shost_gendev);

	device_initialize(&shost->shost_dev);
	shost->shost_dev.parent = &shost->shost_gendev;
	shost->shost_dev.class = &shost_class;
	dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
	shost->shost_dev.groups = sht->shost_groups;

	shost->ehandler = kthread_run(scsi_error_handler, shost,
			"scsi_eh_%d", shost->host_no);
	if (IS_ERR(shost->ehandler)) {
		shost_printk(KERN_WARNING, shost,
			"error handler thread failed to spawn, error = %ld\n",
			PTR_ERR(shost->ehandler));
		shost->ehandler = NULL;
		goto fail;
	}

	shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
					WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
					1, shost->host_no);
	if (!shost->tmf_work_q) {
		shost_printk(KERN_WARNING, shost,
			     "failed to create tmf workq\n");
		goto fail;
	}
	if (scsi_proc_hostdir_add(shost->hostt) < 0)
		goto fail;
	return shost;
 fail:
	/*
	 * Host state is still SHOST_CREATED and that is enough to release
	 * ->shost_gendev. scsi_host_dev_release() will free
	 * dev_name(&shost->shost_dev).
	 */
	put_device(&shost->shost_gendev);

	return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
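
/*
 * Example (illustrative sketch): @privsize reserves per-host driver data
 * directly behind struct Scsi_Host, reachable via shost_priv(). The
 * struct/template names and the regs/res_* values are hypothetical
 * placeholders.
 *
 *	struct foo_hba {
 *		void __iomem *regs;
 *	};
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	if (shost) {
 *		struct foo_hba *hba = shost_priv(shost);
 *
 *		hba->regs = ioremap(res_start, res_len);
 *	}
 */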

static int __scsi_host_match(struct device *dev, const void *data)
{
	struct Scsi_Host *p;
	const unsigned int *hostnum = data;

	p = class_to_shost(dev);
	return p->host_no == *hostnum;
}

/**
 * scsi_host_lookup - get a reference to a Scsi_Host by host no
 * @hostnum:	host number to locate
 *
 * Return value:
 *	A pointer to located Scsi_Host or NULL.
 *
 * The caller must do a scsi_host_put() to drop the reference
 * that scsi_host_get() took. The put_device() below dropped
 * the reference from class_find_device().
 **/
struct Scsi_Host *scsi_host_lookup(unsigned int hostnum)
{
	struct device *cdev;
	struct Scsi_Host *shost = NULL;

	cdev = class_find_device(&shost_class, NULL, &hostnum,
				 __scsi_host_match);
	if (cdev) {
		shost = scsi_host_get(class_to_shost(cdev));
		put_device(cdev);
	}
	return shost;
}
EXPORT_SYMBOL(scsi_host_lookup);
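
/*
 * Example (illustrative sketch): user-triggered paths that are handed a
 * host number (for instance an ioctl) look the host up and must drop the
 * reference when done. host_no is a hypothetical caller-supplied value.
 *
 *	struct Scsi_Host *shost = scsi_host_lookup(host_no);
 *
 *	if (!shost)
 *		return -ENODEV;
 *	// ... operate on shost ...
 *	scsi_host_put(shost);
 */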

/**
 * scsi_host_get - inc a Scsi_Host ref count
 * @shost:	Pointer to Scsi_Host to inc.
 **/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{
	if ((shost->shost_state == SHOST_DEL) ||
		!get_device(&shost->shost_gendev))
		return NULL;
	return shost;
}
EXPORT_SYMBOL(scsi_host_get);

static bool scsi_host_check_in_flight(struct request *rq, void *data)
{
	int *count = data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		(*count)++;

	return true;
}

/**
 * scsi_host_busy - Return the host busy counter
 * @shost:	Pointer to Scsi_Host to check.
 **/
int scsi_host_busy(struct Scsi_Host *shost)
{
	int cnt = 0;

	blk_mq_tagset_busy_iter(&shost->tag_set,
				scsi_host_check_in_flight, &cnt);
	return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);
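
/*
 * Example (illustrative sketch): the in-flight count is roughly what the
 * midlayer's error handling compares against shost->host_failed to decide
 * when all outstanding commands have been handed to the error handler (see
 * scsi_eh_wakeup() in scsi_error.c); drivers may also read it purely for
 * diagnostics.
 *
 *	if (scsi_host_busy(shost) == shost->host_failed)
 *		wake_up_process(shost->ehandler);
 */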

/**
 * scsi_host_put - dec a Scsi_Host ref count
 * @shost:	Pointer to Scsi_Host to dec.
 **/
void scsi_host_put(struct Scsi_Host *shost)
{
	put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);

int scsi_init_hosts(void)
{
	return class_register(&shost_class);
}

void scsi_exit_hosts(void)
{
	class_unregister(&shost_class);
	ida_destroy(&host_index_ida);
}

int scsi_is_host_device(const struct device *dev)
{
	return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);

/**
 * scsi_queue_work - Queue work to the Scsi_Host workqueue.
 * @shost:	Pointer to Scsi_Host.
 * @work:	Work to queue for execution.
 *
 * Return value:
 * 	1 - work queued for execution
 *	0 - work is already queued
 *	-EINVAL - work queue doesn't exist
 **/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
	if (unlikely(!shost->work_q)) {
		shost_printk(KERN_ERR, shost,
			"ERROR: Scsi host '%s' attempted to queue scsi-work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();

		return -EINVAL;
	}

	return queue_work(shost->work_q, work);
}
EXPORT_SYMBOL_GPL(scsi_queue_work);
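
/*
 * Example (illustrative sketch): transports whose template sets
 * ->create_work_queue use this to run deferred per-host work, typically
 * flushing it with scsi_flush_work() before tearing the object down. The
 * rport/foo_rport_work names are hypothetical.
 *
 *	INIT_WORK(&rport->work, foo_rport_work);
 *	scsi_queue_work(shost, &rport->work);
 *	// ... later, before freeing rport ...
 *	scsi_flush_work(shost);
 */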

/**
 * scsi_flush_work - Flush a Scsi_Host's workqueue.
 * @shost:	Pointer to Scsi_Host.
 **/
void scsi_flush_work(struct Scsi_Host *shost)
{
	if (!shost->work_q) {
		shost_printk(KERN_ERR, shost,
			"ERROR: Scsi host '%s' attempted to flush scsi-work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();
		return;
	}

	flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);

static bool complete_all_cmds_iter(struct request *rq, void *data)
{
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	enum scsi_host_status status = *(enum scsi_host_status *)data;

	scsi_dma_unmap(scmd);
	scmd->result = 0;
	set_host_byte(scmd, status);
	scsi_done(scmd);
	return true;
}

/**
 * scsi_host_complete_all_commands - Terminate all running commands
 * @shost:	Scsi Host on which commands should be terminated
 * @status:	Status to be set for the terminated commands
 *
 * There is no protection against modification of the number
 * of outstanding commands. It is the responsibility of the
 * caller to ensure that concurrent I/O submission and/or
 * completion is stopped when calling this function.
 */
void scsi_host_complete_all_commands(struct Scsi_Host *shost,
				     enum scsi_host_status status)
{
	blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
				&status);
}
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
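
/*
 * Example (illustrative sketch): a driver that has decided its controller
 * is dead can fail back every outstanding command before removing the host,
 * provided it has already stopped new submissions and hardware completions.
 * foo_disable_interrupts() is a hypothetical quiesce step.
 *
 *	foo_disable_interrupts(hba);
 *	scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
 *	scsi_remove_host(shost);
 */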

struct scsi_host_busy_iter_data {
	bool (*fn)(struct scsi_cmnd *, void *);
	void *priv;
};

static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
{
	struct scsi_host_busy_iter_data *iter_data = priv;
	struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);

	return iter_data->fn(sc, iter_data->priv);
}

/**
 * scsi_host_busy_iter - Iterate over all busy commands
 * @shost:	Pointer to Scsi_Host.
 * @fn:		Function to call on each busy command
 * @priv:	Data pointer passed to @fn
 *
 * If locking against concurrent command completions is required
 * it has to be provided by the caller.
 **/
void scsi_host_busy_iter(struct Scsi_Host *shost,
			 bool (*fn)(struct scsi_cmnd *, void *),
			 void *priv)
{
	struct scsi_host_busy_iter_data iter_data = {
		.fn = fn,
		.priv = priv,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn,
				&iter_data);
}
EXPORT_SYMBOL_GPL(scsi_host_busy_iter);
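
/*
 * Example (illustrative sketch): counting the busy commands queued to one
 * scsi_device. The callback must return true to keep iterating. The foo_*
 * names are hypothetical.
 *
 *	struct foo_iter {
 *		struct scsi_device *sdev;
 *		unsigned int count;
 *	};
 *
 *	static bool foo_count_for_sdev(struct scsi_cmnd *scmd, void *data)
 *	{
 *		struct foo_iter *it = data;
 *
 *		if (scmd->device == it->sdev)
 *			it->count++;
 *		return true;		// continue iterating
 *	}
 *
 *	struct foo_iter it = { .sdev = sdev };
 *
 *	scsi_host_busy_iter(shost, foo_count_for_sdev, &it);
 */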