xref: /linux/drivers/scsi/libsas/sas_scsi_host.c (revision 2da68a77)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Serial Attached SCSI (SAS) class SCSI Host glue.
4  *
5  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
6  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7  */
8 
9 #include <linux/kthread.h>
10 #include <linux/firmware.h>
11 #include <linux/export.h>
12 #include <linux/ctype.h>
13 #include <linux/kernel.h>
14 
15 #include "sas_internal.h"
16 
17 #include <scsi/scsi_host.h>
18 #include <scsi/scsi_device.h>
19 #include <scsi/scsi_tcq.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_eh.h>
22 #include <scsi/scsi_transport.h>
23 #include <scsi/scsi_transport_sas.h>
24 #include <scsi/sas_ata.h>
25 #include "scsi_sas_internal.h"
26 #include "scsi_transport_api.h"
27 #include "scsi_priv.h"
28 
29 #include <linux/err.h>
30 #include <linux/blkdev.h>
31 #include <linux/freezer.h>
32 #include <linux/gfp.h>
33 #include <linux/scatterlist.h>
34 #include <linux/libata.h>
35 
36 /* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	enum scsi_host_status hs = DID_OK;
	enum exec_status stat = SAS_SAM_STAT_GOOD;

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			/* link/device level failures map to "no connect" */
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			/* record the residual; it is only an error if the
			 * transferred amount fell below the command's
			 * underflow threshold
			 */
			scsi_set_resid(sc, ts->residual);
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			/* only valid for STP/ATA; an SSP LLDD sending this is
			 * a driver bug worth reporting
			 */
			pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
				  task->dev->port->ha->sas_ha_name);
			break;
		case SAS_ABORTED_TASK:
			hs = DID_ABORT;
			break;
		case SAS_SAM_STAT_CHECK_CONDITION:
			/* copy as much sense data as fits in the midlayer's
			 * sense buffer
			 */
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAS_SAM_STAT_CHECK_CONDITION;
			break;
		default:
			/* pass any other SAM status straight through */
			stat = ts->stat;
			break;
		}
	}

	/* combine host byte and SAM status, detach and free the task */
	sc->result = (hs << 16) | stat;
	ASSIGN_SAS_TASK(sc, NULL);
	sas_free_task(task);
}
98 
/* LLDD completion callback for a normal (non-slow) sas_task.  Races with
 * the error handler: while SAS_HA_FROZEN is set, eh owns task lifetime and
 * this callback must not complete or free the task.
 */
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	/* done_lock arbitrates the completion-vs-eh race: either eh has
	 * frozen the HA (we back off), or we detach the task from the
	 * command so eh will see it as already completed
	 */
	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;
	else
		ASSIGN_SAS_TASK(sc, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		pr_debug("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		pr_debug("task_done called with non existing SCSI cmnd!\n");
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);
	scsi_done(sc);
}
128 
129 static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
130 					       struct domain_device *dev,
131 					       gfp_t gfp_flags)
132 {
133 	struct sas_task *task = sas_alloc_task(gfp_flags);
134 	struct scsi_lun lun;
135 
136 	if (!task)
137 		return NULL;
138 
139 	task->uldd_task = cmd;
140 	ASSIGN_SAS_TASK(cmd, task);
141 
142 	task->dev = dev;
143 	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
144 
145 	task->ssp_task.retry_count = 1;
146 	int_to_scsilun(cmd->device->lun, &lun);
147 	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
148 	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
149 	task->ssp_task.cmd = cmd;
150 
151 	task->scatter = scsi_sglist(cmd);
152 	task->num_scatter = scsi_sg_count(cmd);
153 	task->total_xfer_len = scsi_bufflen(cmd);
154 	task->data_dir = cmd->sc_data_direction;
155 
156 	task->task_done = sas_scsi_task_done;
157 
158 	return task;
159 }
160 
/* queuecommand entry point: route SATA commands to libata, build and hand
 * SSP commands to the LLDD.  Returns 0 (command owned/completed) or a
 * SCSI_MLQUEUE_* busy code.
 */
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		/* SATA goes through libata's qc machinery, under the
		 * ata port lock
		 */
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;

	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	pr_debug("lldd_execute_task returned: %d\n", res);
	/* detach the task from the command before freeing it */
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	scsi_done(cmd);
	return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
203 
/* Retire a command during error handling: record its final status, then
 * either defer it to libata (SATA) or move it to the eh done queue.
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD.  Task is freed after this.
	 */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* defer commands to libata so that libata EH can
		 * handle ata qcs correctly
		 */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
230 
231 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
232 {
233 	struct scsi_cmnd *cmd, *n;
234 
235 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
236 		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
237 		    cmd->device->lun == my_cmd->device->lun)
238 			sas_eh_finish_cmd(cmd);
239 	}
240 }
241 
242 static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
243 				     struct domain_device *dev)
244 {
245 	struct scsi_cmnd *cmd, *n;
246 
247 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
248 		struct domain_device *x = cmd_to_domain_dev(cmd);
249 
250 		if (x == dev)
251 			sas_eh_finish_cmd(cmd);
252 	}
253 }
254 
255 static void sas_scsi_clear_queue_port(struct list_head *error_q,
256 				      struct asd_sas_port *port)
257 {
258 	struct scsi_cmnd *cmd, *n;
259 
260 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
261 		struct domain_device *dev = cmd_to_domain_dev(cmd);
262 		struct asd_sas_port *x = dev->port;
263 
264 		if (x == port)
265 			sas_eh_finish_cmd(cmd);
266 	}
267 }
268 
/* outcome of trying to locate/abort a task during error handling */
enum task_disposition {
	TASK_IS_DONE,		/* LLDD marked the task done */
	TASK_IS_ABORTED,	/* abort TMF completed */
	TASK_IS_AT_LU,		/* query reports task still at the LU */
	TASK_IS_NOT_AT_LU,	/* query reports the LU no longer has it */
	TASK_ABORT_FAILED,	/* abort and query attempts exhausted */
};
276 
/* Try (up to 5 times) to abort @task via the LLDD and, failing that,
 * query where the task currently is.  Returns a task_disposition that
 * drives the escalation ladder in sas_eh_handle_sas_errors().
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->core.shost->transportt);

	for (i = 0; i < 5; i++) {
		pr_notice("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		/* the abort attempt may have raced with completion */
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			pr_debug("%s: task 0x%p is done\n", __func__, task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			/* abort failed: ask the LLDD where the task is */
			pr_notice("%s: querying task 0x%p\n", __func__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				pr_notice("%s: task 0x%p at LU\n", __func__,
					  task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				pr_notice("%s: task 0x%p not at LU\n",
					  __func__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				pr_notice("%s: task 0x%p failed to abort\n",
					  __func__, task);
				return TASK_ABORT_FAILED;
			default:
				/* unknown result: fall through and retry */
				pr_notice("%s: task 0x%p result code %d not handled\n",
					  __func__, task, res);
			}
		}
	}
	return TASK_ABORT_FAILED;
}
324 
325 static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
326 {
327 	int res = TMF_RESP_FUNC_FAILED;
328 	struct scsi_lun lun;
329 	struct sas_internal *i =
330 		to_sas_internal(dev->port->ha->core.shost->transportt);
331 
332 	int_to_scsilun(cmd->device->lun, &lun);
333 
334 	pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
335 		  SAS_ADDR(dev->sas_addr),
336 		  cmd->device->lun);
337 
338 	if (i->dft->lldd_abort_task_set)
339 		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
340 
341 	if (res == TMF_RESP_FUNC_FAILED) {
342 		if (i->dft->lldd_clear_task_set)
343 			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
344 	}
345 
346 	if (res == TMF_RESP_FUNC_FAILED) {
347 		if (i->dft->lldd_lu_reset)
348 			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
349 	}
350 
351 	return res;
352 }
353 
354 static int sas_recover_I_T(struct domain_device *dev)
355 {
356 	int res = TMF_RESP_FUNC_FAILED;
357 	struct sas_internal *i =
358 		to_sas_internal(dev->port->ha->core.shost->transportt);
359 
360 	pr_notice("I_T nexus reset for dev %016llx\n",
361 		  SAS_ADDR(dev->sas_addr));
362 
363 	if (i->dft->lldd_I_T_nexus_reset)
364 		res = i->dft->lldd_I_T_nexus_reset(dev);
365 
366 	return res;
367 }
368 
/* take a reference on the last known good phy for this device; the
 * caller drops the reference with sas_put_local_phy()/put_device()
 */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_phy *phy;
	unsigned long flags;

	/* a published domain device always has a valid phy, it may be
	 * stale, but it is never NULL
	 */
	BUG_ON(!dev->phy);

	/* phy_port_lock keeps dev->phy stable while we take the ref */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	phy = dev->phy;
	get_device(&phy->dev);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
389 
/* Block until any pending error handling for @dev has completed.  SATA
 * devices delegate to libata; SSP devices sleep on ha->eh_wait_q until
 * SAS_DEV_EH_PENDING clears, then re-check that SCSI EH has drained.
 */
static void sas_wait_eh(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	DEFINE_WAIT(wait);

	if (dev_is_sata(dev)) {
		ata_port_wait_eh(dev->sata_dev.ap);
		return;
	}
 retry:
	spin_lock_irq(&ha->lock);

	while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
		/* drop the lock while sleeping; woken by the eh thread */
		prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ha->lock);
		schedule();
		spin_lock_irq(&ha->lock);
	}
	finish_wait(&ha->eh_wait_q, &wait);

	spin_unlock_irq(&ha->lock);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ha->core.shost)) {
		msleep(10);
		goto retry;
	}
}
418 
/* Queue a LU or device reset for execution by the SCSI error handler
 * thread (see sas_eh_handle_resets()), optionally waiting for it to
 * finish.  Returns SUCCESS if the reset was scheduled, FAILED otherwise.
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type,
			   u64 lun, int wait)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		if (wait)
			sas_ata_wait_eh(dev);
		return SUCCESS;
	}

	/* retry while a previous reset of the same type is still pending */
	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->core.shost);
		}
		spin_unlock_irq(&ha->lock);

		if (wait)
			sas_wait_eh(dev);

		if (scheduled)
			return SUCCESS;
	}

	pr_warn("%s reset of %s failed\n",
		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		dev_name(&dev->rphy->dev));

	return FAILED;
}
460 
/* eh_abort_handler: try to abort a single command via the LLDD.
 * Returns SUCCESS or FAILED.
 */
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);
	unsigned long flags;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	spin_lock_irqsave(host->host_lock, flags);
	/* We cannot do async aborts for SATA devices */
	if (dev_is_sata(dev) && !host->host_eh_scheduled) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return FAILED;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	/* the task may already have completed and been detached */
	if (task)
		res = i->dft->lldd_abort_task(task);
	else
		pr_notice("no task to abort\n");
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
491 
492 /* Attempt to send a LUN reset message to a device */
493 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
494 {
495 	int res;
496 	struct scsi_lun lun;
497 	struct Scsi_Host *host = cmd->device->host;
498 	struct domain_device *dev = cmd_to_domain_dev(cmd);
499 	struct sas_internal *i = to_sas_internal(host->transportt);
500 
501 	if (current != host->ehandler)
502 		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
503 
504 	int_to_scsilun(cmd->device->lun, &lun);
505 
506 	if (!i->dft->lldd_lu_reset)
507 		return FAILED;
508 
509 	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
510 	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
511 		return SUCCESS;
512 
513 	return FAILED;
514 }
515 EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
516 
517 int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
518 {
519 	int res;
520 	struct Scsi_Host *host = cmd->device->host;
521 	struct domain_device *dev = cmd_to_domain_dev(cmd);
522 	struct sas_internal *i = to_sas_internal(host->transportt);
523 
524 	if (current != host->ehandler)
525 		return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
526 
527 	if (!i->dft->lldd_I_T_nexus_reset)
528 		return FAILED;
529 
530 	res = i->dft->lldd_I_T_nexus_reset(dev);
531 	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
532 	    res == -ENODEV)
533 		return SUCCESS;
534 
535 	return FAILED;
536 }
537 EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
538 
539 /* Try to reset a device */
540 static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
541 {
542 	int res;
543 	struct Scsi_Host *shost = cmd->device->host;
544 
545 	if (!shost->hostt->eh_device_reset_handler)
546 		goto try_target_reset;
547 
548 	res = shost->hostt->eh_device_reset_handler(cmd);
549 	if (res == SUCCESS)
550 		return res;
551 
552 try_target_reset:
553 	if (shost->hostt->eh_target_reset_handler)
554 		return shost->hostt->eh_target_reset_handler(cmd);
555 
556 	return FAILED;
557 }
558 
/* Core libsas error recovery: for each failed command still owning a
 * sas_task, walk the escalation ladder (abort task -> LU recovery ->
 * I_T recovery -> clear port nexus -> clear HA nexus).  Commands whose
 * tasks completed in the meantime are restored to @work_q for generic
 * SCSI EH.  Runs with SAS_HA_FROZEN set by the caller.
 */
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	unsigned long flags;
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(done);

	/* clean out any commands that won the completion vs eh race */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct sas_task *task;

		spin_lock_irqsave(&dev->done_lock, flags);
		/* by this point the lldd has either observed
		 * SAS_HA_FROZEN and is leaving the task alone, or has
		 * won the race with eh and decided to complete it
		 */
		task = TO_SAS_TASK(cmd);
		spin_unlock_irqrestore(&dev->done_lock, flags);

		if (!task)
			list_move_tail(&cmd->eh_entry, &done);
	}

 Again:
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (need_reset) {
			/* LLDD asked for a reset: skip straight to LU recovery */
			pr_notice("%s: task 0x%p requests reset\n",
				  __func__, task);
			goto reset;
		}

		pr_debug("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		switch (res) {
		case TASK_IS_DONE:
			pr_notice("%s: task 0x%p is done\n", __func__,
				    task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_ABORTED:
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_AT_LU:
			pr_info("task 0x%p is at LU: lu recover\n", task);
 reset:
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				pr_notice("dev %016llx LU 0x%llx is recovered\n",
					  SAS_ADDR(task->dev),
					  cmd->device->lun);
				sas_eh_finish_cmd(cmd);
				/* retire everything else queued to this LU,
				 * then restart the scan since the list changed
				 */
				sas_scsi_clear_queue_lu(work_q, cmd);
				goto Again;
			}
			fallthrough;
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			pr_notice("task 0x%p is not at LU: I_T recover\n",
				  task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
			    tmf_resp == -ENODEV) {
				struct domain_device *dev = task->dev;
				pr_notice("I_T %016llx recovered\n",
					  SAS_ADDR(task->dev->sas_addr));
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_I_T(work_q, dev);
				goto Again;
			}
			/* Hammer time :-) */
			try_to_reset_cmd_device(cmd);
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				pr_debug("clearing nexus for port:%d\n",
					  port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus port:%d succeeded\n",
						  port->id);
					sas_eh_finish_cmd(cmd);
					sas_scsi_clear_queue_port(work_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				pr_debug("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus ha succeeded\n");
					sas_eh_finish_cmd(cmd);
					goto clear_q;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors.  Quite
			 * possibly the HA just disappeared.
			 */
			pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
			       SAS_ADDR(task->dev->sas_addr),
			       cmd->device->lun);

			sas_eh_finish_cmd(cmd);
			goto clear_q;
		}
	}
 out:
	/* hand taskless commands back, plus anything deferred to libata */
	list_splice_tail(&done, work_q);
	list_splice_tail_init(&ha->eh_ata_q, work_q);
	return;

 clear_q:
	pr_debug("--- Exit %s -- clear_q\n", __func__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
		sas_eh_finish_cmd(cmd);
	goto out;
}
691 
/* Execute the directed LU/device resets queued by sas_queue_reset(),
 * draining ha->eh_dev_q.  The ha->lock is dropped around the LLDD
 * calls, which may sleep.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		/* hold a ref so the device survives while unlocked */
		kref_get(&dev->kref);
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);
}
724 
725 
/* Strategy handler for the SCSI EH thread: freeze the HA, run libsas
 * task recovery, then generic SCSI EH, directed resets and libata EH.
 * Loops until no new eh work was scheduled during the pass.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	pr_notice("Enter %s busy: %d failed: %d\n",
		  __func__, scsi_host_busy(shost), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have a an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
		  __func__, scsi_host_busy(shost),
		  shost->host_failed, tries);
}
786 
787 int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
788 {
789 	struct domain_device *dev = sdev_to_domain_dev(sdev);
790 
791 	if (dev_is_sata(dev))
792 		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
793 
794 	return -EINVAL;
795 }
796 EXPORT_SYMBOL_GPL(sas_ioctl);
797 
/* Look up the domain device bound to @rphy by scanning every port's
 * device list.  Returns the device or NULL; no reference is taken --
 * callers that keep the pointer must kref_get() it themselves.
 */
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	/* phy_port_lock guards the port array, dev_list_lock each list */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
 found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}
826 
827 int sas_target_alloc(struct scsi_target *starget)
828 {
829 	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
830 	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
831 
832 	if (!found_dev)
833 		return -ENODEV;
834 
835 	kref_get(&found_dev->kref);
836 	starget->hostdata = found_dev;
837 	return 0;
838 }
839 EXPORT_SYMBOL_GPL(sas_target_alloc);
840 
841 #define SAS_DEF_QD 256
842 
843 int sas_slave_configure(struct scsi_device *scsi_dev)
844 {
845 	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
846 
847 	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
848 
849 	if (dev_is_sata(dev)) {
850 		ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
851 		return 0;
852 	}
853 
854 	sas_read_port_mode_page(scsi_dev);
855 
856 	if (scsi_dev->tagged_supported) {
857 		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
858 	} else {
859 		pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
860 			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
861 		scsi_change_queue_depth(scsi_dev, 1);
862 	}
863 
864 	scsi_dev->allow_restart = 1;
865 
866 	return 0;
867 }
868 EXPORT_SYMBOL_GPL(sas_slave_configure);
869 
870 int sas_change_queue_depth(struct scsi_device *sdev, int depth)
871 {
872 	struct domain_device *dev = sdev_to_domain_dev(sdev);
873 
874 	if (dev_is_sata(dev))
875 		return ata_change_queue_depth(dev->sata_dev.ap,
876 					      sas_to_ata_dev(dev), sdev, depth);
877 
878 	if (!sdev->tagged_supported)
879 		depth = 1;
880 	return scsi_change_queue_depth(sdev, depth);
881 }
882 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
883 
884 int sas_bios_param(struct scsi_device *scsi_dev,
885 			  struct block_device *bdev,
886 			  sector_t capacity, int *hsc)
887 {
888 	hsc[0] = 255;
889 	hsc[1] = 63;
890 	sector_div(capacity, 255*63);
891 	hsc[2] = capacity;
892 
893 	return 0;
894 }
895 EXPORT_SYMBOL_GPL(sas_bios_param);
896 
/* Completion callback for slow (internal) tasks: stop the watchdog
 * timer, then wake the waiter in sas_execute_internal_abort()/
 * sas_execute_tmf().
 */
void sas_task_internal_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
902 
903 void sas_task_internal_timedout(struct timer_list *t)
904 {
905 	struct sas_task_slow *slow = from_timer(slow, t, timer);
906 	struct sas_task *task = slow->task;
907 	bool is_completed = true;
908 	unsigned long flags;
909 
910 	spin_lock_irqsave(&task->task_state_lock, flags);
911 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
912 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
913 		is_completed = false;
914 	}
915 	spin_unlock_irqrestore(&task->task_state_lock, flags);
916 
917 	if (!is_completed)
918 		complete(&task->slow_task->completion);
919 }
920 
#define TASK_TIMEOUT			(20 * HZ)
#define TASK_RETRY			3

/* Issue an internal abort (single tag or whole device, per @type)
 * through the LLDD, retrying up to TASK_RETRY times with a
 * TASK_TIMEOUT watchdog per attempt.  Returns a TMF_* code, -ENOMEM on
 * allocation failure, -EIO on timeout, or the LLDD's execute error.
 */
static int sas_execute_internal_abort(struct domain_device *device,
				      enum sas_internal_abort type, u16 tag,
				      unsigned int qid, void *data)
{
	struct sas_ha_struct *ha = device->port->ha;
	struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
	struct sas_task *task = NULL;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
		task->task_done = sas_task_internal_done;
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		task->abort_task.tag = tag;
		task->abort_task.type = type;
		task->abort_task.qid = qid;

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			del_timer_sync(&task->slow_task->timer);
			pr_err("Executing internal abort failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;

		/* Even if the internal abort timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			bool quit = true;

			/* let the LLDD decide whether a timeout is fatal */
			if (i->dft->lldd_abort_timeout)
				quit = i->dft->lldd_abort_timeout(task, data);
			else
				pr_err("Internal abort: timeout %016llx\n",
				       SAS_ADDR(device->sas_addr));
			res = -EIO;
			if (quit)
				break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		/* unexpected response: free this attempt's task and retry */
		pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
		       SAS_ADDR(device->sas_addr), task->task_status.resp,
		       task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
	BUG_ON(retry == TASK_RETRY && task != NULL);
	sas_free_task(task);
	return res;
}
996 
/* Internally abort the single outstanding command identified by @tag
 * on queue @qid; @data is passed to the LLDD's abort-timeout hook.
 */
int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
				      unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
					  tag, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
1004 
/* Internally abort all outstanding commands for @device on queue @qid;
 * @data is passed to the LLDD's abort-timeout hook.
 */
int sas_execute_internal_abort_dev(struct domain_device *device,
				   unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
					  SCSI_NO_TAG, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
1012 
/**
 * sas_execute_tmf - deliver a task management function (TMF) to a device
 * @device: device the TMF is addressed to
 * @parameter: frame to send: a SATA FIS for ATA devices, otherwise a
 *             struct sas_ssp_task
 * @para_len: length in bytes of @parameter
 * @force_phy_id: for ATA devices, if >= 0, the phy id to force the task
 *                out of; ignored for SSP devices
 * @tmf: TMF descriptor attached to the internally allocated task
 *
 * Allocates a slow task, arms a TASK_TIMEOUT timer and hands the task
 * to the LLDD, then waits for completion.  Unrecognized completion
 * statuses are retried up to TASK_RETRY times.
 *
 * Return: TMF_RESP_FUNC_COMPLETE or TMF_RESP_FUNC_SUCC on success,
 * TMF_RESP_FUNC_FAILED on TMF failure, a negative errno on allocation
 * or delivery errors, or the residual byte count when the TMF completed
 * with a data underrun.
 */
int sas_execute_tmf(struct domain_device *device, void *parameter,
		    int para_len, int force_phy_id,
		    struct sas_tmf_task *tmf)
{
	struct sas_task *task;
	struct sas_internal *i =
		to_sas_internal(device->port->ha->core.shost->transportt);
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		/* @parameter is protocol dependent: FIS for ATA, SSP frame otherwise */
		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			if (force_phy_id >= 0) {
				task->ata_task.force_phy = true;
				task->ata_task.force_phy_id = force_phy_id;
			}
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}

		task->task_done = sas_task_internal_done;
		task->tmf = tmf;

		/* Arm the timeout before handing the task to the LLDD. */
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			/* Never delivered: disarm the timer and bail out. */
			del_timer_sync(&task->slow_task->timer);
			pr_err("executing TMF task failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		/* Signalled by sas_task_internal_done() or the timeout handler. */
		wait_for_completion(&task->slow_task->completion);

		if (i->dft->lldd_tmf_exec_complete)
			i->dft->lldd_tmf_exec_complete(device);

		res = TMF_RESP_FUNC_FAILED;

		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				/* Timed out without ever completing: give up. */
				pr_err("TMF task timeout for %016llx and not done\n",
				       SAS_ADDR(device->sas_addr));
				if (i->dft->lldd_tmf_aborted)
					i->dft->lldd_tmf_aborted(task);
				break;
			}
			/* Timed out but the task did complete; fall through
			 * and inspect the completion status.
			 */
			pr_warn("TMF task timeout for %016llx and done\n",
				SAS_ADDR(device->sas_addr));
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			pr_warn("TMF task blocked task error %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			pr_warn("TMF task open reject failed  %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EIO;
		} else {
			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
		}
		/* Open reject or unrecognized status: free and retry. */
		sas_free_task(task);
		task = NULL;
	}

	if (retry == TASK_RETRY)
		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
			SAS_ADDR(device->sas_addr), TASK_RETRY);
	/* task is NULL here unless we broke out of the loop. */
	sas_free_task(task);

	return res;
}
1130 
1131 static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
1132 			       struct sas_tmf_task *tmf)
1133 {
1134 	struct sas_ssp_task ssp_task;
1135 
1136 	if (!(device->tproto & SAS_PROTOCOL_SSP))
1137 		return TMF_RESP_FUNC_ESUPP;
1138 
1139 	memcpy(ssp_task.LUN, lun, 8);
1140 
1141 	return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
1142 }
1143 
1144 int sas_abort_task_set(struct domain_device *dev, u8 *lun)
1145 {
1146 	struct sas_tmf_task tmf_task = {
1147 		.tmf = TMF_ABORT_TASK_SET,
1148 	};
1149 
1150 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1151 }
1152 EXPORT_SYMBOL_GPL(sas_abort_task_set);
1153 
1154 int sas_clear_task_set(struct domain_device *dev, u8 *lun)
1155 {
1156 	struct sas_tmf_task tmf_task = {
1157 		.tmf = TMF_CLEAR_TASK_SET,
1158 	};
1159 
1160 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1161 }
1162 EXPORT_SYMBOL_GPL(sas_clear_task_set);
1163 
1164 int sas_lu_reset(struct domain_device *dev, u8 *lun)
1165 {
1166 	struct sas_tmf_task tmf_task = {
1167 		.tmf = TMF_LU_RESET,
1168 	};
1169 
1170 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1171 }
1172 EXPORT_SYMBOL_GPL(sas_lu_reset);
1173 
1174 int sas_query_task(struct sas_task *task, u16 tag)
1175 {
1176 	struct sas_tmf_task tmf_task = {
1177 		.tmf = TMF_QUERY_TASK,
1178 		.tag_of_task_to_be_managed = tag,
1179 	};
1180 	struct scsi_cmnd *cmnd = task->uldd_task;
1181 	struct domain_device *dev = task->dev;
1182 	struct scsi_lun lun;
1183 
1184 	int_to_scsilun(cmnd->device->lun, &lun);
1185 
1186 	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1187 }
1188 EXPORT_SYMBOL_GPL(sas_query_task);
1189 
1190 int sas_abort_task(struct sas_task *task, u16 tag)
1191 {
1192 	struct sas_tmf_task tmf_task = {
1193 		.tmf = TMF_ABORT_TASK,
1194 		.tag_of_task_to_be_managed = tag,
1195 	};
1196 	struct scsi_cmnd *cmnd = task->uldd_task;
1197 	struct domain_device *dev = task->dev;
1198 	struct scsi_lun lun;
1199 
1200 	int_to_scsilun(cmnd->device->lun, &lun);
1201 
1202 	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1203 }
1204 EXPORT_SYMBOL_GPL(sas_abort_task);
1205 
1206 /*
1207  * Tell an upper layer that it needs to initiate an abort for a given task.
1208  * This should only ever be called by an LLDD.
1209  */
1210 void sas_task_abort(struct sas_task *task)
1211 {
1212 	struct scsi_cmnd *sc = task->uldd_task;
1213 
1214 	/* Escape for libsas internal commands */
1215 	if (!sc) {
1216 		struct sas_task_slow *slow = task->slow_task;
1217 
1218 		if (!slow)
1219 			return;
1220 		if (!del_timer(&slow->timer))
1221 			return;
1222 		slow->timer.function(&slow->timer);
1223 		return;
1224 	}
1225 
1226 	if (dev_is_sata(task->dev))
1227 		sas_ata_task_abort(task);
1228 	else
1229 		blk_abort_request(scsi_cmd_to_rq(sc));
1230 }
1231 EXPORT_SYMBOL_GPL(sas_task_abort);
1232 
1233 int sas_slave_alloc(struct scsi_device *sdev)
1234 {
1235 	if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
1236 		return -ENXIO;
1237 
1238 	return 0;
1239 }
1240 EXPORT_SYMBOL_GPL(sas_slave_alloc);
1241 
1242 void sas_target_destroy(struct scsi_target *starget)
1243 {
1244 	struct domain_device *found_dev = starget->hostdata;
1245 
1246 	if (!found_dev)
1247 		return;
1248 
1249 	starget->hostdata = NULL;
1250 	sas_put_device(found_dev);
1251 }
1252 EXPORT_SYMBOL_GPL(sas_target_destroy);
1253 
1254 #define SAS_STRING_ADDR_SIZE	16
1255 
1256 int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
1257 {
1258 	int res;
1259 	const struct firmware *fw;
1260 
1261 	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
1262 	if (res)
1263 		return res;
1264 
1265 	if (fw->size < SAS_STRING_ADDR_SIZE) {
1266 		res = -ENODEV;
1267 		goto out;
1268 	}
1269 
1270 	res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
1271 	if (res)
1272 		goto out;
1273 
1274 out:
1275 	release_firmware(fw);
1276 	return res;
1277 }
1278 EXPORT_SYMBOL_GPL(sas_request_addr);
1279 
1280