1 /*
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <linux/blk-mq-pci.h>
58 #include <asm/unaligned.h>
59
60 #include "mpt3sas_base.h"
61
62 #define RAID_CHANNEL 1
63
64 #define PCIE_CHANNEL 2
65
66 /* forward proto's */
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
70
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
78 static void
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
86
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
92
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
102 static int mpt2_ids;
103 static int mpt3_ids;
104
105 static u8 tm_tr_cb_idx = -1 ;
106 static u8 tm_tr_volume_cb_idx = -1 ;
107 static u8 tm_sas_control_cb_idx = -1;
108
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 " bits for enabling additional logging info (default=0)");
113
114
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0444);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
118
119
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0444);
122 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
123
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0444);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0444);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 1 - enumerates only SAS 2.0 generation HBAs\n \
135 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136
137 /* diag_buffer_enable is bitwise
138 * bit 0 set = TRACE
139 * bit 1 set = SNAPSHOT
140 * bit 2 set = EXTENDED
141 *
142 * Either bit can be set, or both
143 */
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0444);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0444);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
151
152
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0444);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157
158 static bool enable_sdev_max_qd;
159 module_param(enable_sdev_max_qd, bool, 0444);
160 MODULE_PARM_DESC(enable_sdev_max_qd,
161 "Enable sdev max qd as can_queue, def=disabled(0)");
162
163 static int multipath_on_hba = -1;
164 module_param(multipath_on_hba, int, 0);
165 MODULE_PARM_DESC(multipath_on_hba,
166 "Multipath support to add same target device\n\t\t"
167 "as many times as it is visible to HBA from various paths\n\t\t"
168 "(by default:\n\t\t"
169 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
170 "\t SAS 3.5 HBA - This will be enabled)");
171
172 static int host_tagset_enable = 1;
173 module_param(host_tagset_enable, int, 0444);
174 MODULE_PARM_DESC(host_tagset_enable,
175 "Shared host tagset enable/disable Default: enable(1)");
176
177 /* raid transport support */
178 static struct raid_template *mpt3sas_raid_template;
179 static struct raid_template *mpt2sas_raid_template;
180
181
182 /**
183 * struct sense_info - common structure for obtaining sense keys
184 * @skey: sense key
185 * @asc: additional sense code
186 * @ascq: additional sense code qualifier
187 */
188 struct sense_info {
189 u8 skey;
190 u8 asc;
191 u8 ascq;
192 };
193
194 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
195 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
196 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
197 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
198 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
199 /**
200 * struct fw_event_work - firmware event struct
201 * @list: link list framework
202 * @work: work object (ioc->fault_reset_work_q)
203 * @ioc: per adapter object
204 * @device_handle: device handle
205 * @VF_ID: virtual function id
206 * @VP_ID: virtual port id
207 * @ignore: flag meaning this event has been marked to ignore
208 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
209 * @refcount: kref for this event
210 * @event_data: reply event data payload follows
211 *
212 * This object stored on ioc->fw_event_list.
213 */
214 struct fw_event_work {
215 struct list_head list;
216 struct work_struct work;
217
218 struct MPT3SAS_ADAPTER *ioc;
219 u16 device_handle;
220 u8 VF_ID;
221 u8 VP_ID;
222 u8 ignore;
223 u16 event;
224 struct kref refcount;
225 char event_data[] __aligned(4);
226 };
227
fw_event_work_free(struct kref * r)228 static void fw_event_work_free(struct kref *r)
229 {
230 kfree(container_of(r, struct fw_event_work, refcount));
231 }
232
fw_event_work_get(struct fw_event_work * fw_work)233 static void fw_event_work_get(struct fw_event_work *fw_work)
234 {
235 kref_get(&fw_work->refcount);
236 }
237
fw_event_work_put(struct fw_event_work * fw_work)238 static void fw_event_work_put(struct fw_event_work *fw_work)
239 {
240 kref_put(&fw_work->refcount, fw_event_work_free);
241 }
242
alloc_fw_event_work(int len)243 static struct fw_event_work *alloc_fw_event_work(int len)
244 {
245 struct fw_event_work *fw_event;
246
247 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
248 if (!fw_event)
249 return NULL;
250
251 kref_init(&fw_event->refcount);
252 return fw_event;
253 }
254
255 /**
256 * struct _scsi_io_transfer - scsi io transfer
257 * @handle: sas device handle (assigned by firmware)
258 * @is_raid: flag set for hidden raid components
259 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
260 * @data_length: data transfer length
261 * @data_dma: dma pointer to data
262 * @sense: sense data
263 * @lun: lun number
264 * @cdb_length: cdb length
265 * @cdb: cdb contents
266 * @timeout: timeout for this command
267 * @VF_ID: virtual function id
268 * @VP_ID: virtual port id
269 * @valid_reply: flag set for reply message
270 * @sense_length: sense length
271 * @ioc_status: ioc status
272 * @scsi_state: scsi state
273 * @scsi_status: scsi staus
274 * @log_info: log information
275 * @transfer_length: data length transfer when there is a reply message
276 *
277 * Used for sending internal scsi commands to devices within this module.
278 * Refer to _scsi_send_scsi_io().
279 */
280 struct _scsi_io_transfer {
281 u16 handle;
282 u8 is_raid;
283 enum dma_data_direction dir;
284 u32 data_length;
285 dma_addr_t data_dma;
286 u8 sense[SCSI_SENSE_BUFFERSIZE];
287 u32 lun;
288 u8 cdb_length;
289 u8 cdb[32];
290 u8 timeout;
291 u8 VF_ID;
292 u8 VP_ID;
293 u8 valid_reply;
294 /* the following bits are only valid when 'valid_reply = 1' */
295 u32 sense_length;
296 u16 ioc_status;
297 u8 scsi_state;
298 u8 scsi_status;
299 u32 log_info;
300 u32 transfer_length;
301 };
302
303 /**
304 * _scsih_set_debug_level - global setting of ioc->logging_level.
305 * @val: ?
306 * @kp: ?
307 *
308 * Note: The logging levels are defined in mpt3sas_debug.h.
309 */
310 static int
_scsih_set_debug_level(const char * val,const struct kernel_param * kp)311 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
312 {
313 int ret = param_set_int(val, kp);
314 struct MPT3SAS_ADAPTER *ioc;
315
316 if (ret)
317 return ret;
318
319 pr_info("setting logging_level(0x%08x)\n", logging_level);
320 spin_lock(&gioc_lock);
321 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
322 ioc->logging_level = logging_level;
323 spin_unlock(&gioc_lock);
324 return 0;
325 }
326 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
327 &logging_level, 0644);
328
329 /**
330 * _scsih_srch_boot_sas_address - search based on sas_address
331 * @sas_address: sas address
332 * @boot_device: boot device object from bios page 2
333 *
334 * Return: 1 when there's a match, 0 means no match.
335 */
336 static inline int
_scsih_srch_boot_sas_address(u64 sas_address,Mpi2BootDeviceSasWwid_t * boot_device)337 _scsih_srch_boot_sas_address(u64 sas_address,
338 Mpi2BootDeviceSasWwid_t *boot_device)
339 {
340 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
341 }
342
343 /**
344 * _scsih_srch_boot_device_name - search based on device name
345 * @device_name: device name specified in INDENTIFY fram
346 * @boot_device: boot device object from bios page 2
347 *
348 * Return: 1 when there's a match, 0 means no match.
349 */
350 static inline int
_scsih_srch_boot_device_name(u64 device_name,Mpi2BootDeviceDeviceName_t * boot_device)351 _scsih_srch_boot_device_name(u64 device_name,
352 Mpi2BootDeviceDeviceName_t *boot_device)
353 {
354 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
355 }
356
357 /**
358 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
359 * @enclosure_logical_id: enclosure logical id
360 * @slot_number: slot number
361 * @boot_device: boot device object from bios page 2
362 *
363 * Return: 1 when there's a match, 0 means no match.
364 */
365 static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id,u16 slot_number,Mpi2BootDeviceEnclosureSlot_t * boot_device)366 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
367 Mpi2BootDeviceEnclosureSlot_t *boot_device)
368 {
369 return (enclosure_logical_id == le64_to_cpu(boot_device->
370 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
371 SlotNumber)) ? 1 : 0;
372 }
373
374 /**
375 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
376 * port number from port list
377 * @ioc: per adapter object
378 * @port_id: port number
379 * @bypass_dirty_port_flag: when set look the matching hba port entry even
380 * if hba port entry is marked as dirty.
381 *
382 * Search for hba port entry corresponding to provided port number,
383 * if available return port object otherwise return NULL.
384 */
385 struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 bypass_dirty_port_flag)386 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
387 u8 port_id, u8 bypass_dirty_port_flag)
388 {
389 struct hba_port *port, *port_next;
390
391 /*
392 * When multipath_on_hba is disabled then
393 * search the hba_port entry using default
394 * port id i.e. 255
395 */
396 if (!ioc->multipath_on_hba)
397 port_id = MULTIPATH_DISABLED_PORT_ID;
398
399 list_for_each_entry_safe(port, port_next,
400 &ioc->port_table_list, list) {
401 if (port->port_id != port_id)
402 continue;
403 if (bypass_dirty_port_flag)
404 return port;
405 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
406 continue;
407 return port;
408 }
409
410 /*
411 * Allocate hba_port object for default port id (i.e. 255)
412 * when multipath_on_hba is disabled for the HBA.
413 * And add this object to port_table_list.
414 */
415 if (!ioc->multipath_on_hba) {
416 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
417 if (!port)
418 return NULL;
419
420 port->port_id = port_id;
421 ioc_info(ioc,
422 "hba_port entry: %p, port: %d is added to hba_port list\n",
423 port, port->port_id);
424 list_add_tail(&port->list,
425 &ioc->port_table_list);
426 return port;
427 }
428 return NULL;
429 }
430
431 /**
432 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
433 * @ioc: per adapter object
434 * @port: hba_port object
435 * @phy: phy number
436 *
437 * Return virtual_phy object corresponding to phy number.
438 */
439 struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port,u32 phy)440 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
441 struct hba_port *port, u32 phy)
442 {
443 struct virtual_phy *vphy, *vphy_next;
444
445 if (!port->vphys_mask)
446 return NULL;
447
448 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
449 if (vphy->phy_mask & (1 << phy))
450 return vphy;
451 }
452 return NULL;
453 }
454
455 /**
456 * _scsih_is_boot_device - search for matching boot device.
457 * @sas_address: sas address
458 * @device_name: device name specified in INDENTIFY fram
459 * @enclosure_logical_id: enclosure logical id
460 * @slot: slot number
461 * @form: specifies boot device form
462 * @boot_device: boot device object from bios page 2
463 *
464 * Return: 1 when there's a match, 0 means no match.
465 */
466 static int
_scsih_is_boot_device(u64 sas_address,u64 device_name,u64 enclosure_logical_id,u16 slot,u8 form,Mpi2BiosPage2BootDevice_t * boot_device)467 _scsih_is_boot_device(u64 sas_address, u64 device_name,
468 u64 enclosure_logical_id, u16 slot, u8 form,
469 Mpi2BiosPage2BootDevice_t *boot_device)
470 {
471 int rc = 0;
472
473 switch (form) {
474 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
475 if (!sas_address)
476 break;
477 rc = _scsih_srch_boot_sas_address(
478 sas_address, &boot_device->SasWwid);
479 break;
480 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
481 if (!enclosure_logical_id)
482 break;
483 rc = _scsih_srch_boot_encl_slot(
484 enclosure_logical_id,
485 slot, &boot_device->EnclosureSlot);
486 break;
487 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
488 if (!device_name)
489 break;
490 rc = _scsih_srch_boot_device_name(
491 device_name, &boot_device->DeviceName);
492 break;
493 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
494 break;
495 }
496
497 return rc;
498 }
499
500 /**
501 * _scsih_get_sas_address - set the sas_address for given device handle
502 * @ioc: ?
503 * @handle: device handle
504 * @sas_address: sas address
505 *
506 * Return: 0 success, non-zero when failure
507 */
508 static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER * ioc,u16 handle,u64 * sas_address)509 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
510 u64 *sas_address)
511 {
512 Mpi2SasDevicePage0_t sas_device_pg0;
513 Mpi2ConfigReply_t mpi_reply;
514 u32 ioc_status;
515
516 *sas_address = 0;
517
518 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
519 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
520 ioc_err(ioc, "failure at %s:%d/%s()!\n",
521 __FILE__, __LINE__, __func__);
522 return -ENXIO;
523 }
524
525 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
526 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
527 /* For HBA, vSES doesn't return HBA SAS address. Instead return
528 * vSES's sas address.
529 */
530 if ((handle <= ioc->sas_hba.num_phys) &&
531 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
532 MPI2_SAS_DEVICE_INFO_SEP)))
533 *sas_address = ioc->sas_hba.sas_address;
534 else
535 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
536 return 0;
537 }
538
539 /* we hit this because the given parent handle doesn't exist */
540 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
541 return -ENXIO;
542
543 /* else error case */
544 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
545 handle, ioc_status, __FILE__, __LINE__, __func__);
546 return -EIO;
547 }
548
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device, pcie_device or raid_device object (selected by @channel)
 * @channel: RAID_CHANNEL, PCIE_CHANNEL, or any other value for plain SAS
 *
 * Determines whether this device should be the first reported device to
 * scsi-ml or sas transport; this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current. This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* Extract the identifying keys for the matcher; RAID and PCIe
	 * devices only have a wwid, SAS devices also carry device name,
	 * enclosure id and slot.
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* primary (requested) boot device entry; first match wins */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	/* alternate boot device entry */
	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	/* current boot device entry */
	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
644
645 static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)646 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
647 struct MPT3SAS_TARGET *tgt_priv)
648 {
649 struct _sas_device *ret;
650
651 assert_spin_locked(&ioc->sas_device_lock);
652
653 ret = tgt_priv->sas_dev;
654 if (ret)
655 sas_device_get(ret);
656
657 return ret;
658 }
659
660 static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)661 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
662 struct MPT3SAS_TARGET *tgt_priv)
663 {
664 struct _sas_device *ret;
665 unsigned long flags;
666
667 spin_lock_irqsave(&ioc->sas_device_lock, flags);
668 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
669 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
670
671 return ret;
672 }
673
674 static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)675 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
676 struct MPT3SAS_TARGET *tgt_priv)
677 {
678 struct _pcie_device *ret;
679
680 assert_spin_locked(&ioc->pcie_device_lock);
681
682 ret = tgt_priv->pcie_dev;
683 if (ret)
684 pcie_device_get(ret);
685
686 return ret;
687 }
688
689 /**
690 * mpt3sas_get_pdev_from_target - pcie device search
691 * @ioc: per adapter object
692 * @tgt_priv: starget private object
693 *
694 * Context: This function will acquire ioc->pcie_device_lock and will release
695 * before returning the pcie_device object.
696 *
697 * This searches for pcie_device from target, then return pcie_device object.
698 */
699 static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)700 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
701 struct MPT3SAS_TARGET *tgt_priv)
702 {
703 struct _pcie_device *ret;
704 unsigned long flags;
705
706 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
707 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
708 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
709
710 return ret;
711 }
712
713
714 /**
715 * __mpt3sas_get_sdev_by_rphy - sas device search
716 * @ioc: per adapter object
717 * @rphy: sas_rphy pointer
718 *
719 * Context: This function will acquire ioc->sas_device_lock and will release
720 * before returning the sas_device object.
721 *
722 * This searches for sas_device from rphy object
723 * then return sas_device object.
724 */
725 struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER * ioc,struct sas_rphy * rphy)726 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
727 struct sas_rphy *rphy)
728 {
729 struct _sas_device *sas_device;
730
731 assert_spin_locked(&ioc->sas_device_lock);
732
733 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
734 if (sas_device->rphy != rphy)
735 continue;
736 sas_device_get(sas_device);
737 return sas_device;
738 }
739
740 sas_device = NULL;
741 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
742 if (sas_device->rphy != rphy)
743 continue;
744 sas_device_get(sas_device);
745 return sas_device;
746 }
747
748 return NULL;
749 }
750
751 /**
752 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
753 * sas address from sas_device_list list
754 * @ioc: per adapter object
755 * @sas_address: device sas address
756 * @port: port number
757 *
758 * Search for _sas_device object corresponding to provided sas address,
759 * if available return _sas_device object address otherwise return NULL.
760 */
761 struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)762 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
763 u64 sas_address, struct hba_port *port)
764 {
765 struct _sas_device *sas_device;
766
767 if (!port)
768 return NULL;
769
770 assert_spin_locked(&ioc->sas_device_lock);
771
772 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
773 if (sas_device->sas_address != sas_address)
774 continue;
775 if (sas_device->port != port)
776 continue;
777 sas_device_get(sas_device);
778 return sas_device;
779 }
780
781 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
782 if (sas_device->sas_address != sas_address)
783 continue;
784 if (sas_device->port != port)
785 continue;
786 sas_device_get(sas_device);
787 return sas_device;
788 }
789
790 return NULL;
791 }
792
793 /**
794 * mpt3sas_get_sdev_by_addr - sas device search
795 * @ioc: per adapter object
796 * @sas_address: sas address
797 * @port: hba port entry
798 * Context: Calling function should acquire ioc->sas_device_lock
799 *
800 * This searches for sas_device based on sas_address & port number,
801 * then return sas_device object.
802 */
803 struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)804 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
805 u64 sas_address, struct hba_port *port)
806 {
807 struct _sas_device *sas_device;
808 unsigned long flags;
809
810 spin_lock_irqsave(&ioc->sas_device_lock, flags);
811 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
812 sas_address, port);
813 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
814
815 return sas_device;
816 }
817
818 static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)819 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
820 {
821 struct _sas_device *sas_device;
822
823 assert_spin_locked(&ioc->sas_device_lock);
824
825 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
826 if (sas_device->handle == handle)
827 goto found_device;
828
829 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
830 if (sas_device->handle == handle)
831 goto found_device;
832
833 return NULL;
834
835 found_device:
836 sas_device_get(sas_device);
837 return sas_device;
838 }
839
840 /**
841 * mpt3sas_get_sdev_by_handle - sas device search
842 * @ioc: per adapter object
843 * @handle: sas device handle (assigned by firmware)
844 * Context: Calling function should acquire ioc->sas_device_lock
845 *
846 * This searches for sas_device based on sas_address, then return sas_device
847 * object.
848 */
849 struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)850 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
851 {
852 struct _sas_device *sas_device;
853 unsigned long flags;
854
855 spin_lock_irqsave(&ioc->sas_device_lock, flags);
856 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
857 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
858
859 return sas_device;
860 }
861
/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct (may be NULL)
 * @starget: scsi target struct (may be NULL)
 *
 * Logs the enclosure logical id/slot, enclosure level/connector name and
 * chassis slot of @sas_device. The log target is picked in priority order:
 * @sdev if non-NULL, else @starget if non-NULL, else the adapter log.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		/* neither sdev nor starget supplied: log against the ioc */
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
919
920 /**
921 * _scsih_sas_device_remove - remove sas_device from list.
922 * @ioc: per adapter object
923 * @sas_device: the sas_device object
924 * Context: This function will acquire ioc->sas_device_lock.
925 *
926 * If sas_device is on the list, remove it and decrement its reference count.
927 */
928 static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)929 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
930 struct _sas_device *sas_device)
931 {
932 unsigned long flags;
933
934 if (!sas_device)
935 return;
936 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
937 sas_device->handle, (u64)sas_device->sas_address);
938
939 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
940
941 /*
942 * The lock serializes access to the list, but we still need to verify
943 * that nobody removed the entry while we were waiting on the lock.
944 */
945 spin_lock_irqsave(&ioc->sas_device_lock, flags);
946 if (!list_empty(&sas_device->list)) {
947 list_del_init(&sas_device->list);
948 sas_device_put(sas_device);
949 }
950 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
951 }
952
/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks up the sas_device by firmware handle, unlinks it from the
 * driver's list under the lock, then removes it from the SCSI midlayer
 * outside the lock.  No-op while host recovery is in progress.
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* Device removal is deferred while the host is being reset. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* Lookup takes its own reference on success. */
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* Release the reference that the list held. */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* Tear down outside the lock; may interact with the SML. */
		_scsih_remove_device(ioc, sas_device);
		/* Release the lookup reference. */
		sas_device_put(sas_device);
	}
}
979
/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 * sas address & port number
 * @ioc: per adapter object
 * @sas_address: device sas_address
 * @port: hba port entry
 *
 * Same two-phase removal as _scsih_device_remove_by_handle(): unlink
 * from the driver list under the lock, then remove from the SCSI
 * midlayer outside it.  No-op while host recovery is in progress.
 *
 * Return nothing.
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* Device removal is deferred while the host is being reset. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* Lookup takes its own reference on success. */
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* Release the reference that the list held. */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* Tear down outside the lock; may interact with the SML. */
		_scsih_remove_device(ioc, sas_device);
		/* Release the lookup reference. */
		sas_device_put(sas_device);
	}
}
1011
/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object to the ioc->sas_device_list.  After linking the
 * device, registers it with the SAS transport layer and rolls the add
 * back if registration fails or no scsi_target was attached.
 */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Take a reference on behalf of the list before linking. */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* Hidden drives (warpdrive mode) are not exposed to the transport. */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent, sas_device->port)) {
		/* Transport registration failed: undo the list add. */
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When asyn scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
1063
/**
 * _scsih_sas_device_init_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object at driver load time to the ioc->sas_device_init_list.
 * Also evaluates the device as a potential boot device while the lock
 * is held.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Take a reference on behalf of the init list before linking. */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
1092
1093
1094 static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1095 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1096 {
1097 struct _pcie_device *pcie_device;
1098
1099 assert_spin_locked(&ioc->pcie_device_lock);
1100
1101 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1102 if (pcie_device->wwid == wwid)
1103 goto found_device;
1104
1105 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1106 if (pcie_device->wwid == wwid)
1107 goto found_device;
1108
1109 return NULL;
1110
1111 found_device:
1112 pcie_device_get(pcie_device);
1113 return pcie_device;
1114 }
1115
1116
1117 /**
1118 * mpt3sas_get_pdev_by_wwid - pcie device search
1119 * @ioc: per adapter object
1120 * @wwid: wwid
1121 *
1122 * Context: This function will acquire ioc->pcie_device_lock and will release
1123 * before returning the pcie_device object.
1124 *
1125 * This searches for pcie_device based on wwid, then return pcie_device object.
1126 */
1127 static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1128 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1129 {
1130 struct _pcie_device *pcie_device;
1131 unsigned long flags;
1132
1133 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1134 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1135 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1136
1137 return pcie_device;
1138 }
1139
1140
1141 static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1142 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1143 int channel)
1144 {
1145 struct _pcie_device *pcie_device;
1146
1147 assert_spin_locked(&ioc->pcie_device_lock);
1148
1149 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1150 if (pcie_device->id == id && pcie_device->channel == channel)
1151 goto found_device;
1152
1153 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1154 if (pcie_device->id == id && pcie_device->channel == channel)
1155 goto found_device;
1156
1157 return NULL;
1158
1159 found_device:
1160 pcie_device_get(pcie_device);
1161 return pcie_device;
1162 }
1163
1164 static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1165 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1166 {
1167 struct _pcie_device *pcie_device;
1168
1169 assert_spin_locked(&ioc->pcie_device_lock);
1170
1171 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1172 if (pcie_device->handle == handle)
1173 goto found_device;
1174
1175 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1176 if (pcie_device->handle == handle)
1177 goto found_device;
1178
1179 return NULL;
1180
1181 found_device:
1182 pcie_device_get(pcie_device);
1183 return pcie_device;
1184 }
1185
1186
1187 /**
1188 * mpt3sas_get_pdev_by_handle - pcie device search
1189 * @ioc: per adapter object
1190 * @handle: Firmware device handle
1191 *
1192 * Context: This function will acquire ioc->pcie_device_lock and will release
1193 * before returning the pcie_device object.
1194 *
1195 * This searches for pcie_device based on handle, then return pcie_device
1196 * object.
1197 */
1198 struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1199 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1200 {
1201 struct _pcie_device *pcie_device;
1202 unsigned long flags;
1203
1204 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1205 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1206 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1207
1208 return pcie_device;
1209 }
1210
1211 /**
1212 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1213 * @ioc: per adapter object
1214 * Context: This function will acquire ioc->pcie_device_lock
1215 *
1216 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1217 * which has reported maximum among all available NVMe drives.
1218 * Minimum max_shutdown_latency will be six seconds.
1219 */
1220 static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER * ioc)1221 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1222 {
1223 struct _pcie_device *pcie_device;
1224 unsigned long flags;
1225 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1226
1227 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1228 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1229 if (pcie_device->shutdown_latency) {
1230 if (shutdown_latency < pcie_device->shutdown_latency)
1231 shutdown_latency =
1232 pcie_device->shutdown_latency;
1233 }
1234 }
1235 ioc->max_shutdown_latency = shutdown_latency;
1236 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1237 }
1238
/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference count.
 * Also recomputes ioc->max_shutdown_latency when the removed device was
 * the one that defined the current maximum.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* Tolerate a NULL device so callers need not pre-check. */
	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Entry may already have been unlinked by a concurrent remover. */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* Drop the reference the list was holding. */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1289

/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks up the pcie_device by firmware handle, unlinks it from the
 * driver's list under the lock, then removes it from the SCSI midlayer
 * outside the lock.  Recomputes max_shutdown_latency when needed.
 * No-op while host recovery is in progress.
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* Device removal is deferred while the host is being reset. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Lookup takes its own reference on success. */
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* Release the reference that the list held. */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		/* Tear down outside the lock, then drop the lookup ref. */
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1332
/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list.  After linking, the
 * device is announced to the SCSI midlayer via scsi_add_device(); a
 * failure there rolls the list add back.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Take a reference on behalf of the list before linking. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* Blocked devices are tracked but not exposed to the midlayer. */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* Midlayer registration failed: undo the list add. */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}
1382
/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 * Blocked devices are linked but not considered as boot-device candidates.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Take a reference on behalf of the init list before linking. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
1421 /**
1422 * _scsih_raid_device_find_by_id - raid device search
1423 * @ioc: per adapter object
1424 * @id: sas device target id
1425 * @channel: sas device channel
1426 * Context: Calling function should acquire ioc->raid_device_lock
1427 *
1428 * This searches for raid_device based on target id, then return raid_device
1429 * object.
1430 */
1431 static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1432 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1433 {
1434 struct _raid_device *raid_device, *r;
1435
1436 r = NULL;
1437 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1438 if (raid_device->id == id && raid_device->channel == channel) {
1439 r = raid_device;
1440 goto out;
1441 }
1442 }
1443
1444 out:
1445 return r;
1446 }
1447
1448 /**
1449 * mpt3sas_raid_device_find_by_handle - raid device search
1450 * @ioc: per adapter object
1451 * @handle: sas device handle (assigned by firmware)
1452 * Context: Calling function should acquire ioc->raid_device_lock
1453 *
1454 * This searches for raid_device based on handle, then return raid_device
1455 * object.
1456 */
1457 struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1458 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1459 {
1460 struct _raid_device *raid_device, *r;
1461
1462 r = NULL;
1463 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1464 if (raid_device->handle != handle)
1465 continue;
1466 r = raid_device;
1467 goto out;
1468 }
1469
1470 out:
1471 return r;
1472 }
1473
1474 /**
1475 * _scsih_raid_device_find_by_wwid - raid device search
1476 * @ioc: per adapter object
1477 * @wwid: ?
1478 * Context: Calling function should acquire ioc->raid_device_lock
1479 *
1480 * This searches for raid_device based on wwid, then return raid_device
1481 * object.
1482 */
1483 static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1484 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1485 {
1486 struct _raid_device *raid_device, *r;
1487
1488 r = NULL;
1489 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1490 if (raid_device->wwid != wwid)
1491 continue;
1492 r = raid_device;
1493 goto out;
1494 }
1495
1496 out:
1497 return r;
1498 }
1499
1500 /**
1501 * _scsih_raid_device_add - add raid_device object
1502 * @ioc: per adapter object
1503 * @raid_device: raid_device object
1504 *
1505 * This is added to the raid_device_list link list.
1506 */
1507 static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1508 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1509 struct _raid_device *raid_device)
1510 {
1511 unsigned long flags;
1512
1513 dewtprintk(ioc,
1514 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1515 __func__,
1516 raid_device->handle, (u64)raid_device->wwid));
1517
1518 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1519 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1520 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1521 }
1522
1523 /**
1524 * _scsih_raid_device_remove - delete raid_device object
1525 * @ioc: per adapter object
1526 * @raid_device: raid_device object
1527 *
1528 */
1529 static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1530 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1531 struct _raid_device *raid_device)
1532 {
1533 unsigned long flags;
1534
1535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1536 list_del(&raid_device->list);
1537 kfree(raid_device);
1538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1539 }
1540
1541 /**
1542 * mpt3sas_scsih_expander_find_by_handle - expander device search
1543 * @ioc: per adapter object
1544 * @handle: expander handle (assigned by firmware)
1545 * Context: Calling function should acquire ioc->sas_device_lock
1546 *
1547 * This searches for expander device based on handle, then returns the
1548 * sas_node object.
1549 */
1550 struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1551 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1552 {
1553 struct _sas_node *sas_expander, *r;
1554
1555 r = NULL;
1556 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1557 if (sas_expander->handle != handle)
1558 continue;
1559 r = sas_expander;
1560 goto out;
1561 }
1562 out:
1563 return r;
1564 }
1565
1566 /**
1567 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1568 * @ioc: per adapter object
1569 * @handle: enclosure handle (assigned by firmware)
1570 * Context: Calling function should acquire ioc->sas_device_lock
1571 *
1572 * This searches for enclosure device based on handle, then returns the
1573 * enclosure object.
1574 */
1575 static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1576 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1577 {
1578 struct _enclosure_node *enclosure_dev, *r;
1579
1580 r = NULL;
1581 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1582 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1583 continue;
1584 r = enclosure_dev;
1585 goto out;
1586 }
1587 out:
1588 return r;
1589 }
1590 /**
1591 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1592 * @ioc: per adapter object
1593 * @sas_address: sas address
1594 * @port: hba port entry
1595 * Context: Calling function should acquire ioc->sas_node_lock.
1596 *
1597 * This searches for expander device based on sas_address & port number,
1598 * then returns the sas_node object.
1599 */
1600 struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)1601 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1602 u64 sas_address, struct hba_port *port)
1603 {
1604 struct _sas_node *sas_expander, *r = NULL;
1605
1606 if (!port)
1607 return r;
1608
1609 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1610 if (sas_expander->sas_address != sas_address)
1611 continue;
1612 if (sas_expander->port != port)
1613 continue;
1614 r = sas_expander;
1615 goto out;
1616 }
1617 out:
1618 return r;
1619 }
1620
1621 /**
1622 * _scsih_expander_node_add - insert expander device to the list.
1623 * @ioc: per adapter object
1624 * @sas_expander: the sas_device object
1625 * Context: This function will acquire ioc->sas_node_lock.
1626 *
1627 * Adding new object to the ioc->sas_expander_list.
1628 */
1629 static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)1630 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1631 struct _sas_node *sas_expander)
1632 {
1633 unsigned long flags;
1634
1635 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1636 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1637 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1638 }
1639
1640 /**
1641 * _scsih_is_end_device - determines if device is an end device
1642 * @device_info: bitfield providing information about the device.
1643 * Context: none
1644 *
1645 * Return: 1 if end device.
1646 */
1647 static int
_scsih_is_end_device(u32 device_info)1648 _scsih_is_end_device(u32 device_info)
1649 {
1650 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1651 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1652 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1654 return 1;
1655 else
1656 return 0;
1657 }
1658
1659 /**
1660 * _scsih_is_nvme_pciescsi_device - determines if
1661 * device is an pcie nvme/scsi device
1662 * @device_info: bitfield providing information about the device.
1663 * Context: none
1664 *
1665 * Returns 1 if device is pcie device type nvme/scsi.
1666 */
1667 static int
_scsih_is_nvme_pciescsi_device(u32 device_info)1668 _scsih_is_nvme_pciescsi_device(u32 device_info)
1669 {
1670 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1671 == MPI26_PCIE_DEVINFO_NVME) ||
1672 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1673 == MPI26_PCIE_DEVINFO_SCSI))
1674 return 1;
1675 else
1676 return 0;
1677 }
1678
1679 /**
1680 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1681 * @ioc: per adapter object
1682 * @id: target id
1683 * @channel: channel
1684 * Context: This function will acquire ioc->scsi_lookup_lock.
1685 *
1686 * This will search for a matching channel:id in the scsi_lookup array,
1687 * returning 1 if found.
1688 */
1689 static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1690 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1691 int channel)
1692 {
1693 int smid;
1694 struct scsi_cmnd *scmd;
1695
1696 for (smid = 1;
1697 smid <= ioc->shost->can_queue; smid++) {
1698 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1699 if (!scmd)
1700 continue;
1701 if (scmd->device->id == id &&
1702 scmd->device->channel == channel)
1703 return 1;
1704 }
1705 return 0;
1706 }
1707
1708 /**
1709 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1710 * @ioc: per adapter object
1711 * @id: target id
1712 * @lun: lun number
1713 * @channel: channel
1714 * Context: This function will acquire ioc->scsi_lookup_lock.
1715 *
1716 * This will search for a matching channel:id:lun in the scsi_lookup array,
1717 * returning 1 if found.
1718 */
1719 static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER * ioc,int id,unsigned int lun,int channel)1720 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1721 unsigned int lun, int channel)
1722 {
1723 int smid;
1724 struct scsi_cmnd *scmd;
1725
1726 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1727
1728 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1729 if (!scmd)
1730 continue;
1731 if (scmd->device->id == id &&
1732 scmd->device->channel == channel &&
1733 scmd->device->lun == lun)
1734 return 1;
1735 }
1736 return 0;
1737 }
1738
/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the smid stored scmd pointer.
 * Then will dereference the stored scmd pointer.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	/* smids are 1-based; block-layer tags are 0-based. */
	u16 tag = smid - 1;

	/* Only smids in the SCSI IO range map to block-layer tags. */
	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* Rebuild the blk-mq unique tag: hwq number | tag. */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle filed must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* Filter out commands not actively tracked by us. */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}
1780
/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Clamps the requested depth to the host limit, and additionally to
 * MPT3SAS_SATA_QUEUE_DEPTH for SATA devices unless enable_sdev_max_qd
 * is set.  Untagged devices are limited to a depth of 1.
 *
 * Return: queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd)
		goto not_sata;

	/* Missing private data means the device class is unknown here. */
	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not subject to the SATA clamp. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* Lookup takes a reference; drop it once device_info is read. */
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	/* Untagged devices can only queue a single command. */
	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
		    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
		    sdev->queue_depth, sdev->tagged_supported,
		    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}
1840
1841 /**
1842 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1843 * @sdev: scsi device struct
1844 * @qdepth: requested queue depth
1845 *
1846 * Returns nothing.
1847 */
1848 void
mpt3sas_scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1849 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1850 {
1851 struct Scsi_Host *shost = sdev->host;
1852 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1853
1854 if (ioc->enable_sdev_max_qd)
1855 qdepth = shost->can_queue;
1856
1857 scsih_change_queue_depth(sdev, qdepth);
1858 }
1859
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Allocates per-target private data and binds it to the matching
 * raid/pcie/sas device object, keyed off the target's channel.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			/* RAID volumes are addressed by their wwid. */
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* Lookup elevates the refcount; it is retained in pcie_dev. */
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
		    starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
				    MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	/* Lookup elevates the refcount; it is retained in sas_dev. */
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		/* Devices in pd_handles are RAID components, not volumes. */
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
			    MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}
1952
1953 /**
1954 * scsih_target_destroy - target destroy routine
1955 * @starget: scsi target struct
1956 */
1957 static void
scsih_target_destroy(struct scsi_target * starget)1958 scsih_target_destroy(struct scsi_target *starget)
1959 {
1960 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1961 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1962 struct MPT3SAS_TARGET *sas_target_priv_data;
1963 struct _sas_device *sas_device;
1964 struct _raid_device *raid_device;
1965 struct _pcie_device *pcie_device;
1966 unsigned long flags;
1967
1968 sas_target_priv_data = starget->hostdata;
1969 if (!sas_target_priv_data)
1970 return;
1971
1972 if (starget->channel == RAID_CHANNEL) {
1973 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1974 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1975 starget->channel);
1976 if (raid_device) {
1977 raid_device->starget = NULL;
1978 raid_device->sdev = NULL;
1979 }
1980 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1981 goto out;
1982 }
1983
1984 if (starget->channel == PCIE_CHANNEL) {
1985 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1986 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1987 sas_target_priv_data);
1988 if (pcie_device && (pcie_device->starget == starget) &&
1989 (pcie_device->id == starget->id) &&
1990 (pcie_device->channel == starget->channel))
1991 pcie_device->starget = NULL;
1992
1993 if (pcie_device) {
1994 /*
1995 * Corresponding get() is in _scsih_target_alloc()
1996 */
1997 sas_target_priv_data->pcie_dev = NULL;
1998 pcie_device_put(pcie_device);
1999 pcie_device_put(pcie_device);
2000 }
2001 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2002 goto out;
2003 }
2004
2005 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2006 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2007 if (sas_device && (sas_device->starget == starget) &&
2008 (sas_device->id == starget->id) &&
2009 (sas_device->channel == starget->channel))
2010 sas_device->starget = NULL;
2011
2012 if (sas_device) {
2013 /*
2014 * Corresponding get() is in _scsih_target_alloc()
2015 */
2016 sas_target_priv_data->sas_dev = NULL;
2017 sas_device_put(sas_device);
2018
2019 sas_device_put(sas_device);
2020 }
2021 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2022
2023 out:
2024 kfree(sas_target_priv_data);
2025 starget->hostdata = NULL;
2026 }
2027
2028 /**
2029 * scsih_slave_alloc - device add routine
2030 * @sdev: scsi device struct
2031 *
2032 * Return: 0 if ok. Any other return is assumed to be an error and
2033 * the device is ignored.
2034 */
2035 static int
scsih_slave_alloc(struct scsi_device * sdev)2036 scsih_slave_alloc(struct scsi_device *sdev)
2037 {
2038 struct Scsi_Host *shost;
2039 struct MPT3SAS_ADAPTER *ioc;
2040 struct MPT3SAS_TARGET *sas_target_priv_data;
2041 struct MPT3SAS_DEVICE *sas_device_priv_data;
2042 struct scsi_target *starget;
2043 struct _raid_device *raid_device;
2044 struct _sas_device *sas_device;
2045 struct _pcie_device *pcie_device;
2046 unsigned long flags;
2047
2048 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2049 GFP_KERNEL);
2050 if (!sas_device_priv_data)
2051 return -ENOMEM;
2052
2053 sas_device_priv_data->lun = sdev->lun;
2054 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2055
2056 starget = scsi_target(sdev);
2057 sas_target_priv_data = starget->hostdata;
2058 sas_target_priv_data->num_luns++;
2059 sas_device_priv_data->sas_target = sas_target_priv_data;
2060 sdev->hostdata = sas_device_priv_data;
2061 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2062 sdev->no_uld_attach = 1;
2063
2064 shost = dev_to_shost(&starget->dev);
2065 ioc = shost_priv(shost);
2066 if (starget->channel == RAID_CHANNEL) {
2067 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2068 raid_device = _scsih_raid_device_find_by_id(ioc,
2069 starget->id, starget->channel);
2070 if (raid_device)
2071 raid_device->sdev = sdev; /* raid is single lun */
2072 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2073 }
2074 if (starget->channel == PCIE_CHANNEL) {
2075 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2076 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2077 sas_target_priv_data->sas_address);
2078 if (pcie_device && (pcie_device->starget == NULL)) {
2079 sdev_printk(KERN_INFO, sdev,
2080 "%s : pcie_device->starget set to starget @ %d\n",
2081 __func__, __LINE__);
2082 pcie_device->starget = starget;
2083 }
2084
2085 if (pcie_device)
2086 pcie_device_put(pcie_device);
2087 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2088
2089 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2090 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2091 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2092 sas_target_priv_data->sas_address,
2093 sas_target_priv_data->port);
2094 if (sas_device && (sas_device->starget == NULL)) {
2095 sdev_printk(KERN_INFO, sdev,
2096 "%s : sas_device->starget set to starget @ %d\n",
2097 __func__, __LINE__);
2098 sas_device->starget = starget;
2099 }
2100
2101 if (sas_device)
2102 sas_device_put(sas_device);
2103
2104 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2105 }
2106
2107 return 0;
2108 }
2109
2110 /**
2111 * scsih_slave_destroy - device destroy routine
2112 * @sdev: scsi device struct
2113 */
2114 static void
scsih_slave_destroy(struct scsi_device * sdev)2115 scsih_slave_destroy(struct scsi_device *sdev)
2116 {
2117 struct MPT3SAS_TARGET *sas_target_priv_data;
2118 struct scsi_target *starget;
2119 struct Scsi_Host *shost;
2120 struct MPT3SAS_ADAPTER *ioc;
2121 struct _sas_device *sas_device;
2122 struct _pcie_device *pcie_device;
2123 unsigned long flags;
2124
2125 if (!sdev->hostdata)
2126 return;
2127
2128 starget = scsi_target(sdev);
2129 sas_target_priv_data = starget->hostdata;
2130 sas_target_priv_data->num_luns--;
2131
2132 shost = dev_to_shost(&starget->dev);
2133 ioc = shost_priv(shost);
2134
2135 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2136 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2137 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2138 sas_target_priv_data);
2139 if (pcie_device && !sas_target_priv_data->num_luns)
2140 pcie_device->starget = NULL;
2141
2142 if (pcie_device)
2143 pcie_device_put(pcie_device);
2144
2145 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2146
2147 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2148 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2149 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2150 sas_target_priv_data);
2151 if (sas_device && !sas_target_priv_data->num_luns)
2152 sas_device->starget = NULL;
2153
2154 if (sas_device)
2155 sas_device_put(sas_device);
2156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2157 }
2158
2159 kfree(sdev->hostdata);
2160 sdev->hostdata = NULL;
2161 }
2162
2163 /**
2164 * _scsih_display_sata_capabilities - sata capabilities
2165 * @ioc: per adapter object
2166 * @handle: device handle
2167 * @sdev: scsi device struct
2168 */
2169 static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER * ioc,u16 handle,struct scsi_device * sdev)2170 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2171 u16 handle, struct scsi_device *sdev)
2172 {
2173 Mpi2ConfigReply_t mpi_reply;
2174 Mpi2SasDevicePage0_t sas_device_pg0;
2175 u32 ioc_status;
2176 u16 flags;
2177 u32 device_info;
2178
2179 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2180 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2181 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2182 __FILE__, __LINE__, __func__);
2183 return;
2184 }
2185
2186 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2187 MPI2_IOCSTATUS_MASK;
2188 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2189 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2190 __FILE__, __LINE__, __func__);
2191 return;
2192 }
2193
2194 flags = le16_to_cpu(sas_device_pg0.Flags);
2195 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2196
2197 sdev_printk(KERN_INFO, sdev,
2198 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2199 "sw_preserve(%s)\n",
2200 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2203 "n",
2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2207 }
2208
2209 /*
2210 * raid transport support -
2211 * Enabled for SLES11 and newer, in older kernels the driver will panic when
2212 * unloading the driver followed by a load - I believe that the subroutine
2213 * raid_class_release() is not cleaning up properly.
2214 */
2215
2216 /**
2217 * scsih_is_raid - return boolean indicating device is raid volume
2218 * @dev: the device struct object
2219 */
2220 static int
scsih_is_raid(struct device * dev)2221 scsih_is_raid(struct device *dev)
2222 {
2223 struct scsi_device *sdev = to_scsi_device(dev);
2224 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2225
2226 if (ioc->is_warpdrive)
2227 return 0;
2228 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2229 }
2230
2231 static int
scsih_is_nvme(struct device * dev)2232 scsih_is_nvme(struct device *dev)
2233 {
2234 struct scsi_device *sdev = to_scsi_device(dev);
2235
2236 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2237 }
2238
2239 /**
2240 * scsih_get_resync - get raid volume resync percent complete
2241 * @dev: the device struct object
2242 */
2243 static void
scsih_get_resync(struct device * dev)2244 scsih_get_resync(struct device *dev)
2245 {
2246 struct scsi_device *sdev = to_scsi_device(dev);
2247 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2248 static struct _raid_device *raid_device;
2249 unsigned long flags;
2250 Mpi2RaidVolPage0_t vol_pg0;
2251 Mpi2ConfigReply_t mpi_reply;
2252 u32 volume_status_flags;
2253 u8 percent_complete;
2254 u16 handle;
2255
2256 percent_complete = 0;
2257 handle = 0;
2258 if (ioc->is_warpdrive)
2259 goto out;
2260
2261 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2262 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2263 sdev->channel);
2264 if (raid_device) {
2265 handle = raid_device->handle;
2266 percent_complete = raid_device->percent_complete;
2267 }
2268 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2269
2270 if (!handle)
2271 goto out;
2272
2273 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2274 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2275 sizeof(Mpi2RaidVolPage0_t))) {
2276 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2277 __FILE__, __LINE__, __func__);
2278 percent_complete = 0;
2279 goto out;
2280 }
2281
2282 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2283 if (!(volume_status_flags &
2284 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2285 percent_complete = 0;
2286
2287 out:
2288
2289 switch (ioc->hba_mpi_version_belonged) {
2290 case MPI2_VERSION:
2291 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2292 break;
2293 case MPI25_VERSION:
2294 case MPI26_VERSION:
2295 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2296 break;
2297 }
2298 }
2299
2300 /**
2301 * scsih_get_state - get raid volume level
2302 * @dev: the device struct object
2303 */
2304 static void
scsih_get_state(struct device * dev)2305 scsih_get_state(struct device *dev)
2306 {
2307 struct scsi_device *sdev = to_scsi_device(dev);
2308 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2309 static struct _raid_device *raid_device;
2310 unsigned long flags;
2311 Mpi2RaidVolPage0_t vol_pg0;
2312 Mpi2ConfigReply_t mpi_reply;
2313 u32 volstate;
2314 enum raid_state state = RAID_STATE_UNKNOWN;
2315 u16 handle = 0;
2316
2317 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2318 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2319 sdev->channel);
2320 if (raid_device)
2321 handle = raid_device->handle;
2322 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2323
2324 if (!raid_device)
2325 goto out;
2326
2327 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2328 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2329 sizeof(Mpi2RaidVolPage0_t))) {
2330 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2331 __FILE__, __LINE__, __func__);
2332 goto out;
2333 }
2334
2335 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2336 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2337 state = RAID_STATE_RESYNCING;
2338 goto out;
2339 }
2340
2341 switch (vol_pg0.VolumeState) {
2342 case MPI2_RAID_VOL_STATE_OPTIMAL:
2343 case MPI2_RAID_VOL_STATE_ONLINE:
2344 state = RAID_STATE_ACTIVE;
2345 break;
2346 case MPI2_RAID_VOL_STATE_DEGRADED:
2347 state = RAID_STATE_DEGRADED;
2348 break;
2349 case MPI2_RAID_VOL_STATE_FAILED:
2350 case MPI2_RAID_VOL_STATE_MISSING:
2351 state = RAID_STATE_OFFLINE;
2352 break;
2353 }
2354 out:
2355 switch (ioc->hba_mpi_version_belonged) {
2356 case MPI2_VERSION:
2357 raid_set_state(mpt2sas_raid_template, dev, state);
2358 break;
2359 case MPI25_VERSION:
2360 case MPI26_VERSION:
2361 raid_set_state(mpt3sas_raid_template, dev, state);
2362 break;
2363 }
2364 }
2365
2366 /**
2367 * _scsih_set_level - set raid level
2368 * @ioc: ?
2369 * @sdev: scsi device struct
2370 * @volume_type: volume type
2371 */
2372 static void
_scsih_set_level(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev,u8 volume_type)2373 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2374 struct scsi_device *sdev, u8 volume_type)
2375 {
2376 enum raid_level level = RAID_LEVEL_UNKNOWN;
2377
2378 switch (volume_type) {
2379 case MPI2_RAID_VOL_TYPE_RAID0:
2380 level = RAID_LEVEL_0;
2381 break;
2382 case MPI2_RAID_VOL_TYPE_RAID10:
2383 level = RAID_LEVEL_10;
2384 break;
2385 case MPI2_RAID_VOL_TYPE_RAID1E:
2386 level = RAID_LEVEL_1E;
2387 break;
2388 case MPI2_RAID_VOL_TYPE_RAID1:
2389 level = RAID_LEVEL_1;
2390 break;
2391 }
2392
2393 switch (ioc->hba_mpi_version_belonged) {
2394 case MPI2_VERSION:
2395 raid_set_level(mpt2sas_raid_template,
2396 &sdev->sdev_gendev, level);
2397 break;
2398 case MPI25_VERSION:
2399 case MPI26_VERSION:
2400 raid_set_level(mpt3sas_raid_template,
2401 &sdev->sdev_gendev, level);
2402 break;
2403 }
2404 }
2405
2406
2407 /**
2408 * _scsih_get_volume_capabilities - volume capabilities
2409 * @ioc: per adapter object
2410 * @raid_device: the raid_device object
2411 *
2412 * Return: 0 for success, else 1
2413 */
2414 static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)2415 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2416 struct _raid_device *raid_device)
2417 {
2418 Mpi2RaidVolPage0_t *vol_pg0;
2419 Mpi2RaidPhysDiskPage0_t pd_pg0;
2420 Mpi2SasDevicePage0_t sas_device_pg0;
2421 Mpi2ConfigReply_t mpi_reply;
2422 u16 sz;
2423 u8 num_pds;
2424
2425 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2426 &num_pds)) || !num_pds) {
2427 dfailprintk(ioc,
2428 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2429 __FILE__, __LINE__, __func__));
2430 return 1;
2431 }
2432
2433 raid_device->num_pds = num_pds;
2434 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2435 sizeof(Mpi2RaidVol0PhysDisk_t));
2436 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2437 if (!vol_pg0) {
2438 dfailprintk(ioc,
2439 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2440 __FILE__, __LINE__, __func__));
2441 return 1;
2442 }
2443
2444 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2445 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2446 dfailprintk(ioc,
2447 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2448 __FILE__, __LINE__, __func__));
2449 kfree(vol_pg0);
2450 return 1;
2451 }
2452
2453 raid_device->volume_type = vol_pg0->VolumeType;
2454
2455 /* figure out what the underlying devices are by
2456 * obtaining the device_info bits for the 1st device
2457 */
2458 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2459 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2460 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2461 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2462 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2463 le16_to_cpu(pd_pg0.DevHandle)))) {
2464 raid_device->device_info =
2465 le32_to_cpu(sas_device_pg0.DeviceInfo);
2466 }
2467 }
2468
2469 kfree(vol_pg0);
2470 return 0;
2471 }
2472
2473 /**
2474 * _scsih_enable_tlr - setting TLR flags
2475 * @ioc: per adapter object
2476 * @sdev: scsi device struct
2477 *
2478 * Enabling Transaction Layer Retries for tape devices when
2479 * vpd page 0x90 is present
2480 *
2481 */
2482 static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev)2483 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2484 {
2485
2486 /* only for TAPE */
2487 if (sdev->type != TYPE_TAPE)
2488 return;
2489
2490 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2491 return;
2492
2493 sas_enable_tlr(sdev);
2494 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2495 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2496 return;
2497
2498 }
2499
2500 /**
2501 * scsih_slave_configure - device configure routine.
2502 * @sdev: scsi device struct
2503 *
2504 * Return: 0 if ok. Any other return is assumed to be an error and
2505 * the device is ignored.
2506 */
2507 static int
scsih_slave_configure(struct scsi_device * sdev)2508 scsih_slave_configure(struct scsi_device *sdev)
2509 {
2510 struct Scsi_Host *shost = sdev->host;
2511 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2512 struct MPT3SAS_DEVICE *sas_device_priv_data;
2513 struct MPT3SAS_TARGET *sas_target_priv_data;
2514 struct _sas_device *sas_device;
2515 struct _pcie_device *pcie_device;
2516 struct _raid_device *raid_device;
2517 unsigned long flags;
2518 int qdepth;
2519 u8 ssp_target = 0;
2520 char *ds = "";
2521 char *r_level = "";
2522 u16 handle, volume_handle = 0;
2523 u64 volume_wwid = 0;
2524
2525 qdepth = 1;
2526 sas_device_priv_data = sdev->hostdata;
2527 sas_device_priv_data->configured_lun = 1;
2528 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2529 sas_target_priv_data = sas_device_priv_data->sas_target;
2530 handle = sas_target_priv_data->handle;
2531
2532 /* raid volume handling */
2533 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2534
2535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2536 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2537 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2538 if (!raid_device) {
2539 dfailprintk(ioc,
2540 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2541 __FILE__, __LINE__, __func__));
2542 return 1;
2543 }
2544
2545 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2546 dfailprintk(ioc,
2547 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2548 __FILE__, __LINE__, __func__));
2549 return 1;
2550 }
2551
2552 /*
2553 * WARPDRIVE: Initialize the required data for Direct IO
2554 */
2555 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2556
2557 /* RAID Queue Depth Support
2558 * IS volume = underlying qdepth of drive type, either
2559 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2560 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2561 */
2562 if (raid_device->device_info &
2563 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2564 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2565 ds = "SSP";
2566 } else {
2567 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2568 if (raid_device->device_info &
2569 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2570 ds = "SATA";
2571 else
2572 ds = "STP";
2573 }
2574
2575 switch (raid_device->volume_type) {
2576 case MPI2_RAID_VOL_TYPE_RAID0:
2577 r_level = "RAID0";
2578 break;
2579 case MPI2_RAID_VOL_TYPE_RAID1E:
2580 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2581 if (ioc->manu_pg10.OEMIdentifier &&
2582 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2583 MFG10_GF0_R10_DISPLAY) &&
2584 !(raid_device->num_pds % 2))
2585 r_level = "RAID10";
2586 else
2587 r_level = "RAID1E";
2588 break;
2589 case MPI2_RAID_VOL_TYPE_RAID1:
2590 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2591 r_level = "RAID1";
2592 break;
2593 case MPI2_RAID_VOL_TYPE_RAID10:
2594 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2595 r_level = "RAID10";
2596 break;
2597 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2598 default:
2599 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2600 r_level = "RAIDX";
2601 break;
2602 }
2603
2604 if (!ioc->hide_ir_msg)
2605 sdev_printk(KERN_INFO, sdev,
2606 "%s: handle(0x%04x), wwid(0x%016llx),"
2607 " pd_count(%d), type(%s)\n",
2608 r_level, raid_device->handle,
2609 (unsigned long long)raid_device->wwid,
2610 raid_device->num_pds, ds);
2611
2612 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2613 blk_queue_max_hw_sectors(sdev->request_queue,
2614 MPT3SAS_RAID_MAX_SECTORS);
2615 sdev_printk(KERN_INFO, sdev,
2616 "Set queue's max_sector to: %u\n",
2617 MPT3SAS_RAID_MAX_SECTORS);
2618 }
2619
2620 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2621
2622 /* raid transport support */
2623 if (!ioc->is_warpdrive)
2624 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2625 return 0;
2626 }
2627
2628 /* non-raid handling */
2629 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2630 if (mpt3sas_config_get_volume_handle(ioc, handle,
2631 &volume_handle)) {
2632 dfailprintk(ioc,
2633 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2634 __FILE__, __LINE__, __func__));
2635 return 1;
2636 }
2637 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2638 volume_handle, &volume_wwid)) {
2639 dfailprintk(ioc,
2640 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2641 __FILE__, __LINE__, __func__));
2642 return 1;
2643 }
2644 }
2645
2646 /* PCIe handling */
2647 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2648 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2649 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2650 sas_device_priv_data->sas_target->sas_address);
2651 if (!pcie_device) {
2652 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2653 dfailprintk(ioc,
2654 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2655 __FILE__, __LINE__, __func__));
2656 return 1;
2657 }
2658
2659 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2660 ds = "NVMe";
2661 sdev_printk(KERN_INFO, sdev,
2662 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2663 ds, handle, (unsigned long long)pcie_device->wwid,
2664 pcie_device->port_num);
2665 if (pcie_device->enclosure_handle != 0)
2666 sdev_printk(KERN_INFO, sdev,
2667 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2668 ds,
2669 (unsigned long long)pcie_device->enclosure_logical_id,
2670 pcie_device->slot);
2671 if (pcie_device->connector_name[0] != '\0')
2672 sdev_printk(KERN_INFO, sdev,
2673 "%s: enclosure level(0x%04x),"
2674 "connector name( %s)\n", ds,
2675 pcie_device->enclosure_level,
2676 pcie_device->connector_name);
2677
2678 if (pcie_device->nvme_mdts)
2679 blk_queue_max_hw_sectors(sdev->request_queue,
2680 pcie_device->nvme_mdts/512);
2681
2682 pcie_device_put(pcie_device);
2683 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2684 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2685 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2686 ** merged and can eliminate holes created during merging
2687 ** operation.
2688 **/
2689 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2690 sdev->request_queue);
2691 blk_queue_virt_boundary(sdev->request_queue,
2692 ioc->page_size - 1);
2693 return 0;
2694 }
2695
2696 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2697 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2698 sas_device_priv_data->sas_target->sas_address,
2699 sas_device_priv_data->sas_target->port);
2700 if (!sas_device) {
2701 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2702 dfailprintk(ioc,
2703 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2704 __FILE__, __LINE__, __func__));
2705 return 1;
2706 }
2707
2708 sas_device->volume_handle = volume_handle;
2709 sas_device->volume_wwid = volume_wwid;
2710 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2711 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2712 ssp_target = 1;
2713 if (sas_device->device_info &
2714 MPI2_SAS_DEVICE_INFO_SEP) {
2715 sdev_printk(KERN_WARNING, sdev,
2716 "set ignore_delay_remove for handle(0x%04x)\n",
2717 sas_device_priv_data->sas_target->handle);
2718 sas_device_priv_data->ignore_delay_remove = 1;
2719 ds = "SES";
2720 } else
2721 ds = "SSP";
2722 } else {
2723 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2724 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2725 ds = "STP";
2726 else if (sas_device->device_info &
2727 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2728 ds = "SATA";
2729 }
2730
2731 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2732 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2733 ds, handle, (unsigned long long)sas_device->sas_address,
2734 sas_device->phy, (unsigned long long)sas_device->device_name);
2735
2736 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2737
2738 sas_device_put(sas_device);
2739 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2740
2741 if (!ssp_target)
2742 _scsih_display_sata_capabilities(ioc, handle, sdev);
2743
2744
2745 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2746
2747 if (ssp_target) {
2748 sas_read_port_mode_page(sdev);
2749 _scsih_enable_tlr(ioc, sdev);
2750 }
2751
2752 return 0;
2753 }
2754
2755 /**
2756 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2757 * @sdev: scsi device struct
2758 * @bdev: pointer to block device context
2759 * @capacity: device size (in 512 byte sectors)
2760 * @params: three element array to place output:
2761 * params[0] number of heads (max 255)
2762 * params[1] number of sectors (max 63)
2763 * params[2] number of cylinders
2764 */
2765 static int
scsih_bios_param(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int params[])2766 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2767 sector_t capacity, int params[])
2768 {
2769 int heads;
2770 int sectors;
2771 sector_t cylinders;
2772 ulong dummy;
2773
2774 heads = 64;
2775 sectors = 32;
2776
2777 dummy = heads * sectors;
2778 cylinders = capacity;
2779 sector_div(cylinders, dummy);
2780
2781 /*
2782 * Handle extended translation size for logical drives
2783 * > 1Gb
2784 */
2785 if ((ulong)capacity >= 0x200000) {
2786 heads = 255;
2787 sectors = 63;
2788 dummy = heads * sectors;
2789 cylinders = capacity;
2790 sector_div(cylinders, dummy);
2791 }
2792
2793 /* return result */
2794 params[0] = heads;
2795 params[1] = sectors;
2796 params[2] = cylinders;
2797
2798 return 0;
2799 }
2800
2801 /**
2802 * _scsih_response_code - translation of device response code
2803 * @ioc: per adapter object
2804 * @response_code: response code returned by the device
2805 */
2806 static void
_scsih_response_code(struct MPT3SAS_ADAPTER * ioc,u8 response_code)2807 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2808 {
2809 char *desc;
2810
2811 switch (response_code) {
2812 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2813 desc = "task management request completed";
2814 break;
2815 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2816 desc = "invalid frame";
2817 break;
2818 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2819 desc = "task management request not supported";
2820 break;
2821 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2822 desc = "task management request failed";
2823 break;
2824 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2825 desc = "task management request succeeded";
2826 break;
2827 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2828 desc = "invalid lun";
2829 break;
2830 case 0xA:
2831 desc = "overlapped tag attempted";
2832 break;
2833 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2834 desc = "task queued, however not sent to target";
2835 break;
2836 default:
2837 desc = "unknown";
2838 break;
2839 }
2840 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2841 }
2842
2843 /**
2844 * _scsih_tm_done - tm completion routine
2845 * @ioc: per adapter object
2846 * @smid: system request message index
2847 * @msix_index: MSIX table index supplied by the OS
2848 * @reply: reply message frame(lower 32bit addr)
2849 * Context: none.
2850 *
2851 * The callback handler when using scsih_issue_tm.
2852 *
2853 * Return: 1 meaning mf should be freed from _base_interrupt
2854 * 0 means the mf is freed from this function.
2855 */
2856 static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)2857 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2858 {
2859 MPI2DefaultReply_t *mpi_reply;
2860
2861 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2862 return 1;
2863 if (ioc->tm_cmds.smid != smid)
2864 return 1;
2865 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2866 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2867 if (mpi_reply) {
2868 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2869 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2870 }
2871 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2872 complete(&ioc->tm_cmds.done);
2873 return 1;
2874 }
2875
2876 /**
2877 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2878 * @ioc: per adapter object
2879 * @handle: device handle
2880 *
2881 * During taskmangement request, we need to freeze the device queue.
2882 */
2883 void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)2884 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2885 {
2886 struct MPT3SAS_DEVICE *sas_device_priv_data;
2887 struct scsi_device *sdev;
2888 u8 skip = 0;
2889
2890 shost_for_each_device(sdev, ioc->shost) {
2891 if (skip)
2892 continue;
2893 sas_device_priv_data = sdev->hostdata;
2894 if (!sas_device_priv_data)
2895 continue;
2896 if (sas_device_priv_data->sas_target->handle == handle) {
2897 sas_device_priv_data->sas_target->tm_busy = 1;
2898 skip = 1;
2899 ioc->ignore_loginfos = 1;
2900 }
2901 }
2902 }
2903
2904 /**
2905 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2906 * @ioc: per adapter object
2907 * @handle: device handle
2908 *
2909 * During taskmangement request, we need to freeze the device queue.
2910 */
2911 void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)2912 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2913 {
2914 struct MPT3SAS_DEVICE *sas_device_priv_data;
2915 struct scsi_device *sdev;
2916 u8 skip = 0;
2917
2918 shost_for_each_device(sdev, ioc->shost) {
2919 if (skip)
2920 continue;
2921 sas_device_priv_data = sdev->hostdata;
2922 if (!sas_device_priv_data)
2923 continue;
2924 if (sas_device_priv_data->sas_target->handle == handle) {
2925 sas_device_priv_data->sas_target->tm_busy = 0;
2926 skip = 1;
2927 ioc->ignore_loginfos = 0;
2928 }
2929 }
2930 }
2931
2932 /**
2933 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2934 * @ioc: per adapter object
2935 * @channel: the channel assigned by the OS
2936 * @id: the id assigned by the OS
2937 * @lun: lun number
2938 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2939 * @smid_task: smid assigned to the task
2940 *
2941 * Look whether TM has aborted the timed out SCSI command, if
2942 * TM has aborted the IO then return SUCCESS else return FAILED.
2943 */
2944 static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER * ioc,uint channel,uint id,uint lun,u8 type,u16 smid_task)2945 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2946 uint id, uint lun, u8 type, u16 smid_task)
2947 {
2948
2949 if (smid_task <= ioc->shost->can_queue) {
2950 switch (type) {
2951 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2952 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2953 id, channel)))
2954 return SUCCESS;
2955 break;
2956 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2957 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2958 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2959 lun, channel)))
2960 return SUCCESS;
2961 break;
2962 default:
2963 return SUCCESS;
2964 }
2965 } else if (smid_task == ioc->scsih_cmds.smid) {
2966 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2967 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2968 return SUCCESS;
2969 } else if (smid_task == ioc->ctl_cmds.smid) {
2970 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2971 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2972 return SUCCESS;
2973 }
2974
2975 return FAILED;
2976 }
2977
2978 /**
2979 * scsih_tm_post_processing - post processing of target & LUN reset
2980 * @ioc: per adapter object
2981 * @handle: device handle
2982 * @channel: the channel assigned by the OS
2983 * @id: the id assigned by the OS
2984 * @lun: lun number
2985 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2986 * @smid_task: smid assigned to the task
2987 *
2988 * Post processing of target & LUN reset. Due to interrupt latency
2989 * issue it possible that interrupt for aborted IO might not be
2990 * received yet. So before returning failure status, poll the
2991 * reply descriptor pools for the reply of timed out SCSI command.
2992 * Return FAILED status if reply for timed out is not received
2993 * otherwise return SUCCESS.
2994 */
2995 static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER * ioc,u16 handle,uint channel,uint id,uint lun,u8 type,u16 smid_task)2996 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2997 uint channel, uint id, uint lun, u8 type, u16 smid_task)
2998 {
2999 int rc;
3000
3001 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3002 if (rc == SUCCESS)
3003 return rc;
3004
3005 ioc_info(ioc,
3006 "Poll ReplyDescriptor queues for completion of"
3007 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3008 smid_task, type, handle);
3009
3010 /*
3011 * Due to interrupt latency issues, driver may receive interrupt for
3012 * TM first and then for aborted SCSI IO command. So, poll all the
3013 * ReplyDescriptor pools before returning the FAILED status to SML.
3014 */
3015 mpt3sas_base_mask_interrupts(ioc);
3016 mpt3sas_base_sync_reply_irqs(ioc, 1);
3017 mpt3sas_base_unmask_interrupts(ioc);
3018
3019 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3020 }
3021
/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible to check for outstanding commands.
 * The caller must hold ioc->tm_cmds.mutex (see lockdep assertion below).
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* Only one TM command may be outstanding at a time. */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/*
	 * If the IOC is in an unusable state (doorbell in use, FAULT, or
	 * COREDUMP), a hard reset is the only recovery; its outcome becomes
	 * the TM result.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests travel on the high-priority request queue. */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method (e.g. protocol-level reset for NVMe) only applies to
	 * abort/query task types. */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/* NOTE(review): mpt3sas_check_cmd_timeout is presumably a
		 * macro that can set issue_reset — otherwise the branch
		 * below would be dead; confirm against mpt3sas_base.h. */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	/* Map the TM outcome to SUCCESS/FAILED per task type. */
	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If DevHandle filed in smid_task's entry of request pool
		 * doesn't match with device handle on which this task abort
		 * TM is received then it means that TM has successfully
		 * aborted the timed out command. Since smid_task's entry in
		 * request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task’s entry won’t be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* Always unfreeze the target queue and release the TM slot. */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
3189
/**
 * mpt3sas_scsih_issue_locked_tm - serialized wrapper for issuing TM requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Takes ioc->tm_cmds.mutex around mpt3sas_scsih_issue_tm(), which
 * requires the mutex to be held.
 *
 * Return: SUCCESS or FAILED.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
	u16 msix_task, u8 timeout, u8 tr_method)
{
	int rc;

	mutex_lock(&ioc->tm_cmds.mutex);
	rc = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return rc;
}
3203
/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers. Prints the command and
 * identifying information (handle, wwid/sas_address, enclosure data)
 * for the device the command was issued to.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive controllers hide IR messages; adjust wording. */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		/* RAID volume target. */
		starget_printk(KERN_INFO, starget,
			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
			device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* NVMe/PCIe attached device; lookup under pcie_device_lock. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
				pcie_device->handle,
				(unsigned long long)pcie_device->wwid,
				pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
					"enclosure logical id(0x%016llx), slot(%d)\n",
					(unsigned long long)
					pcie_device->enclosure_logical_id,
					pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
					"enclosure level(0x%04x), connector name( %s)\n",
					pcie_device->enclosure_level,
					pcie_device->connector_name);
			/* Drop the reference taken by the lookup above. */
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		/* SAS/SATA attached device; lookup under sas_device_lock. */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				   (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			/* Drop the reference taken by the lookup above. */
			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
3285
/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Sends an ABORT_TASK TM for the timed out command, after filtering
 * cases where abort is impossible or unnecessary.
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scmd->request->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	/* Device was deleted (or host is going away): complete the command
	 * as DID_NO_CONNECT and report success. */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* Non-SCSI (NVMe) PCIe devices use the NVMe-specific abort timeout. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3359
/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Issues a LOGICAL_UNIT_RESET TM to the device (or to the containing
 * volume for hidden RAID components).
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* Device was deleted (or host is going away): complete the command
	 * as DID_NO_CONNECT and report success. */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get a protocol-level reset and their own timeout;
	 * everything else uses a link reset. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
3440
/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Issues a TARGET_RESET TM to the target (or to the containing volume
 * for hidden RAID components).
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* Target was deleted (or host is going away): complete the command
	 * as DID_NO_CONNECT and report success. */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get a protocol-level reset and their own timeout;
	 * everything else uses a link reset. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, 0,
		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3518
3519
3520 /**
3521 * scsih_host_reset - eh threads main host reset routine
3522 * @scmd: pointer to scsi command object
3523 *
3524 * Return: SUCCESS if command aborted else FAILED
3525 */
3526 static int
scsih_host_reset(struct scsi_cmnd * scmd)3527 scsih_host_reset(struct scsi_cmnd *scmd)
3528 {
3529 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3530 int r, retval;
3531
3532 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3533 scsi_print_command(scmd);
3534
3535 if (ioc->is_driver_loading || ioc->remove_host) {
3536 ioc_info(ioc, "Blocking the host reset\n");
3537 r = FAILED;
3538 goto out;
3539 }
3540
3541 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3542 r = (retval < 0) ? FAILED : SUCCESS;
3543 out:
3544 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3545 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3546
3547 return r;
3548 }
3549
/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	/* No worker thread means events cannot be processed; drop them. */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* First get(): reference held while the event is on fw_event_list. */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* Second get(): reference held by the queued work item. */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
3576
3577 /**
3578 * _scsih_fw_event_del_from_list - delete fw_event from the list
3579 * @ioc: per adapter object
3580 * @fw_event: object describing the event
3581 * Context: This function will acquire ioc->fw_event_lock.
3582 *
3583 * If the fw_event is on the fw_event_list, remove it and do a put.
3584 */
3585 static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)3586 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3587 *fw_event)
3588 {
3589 unsigned long flags;
3590
3591 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3592 if (!list_empty(&fw_event->list)) {
3593 list_del_init(&fw_event->list);
3594 fw_event_work_put(fw_event);
3595 }
3596 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3597 }
3598
3599
3600 /**
3601 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3602 * @ioc: per adapter object
3603 * @event_data: trigger event data
3604 */
3605 void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER * ioc,struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data)3606 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3607 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3608 {
3609 struct fw_event_work *fw_event;
3610 u16 sz;
3611
3612 if (ioc->is_driver_loading)
3613 return;
3614 sz = sizeof(*event_data);
3615 fw_event = alloc_fw_event_work(sz);
3616 if (!fw_event)
3617 return;
3618 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3619 fw_event->ioc = ioc;
3620 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3621 _scsih_fw_event_add(ioc, fw_event);
3622 fw_event_work_put(fw_event);
3623 }
3624
3625 /**
3626 * _scsih_error_recovery_delete_devices - remove devices not responding
3627 * @ioc: per adapter object
3628 */
3629 static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER * ioc)3630 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3631 {
3632 struct fw_event_work *fw_event;
3633
3634 if (ioc->is_driver_loading)
3635 return;
3636 fw_event = alloc_fw_event_work(0);
3637 if (!fw_event)
3638 return;
3639 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3640 fw_event->ioc = ioc;
3641 _scsih_fw_event_add(ioc, fw_event);
3642 fw_event_work_put(fw_event);
3643 }
3644
3645 /**
3646 * mpt3sas_port_enable_complete - port enable completed (fake event)
3647 * @ioc: per adapter object
3648 */
3649 void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER * ioc)3650 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3651 {
3652 struct fw_event_work *fw_event;
3653
3654 fw_event = alloc_fw_event_work(0);
3655 if (!fw_event)
3656 return;
3657 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3658 fw_event->ioc = ioc;
3659 _scsih_fw_event_add(ioc, fw_event);
3660 fw_event_work_put(fw_event);
3661 }
3662
dequeue_next_fw_event(struct MPT3SAS_ADAPTER * ioc)3663 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3664 {
3665 unsigned long flags;
3666 struct fw_event_work *fw_event = NULL;
3667
3668 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3669 if (!list_empty(&ioc->fw_event_list)) {
3670 fw_event = list_first_entry(&ioc->fw_event_list,
3671 struct fw_event_work, list);
3672 list_del_init(&fw_event->list);
3673 }
3674 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3675
3676 return fw_event;
3677 }
3678
/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Context: task, can sleep
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	/* Nothing queued and nothing running, or no worker thread at all. */
	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;

	ioc->fw_events_cleanup = 1;
	/* Drain queued events first, then the one currently executing. */
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	     (fw_event = ioc->current_event)) {
		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

		/* Drop the list's reference (transferred by dequeue) or the
		 * caller-visible reference on current_event. */
		fw_event_work_put(fw_event);
	}
	ioc->fw_events_cleanup = 0;
}
3715
3716 /**
3717 * _scsih_internal_device_block - block the sdev device
3718 * @sdev: per device object
3719 * @sas_device_priv_data : per device driver private data
3720 *
3721 * make sure device is blocked without error, if not
3722 * print an error
3723 */
3724 static void
_scsih_internal_device_block(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3725 _scsih_internal_device_block(struct scsi_device *sdev,
3726 struct MPT3SAS_DEVICE *sas_device_priv_data)
3727 {
3728 int r = 0;
3729
3730 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3731 sas_device_priv_data->sas_target->handle);
3732 sas_device_priv_data->block = 1;
3733
3734 r = scsi_internal_device_block_nowait(sdev);
3735 if (r == -EINVAL)
3736 sdev_printk(KERN_WARNING, sdev,
3737 "device_block failed with return(%d) for handle(0x%04x)\n",
3738 r, sas_device_priv_data->sas_target->handle);
3739 }
3740
/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 * make sure device is unblocked without error, if not retry
 * by blocking and then unblocking
 */

static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
	struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call. We need to perform a block again
		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		/* Retry step 1: force the device back to SDEV_BLOCK. */
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		/* Retry step 2: transition SDEV_BLOCK -> SDEV_RUNNING. */
		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}
3784
3785 /**
3786 * _scsih_ublock_io_all_device - unblock every device
3787 * @ioc: per adapter object
3788 *
3789 * change the device state from block to running
3790 */
3791 static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER * ioc)3792 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3793 {
3794 struct MPT3SAS_DEVICE *sas_device_priv_data;
3795 struct scsi_device *sdev;
3796
3797 shost_for_each_device(sdev, ioc->shost) {
3798 sas_device_priv_data = sdev->hostdata;
3799 if (!sas_device_priv_data)
3800 continue;
3801 if (!sas_device_priv_data->block)
3802 continue;
3803
3804 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3805 "device_running, handle(0x%04x)\n",
3806 sas_device_priv_data->sas_target->handle));
3807 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3808 }
3809 }
3810
3811
3812 /**
3813 * _scsih_ublock_io_device - prepare device to be deleted
3814 * @ioc: per adapter object
3815 * @sas_address: sas address
3816 * @port: hba port entry
3817 *
3818 * unblock then put device in offline state
3819 */
3820 static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)3821 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3822 u64 sas_address, struct hba_port *port)
3823 {
3824 struct MPT3SAS_DEVICE *sas_device_priv_data;
3825 struct scsi_device *sdev;
3826
3827 shost_for_each_device(sdev, ioc->shost) {
3828 sas_device_priv_data = sdev->hostdata;
3829 if (!sas_device_priv_data)
3830 continue;
3831 if (sas_device_priv_data->sas_target->sas_address
3832 != sas_address)
3833 continue;
3834 if (sas_device_priv_data->sas_target->port != port)
3835 continue;
3836 if (sas_device_priv_data->block)
3837 _scsih_internal_device_unblock(sdev,
3838 sas_device_priv_data);
3839 }
3840 }
3841
3842 /**
3843 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3844 * @ioc: per adapter object
3845 *
3846 * During device pull we need to appropriately set the sdev state.
3847 */
3848 static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER * ioc)3849 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3850 {
3851 struct MPT3SAS_DEVICE *sas_device_priv_data;
3852 struct scsi_device *sdev;
3853
3854 shost_for_each_device(sdev, ioc->shost) {
3855 sas_device_priv_data = sdev->hostdata;
3856 if (!sas_device_priv_data)
3857 continue;
3858 if (sas_device_priv_data->block)
3859 continue;
3860 if (sas_device_priv_data->ignore_delay_remove) {
3861 sdev_printk(KERN_INFO, sdev,
3862 "%s skip device_block for SES handle(0x%04x)\n",
3863 __func__, sas_device_priv_data->sas_target->handle);
3864 continue;
3865 }
3866 _scsih_internal_device_block(sdev, sas_device_priv_data);
3867 }
3868 }
3869
3870 /**
3871 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3872 * @ioc: per adapter object
3873 * @handle: device handle
3874 *
3875 * During device pull we need to appropriately set the sdev state.
3876 */
3877 static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)3878 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3879 {
3880 struct MPT3SAS_DEVICE *sas_device_priv_data;
3881 struct scsi_device *sdev;
3882 struct _sas_device *sas_device;
3883
3884 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3885
3886 shost_for_each_device(sdev, ioc->shost) {
3887 sas_device_priv_data = sdev->hostdata;
3888 if (!sas_device_priv_data)
3889 continue;
3890 if (sas_device_priv_data->sas_target->handle != handle)
3891 continue;
3892 if (sas_device_priv_data->block)
3893 continue;
3894 if (sas_device && sas_device->pend_sas_rphy_add)
3895 continue;
3896 if (sas_device_priv_data->ignore_delay_remove) {
3897 sdev_printk(KERN_INFO, sdev,
3898 "%s skip device_block for SES handle(0x%04x)\n",
3899 __func__, sas_device_priv_data->sas_target->handle);
3900 continue;
3901 }
3902 _scsih_internal_device_block(sdev, sas_device_priv_data);
3903 }
3904
3905 if (sas_device)
3906 sas_device_put(sas_device);
3907 }
3908
/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 *
 * Note: end devices are only *marked* in ioc->blocking_handles here;
 * the caller is expected to walk that bitmap and perform the actual
 * blocking. Child expanders are handled by recursing into this
 * function.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	/* recursion terminates when a child expander lookup fails */
	if (!sas_expander)
		return;

	/* first pass: record every directly attached end device's handle
	 * in the blocking_handles bitmap (lookup done under sas_device_lock)
	 */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
				    ioc->blocking_handles);
				/* drop the reference taken by the lookup */
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* second pass: descend into edge/fanout expanders hanging off
	 * this one so the whole subtree gets marked
	 */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}
3963
3964 /**
3965 * _scsih_block_io_to_children_attached_directly
3966 * @ioc: per adapter object
3967 * @event_data: topology change event data
3968 *
3969 * This routine set sdev state to SDEV_BLOCK for all devices
3970 * direct attached during device pull.
3971 */
3972 static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)3973 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3974 Mpi2EventDataSasTopologyChangeList_t *event_data)
3975 {
3976 int i;
3977 u16 handle;
3978 u16 reason_code;
3979
3980 for (i = 0; i < event_data->NumEntries; i++) {
3981 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3982 if (!handle)
3983 continue;
3984 reason_code = event_data->PHY[i].PhyStatus &
3985 MPI2_EVENT_SAS_TOPO_RC_MASK;
3986 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3987 _scsih_block_io_device(ioc, handle);
3988 }
3989 }
3990
3991 /**
3992 * _scsih_block_io_to_pcie_children_attached_directly
3993 * @ioc: per adapter object
3994 * @event_data: topology change event data
3995 *
3996 * This routine set sdev state to SDEV_BLOCK for all devices
3997 * direct attached during device pull/reconnect.
3998 */
3999 static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)4000 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4001 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4002 {
4003 int i;
4004 u16 handle;
4005 u16 reason_code;
4006
4007 for (i = 0; i < event_data->NumEntries; i++) {
4008 handle =
4009 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4010 if (!handle)
4011 continue;
4012 reason_code = event_data->PortEntry[i].PortStatus;
4013 if (reason_code ==
4014 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4015 _scsih_block_io_device(ioc, handle);
4016 }
4017 }
/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code is to initiate the device removal handshake protocol
 * with controller firmware. This function will issue target reset
 * using high priority request queue. It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
 *
 * This is designed to send muliple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;
	struct hba_port *port = NULL;

	/* no handshake possible while PCI error recovery is in flight or
	 * the IOC is not in the operational state
	 */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}

	/* if PD, then return */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* look up the device by handle: first as a SAS device (under
	 * sas_device_lock), then as an NVMe/PCIe device (under
	 * pcie_device_lock). On a match, mark the target deleted and
	 * capture its address/port for the unblock below.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
		    pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		/* pick the reset flavor for true NVMe devices vs
		 * PCIe-attached SCSI devices
		 */
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
				    handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
						    (u64)sas_device->enclosure_logical_id,
						    sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
						    sas_device->enclosure_level,
						    sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
						    (u64)pcie_device->enclosure_logical_id,
						    pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
						    pcie_device->enclosure_level,
						    pcie_device->connector_name));
		}
		/* release any blocked I/O so it can fail back to the
		 * midlayer, then invalidate the firmware handle
		 */
		_scsih_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* grab a high-priority smid; if the fifo is exhausted, queue the
	 * handle on delayed_tr_list and retry from a future completion
	 */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		goto out;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_cb_idx));
	/* build and fire the target reset on the high priority queue */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

 out:
	/* drop the lookup references taken above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}
4160
/**
 * _scsih_tm_tr_complete -
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	/* bail out when the host cannot accept further requests; the mf
	 * is returned to _base_interrupt for freeing
	 */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* sanity check: the reply must be for the same device handle the
	 * original target reset was sent to
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle,
				   le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* next step of the handshake: send MPI2_SAS_OP_REMOVE_DEVICE.
	 * If no smid is free, queue the handle on delayed_sc_list and
	 * let a later completion issue it.
	 */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
				    handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	/* DevHandle copied as-is: both are little-endian on the wire */
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	/* feed the next queued target reset, if any */
	return _scsih_check_for_pending_tm(ioc, smid);
}
4251
4252 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4253 * issue to IOC or not.
4254 * @ioc: per adapter object
4255 * @scmd: pointer to scsi command object
4256 *
4257 * Returns true if scmd can be issued to IOC otherwise returns false.
4258 */
_scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd)4259 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4260 struct scsi_cmnd *scmd)
4261 {
4262
4263 if (ioc->pci_error_recovery)
4264 return false;
4265
4266 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4267 if (ioc->remove_host)
4268 return false;
4269
4270 return true;
4271 }
4272
4273 if (ioc->remove_host) {
4274
4275 switch (scmd->cmnd[0]) {
4276 case SYNCHRONIZE_CACHE:
4277 case START_STOP:
4278 return true;
4279 default:
4280 return false;
4281 }
4282 }
4283
4284 return true;
4285 }
4286
4287 /**
4288 * _scsih_sas_control_complete - completion routine
4289 * @ioc: per adapter object
4290 * @smid: system request message index
4291 * @msix_index: MSIX table index supplied by the OS
4292 * @reply: reply message frame(lower 32bit addr)
4293 * Context: interrupt time.
4294 *
4295 * This is the sas iounit control completion routine.
4296 * This code is part of the code to initiate the device removal
4297 * handshake protocol with controller firmware.
4298 *
4299 * Return: 1 meaning mf should be freed from _base_interrupt
4300 * 0 means the mf is freed from this function.
4301 */
4302 static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4303 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4304 u8 msix_index, u32 reply)
4305 {
4306 Mpi2SasIoUnitControlReply_t *mpi_reply =
4307 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4308
4309 if (likely(mpi_reply)) {
4310 dewtprintk(ioc,
4311 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4312 le16_to_cpu(mpi_reply->DevHandle), smid,
4313 le16_to_cpu(mpi_reply->IOCStatus),
4314 le32_to_cpu(mpi_reply->IOCLogInfo)));
4315 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4316 MPI2_IOCSTATUS_SUCCESS) {
4317 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4318 ioc->device_remove_in_progress);
4319 }
4320 } else {
4321 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4322 __FILE__, __LINE__, __func__);
4323 }
4324 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4325 }
4326
/**
 * _scsih_tm_tr_volume_send - send target reset request for volumes
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This is designed to send muliple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _tr_list *delayed_tr;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return;
	}

	/* fifo full: queue the handle for a later completion. A failed
	 * GFP_ATOMIC allocation silently drops the request.
	 */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		return;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_volume_cb_idx));
	/* build and post the target reset on the high priority queue */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	ioc->put_smid_hi_priority(ioc, smid, 0);
}
4375
/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* the reply's handle must match the volume handle in the original
	 * request frame; mismatch indicates a spurious interrupt
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle, le16_to_cpu(mpi_reply->DevHandle),
				   smid));
		return 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* reuse this smid for the next queued target reset, if any */
	return _scsih_check_for_pending_tm(ioc, smid);
}
4426
/**
 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @event: Event ID
 * @event_context: used to track events uniquely
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
	U32 event_context)
{
	Mpi2EventAckRequest_t *ack_request;
	/* index into the internal command lookup table for this smid */
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
			    le16_to_cpu(event), smid, ioc->base_cb_idx));
	/* build the ACK; event/event_context are stored as received
	 * (presumably already little-endian from the event frame - note
	 * the le16_to_cpu in the debug print above)
	 */
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}
4464
/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *				 sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	/* index into the internal command lookup table for this smid */
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* drop the request when the host is going away or unable to
	 * accept new firmware commands
	 */
	if (ioc->remove_host) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host has been removed\n",
				    __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return;
	}

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_sas_control_cb_idx));
	/* issue the deferred MPI2_SAS_OP_REMOVE_DEVICE */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}
4520
4521 /**
4522 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4523 * @ioc: per adapter object
4524 * @smid: system request message index
4525 *
4526 * Context: Executed in interrupt context
4527 *
4528 * This will check delayed internal messages list, and process the
4529 * next request.
4530 *
4531 * Return: 1 meaning mf should be freed from _base_interrupt
4532 * 0 means the mf is freed from this function.
4533 */
4534 u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER * ioc,u16 smid)4535 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4536 {
4537 struct _sc_list *delayed_sc;
4538 struct _event_ack_list *delayed_event_ack;
4539
4540 if (!list_empty(&ioc->delayed_event_ack_list)) {
4541 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4542 struct _event_ack_list, list);
4543 _scsih_issue_delayed_event_ack(ioc, smid,
4544 delayed_event_ack->Event, delayed_event_ack->EventContext);
4545 list_del(&delayed_event_ack->list);
4546 kfree(delayed_event_ack);
4547 return 0;
4548 }
4549
4550 if (!list_empty(&ioc->delayed_sc_list)) {
4551 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4552 struct _sc_list, list);
4553 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4554 delayed_sc->handle);
4555 list_del(&delayed_sc->list);
4556 kfree(delayed_sc);
4557 return 0;
4558 }
4559 return 1;
4560 }
4561
4562 /**
4563 * _scsih_check_for_pending_tm - check for pending task management
4564 * @ioc: per adapter object
4565 * @smid: system request message index
4566 *
4567 * This will check delayed target reset list, and feed the
4568 * next reqeust.
4569 *
4570 * Return: 1 meaning mf should be freed from _base_interrupt
4571 * 0 means the mf is freed from this function.
4572 */
4573 static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER * ioc,u16 smid)4574 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4575 {
4576 struct _tr_list *delayed_tr;
4577
4578 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4579 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4580 struct _tr_list, list);
4581 mpt3sas_base_free_smid(ioc, smid);
4582 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4583 list_del(&delayed_tr->list);
4584 kfree(delayed_tr);
4585 return 0;
4586 }
4587
4588 if (!list_empty(&ioc->delayed_tr_list)) {
4589 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4590 struct _tr_list, list);
4591 mpt3sas_base_free_smid(ioc, smid);
4592 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4593 list_del(&delayed_tr->list);
4594 kfree(delayed_tr);
4595 return 0;
4596 }
4597
4598 return 1;
4599 }
4600
/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot. When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* start the removal handshake for every phy reporting a target
	 * that stopped responding
	 */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* handles below num_phys belong to the HBA itself, i.e. these
	 * phys are direct attached - no expander involved
	 */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* drain the blocking_handles bitmap filled in above,
		 * blocking each recorded device handle in turn
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	/* only a full expander removal voids queued add events */
	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4681
/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 * events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot. When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* start the removal handshake for every port whose attached
	 * device stopped responding
	 */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* no switch handle means the devices are direct attached */
	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);
		return;
	}
	/* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
	    == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
	    (event_data->SwitchStatus ==
	    MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);

	/* NOTE(review): SwitchStatus is compared against the SAS topology
	 * ES_* constants here and below - presumably the MPI26 PCIe SS_*
	 * values are numerically identical; confirm against the MPI spec.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data =
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			/* void queued add events for the removed switch */
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
			    switch_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4753
4754 /**
4755 * _scsih_set_volume_delete_flag - setting volume delete flag
4756 * @ioc: per adapter object
4757 * @handle: device handle
4758 *
4759 * This returns nothing.
4760 */
4761 static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)4762 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4763 {
4764 struct _raid_device *raid_device;
4765 struct MPT3SAS_TARGET *sas_target_priv_data;
4766 unsigned long flags;
4767
4768 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4769 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4770 if (raid_device && raid_device->starget &&
4771 raid_device->starget->hostdata) {
4772 sas_target_priv_data =
4773 raid_device->starget->hostdata;
4774 sas_target_priv_data->deleted = 1;
4775 dewtprintk(ioc,
4776 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4777 handle, (u64)raid_device->wwid));
4778 }
4779 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4780 }
4781
4782 /**
4783 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4784 * @handle: input handle
4785 * @a: handle for volume a
4786 * @b: handle for volume b
4787 *
4788 * IR firmware only supports two raid volumes. The purpose of this
4789 * routine is to set the volume handle in either a or b. When the given
4790 * input handle is non-zero, or when a and b have not been set before.
4791 */
4792 static void
_scsih_set_volume_handle_for_tr(u16 handle,u16 * a,u16 * b)4793 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4794 {
4795 if (!handle || handle == *a || handle == *b)
4796 return;
4797 if (!*a)
4798 *a = handle;
4799 else if (!*b)
4800 *b = handle;
4801 }
4802
4803 /**
4804 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4805 * @ioc: per adapter object
4806 * @event_data: the event data payload
4807 * Context: interrupt time.
4808 *
4809 * This routine will send target reset to volume, followed by target
4810 * resets to the PDs. This is called when a PD has been removed, or
4811 * volume has been deleted or removed. When the target reset is sent
4812 * to volume, the PD target resets need to be queued to start upon
4813 * completion of the volume target reset.
4814 */
4815 static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrConfigChangeList_t * event_data)4816 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4817 Mpi2EventDataIrConfigChangeList_t *event_data)
4818 {
4819 Mpi2EventIrConfigElement_t *element;
4820 int i;
4821 u16 handle, volume_handle, a, b;
4822 struct _tr_list *delayed_tr;
4823
4824 a = 0;
4825 b = 0;
4826
4827 if (ioc->is_warpdrive)
4828 return;
4829
4830 /* Volume Resets for Deleted or Removed */
4831 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4832 for (i = 0; i < event_data->NumElements; i++, element++) {
4833 if (le32_to_cpu(event_data->Flags) &
4834 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4835 continue;
4836 if (element->ReasonCode ==
4837 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4838 element->ReasonCode ==
4839 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4840 volume_handle = le16_to_cpu(element->VolDevHandle);
4841 _scsih_set_volume_delete_flag(ioc, volume_handle);
4842 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4843 }
4844 }
4845
4846 /* Volume Resets for UNHIDE events */
4847 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4848 for (i = 0; i < event_data->NumElements; i++, element++) {
4849 if (le32_to_cpu(event_data->Flags) &
4850 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4851 continue;
4852 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4853 volume_handle = le16_to_cpu(element->VolDevHandle);
4854 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4855 }
4856 }
4857
4858 if (a)
4859 _scsih_tm_tr_volume_send(ioc, a);
4860 if (b)
4861 _scsih_tm_tr_volume_send(ioc, b);
4862
4863 /* PD target resets */
4864 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4865 for (i = 0; i < event_data->NumElements; i++, element++) {
4866 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4867 continue;
4868 handle = le16_to_cpu(element->PhysDiskDevHandle);
4869 volume_handle = le16_to_cpu(element->VolDevHandle);
4870 clear_bit(handle, ioc->pd_handles);
4871 if (!volume_handle)
4872 _scsih_tm_tr_send(ioc, handle);
4873 else if (volume_handle == a || volume_handle == b) {
4874 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4875 BUG_ON(!delayed_tr);
4876 INIT_LIST_HEAD(&delayed_tr->list);
4877 delayed_tr->handle = handle;
4878 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4879 dewtprintk(ioc,
4880 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4881 handle));
4882 } else
4883 _scsih_tm_tr_send(ioc, handle);
4884 }
4885 }
4886
4887
4888 /**
4889 * _scsih_check_volume_delete_events - set delete flag for volumes
4890 * @ioc: per adapter object
4891 * @event_data: the event data payload
4892 * Context: interrupt time.
4893 *
4894 * This will handle the case when the cable connected to entire volume is
4895 * pulled. We will take care of setting the deleted flag so normal IO will
4896 * not be sent.
4897 */
4898 static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrVolume_t * event_data)4899 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4900 Mpi2EventDataIrVolume_t *event_data)
4901 {
4902 u32 state;
4903
4904 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4905 return;
4906 state = le32_to_cpu(event_data->NewValue);
4907 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4908 MPI2_RAID_VOL_STATE_FAILED)
4909 _scsih_set_volume_delete_flag(ioc,
4910 le16_to_cpu(event_data->VolDevHandle));
4911 }
4912
4913 /**
4914 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4915 * @ioc: per adapter object
4916 * @event_data: the temp threshold event data
4917 * Context: interrupt time.
4918 */
4919 static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataTemperature_t * event_data)4920 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4921 Mpi2EventDataTemperature_t *event_data)
4922 {
4923 u32 doorbell;
4924 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4925 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4926 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4927 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4928 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4929 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4930 event_data->SensorNum);
4931 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4932 event_data->CurrentTemperature);
4933 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4934 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4935 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4936 MPI2_IOC_STATE_FAULT) {
4937 mpt3sas_print_fault_code(ioc,
4938 doorbell & MPI2_DOORBELL_DATA_MASK);
4939 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4940 MPI2_IOC_STATE_COREDUMP) {
4941 mpt3sas_print_coredump_info(ioc,
4942 doorbell & MPI2_DOORBELL_DATA_MASK);
4943 }
4944 }
4945 }
4946 }
4947
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
	unsigned char opcode = scmd->cmnd[0];

	/* Only ATA pass-through commands participate in the firmware SATL
	 * serialization workaround.
	 */
	if (opcode != ATA_12 && opcode != ATA_16)
		return 0;

	if (!pending) {
		clear_bit(0, &priv->ata_command_pending);
		return 0;
	}

	/* Non-zero return means another ATA command is already in flight. */
	return test_and_set_bit(0, &priv->ata_command_pending);
}
4961
4962 /**
4963 * _scsih_flush_running_cmds - completing outstanding commands.
4964 * @ioc: per adapter object
4965 *
4966 * The flushing out of all pending scmd commands following host reset,
4967 * where all IO is dropped to the floor.
4968 */
4969 static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER * ioc)4970 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4971 {
4972 struct scsi_cmnd *scmd;
4973 struct scsiio_tracker *st;
4974 u16 smid;
4975 int count = 0;
4976
4977 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4978 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4979 if (!scmd)
4980 continue;
4981 count++;
4982 _scsih_set_satl_pending(scmd, false);
4983 st = scsi_cmd_priv(scmd);
4984 mpt3sas_base_clear_st(ioc, st);
4985 scsi_dma_unmap(scmd);
4986 if (ioc->pci_error_recovery || ioc->remove_host)
4987 scmd->result = DID_NO_CONNECT << 16;
4988 else
4989 scmd->result = DID_RESET << 16;
4990 scmd->scsi_done(scmd);
4991 }
4992 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4993 }
4994
4995 /**
4996 * _scsih_setup_eedp - setup MPI request for EEDP transfer
4997 * @ioc: per adapter object
4998 * @scmd: pointer to scsi command object
4999 * @mpi_request: pointer to the SCSI_IO request message frame
5000 *
5001 * Supporting protection 1 and 3.
5002 */
5003 static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd,Mpi25SCSIIORequest_t * mpi_request)5004 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5005 Mpi25SCSIIORequest_t *mpi_request)
5006 {
5007 u16 eedp_flags;
5008 unsigned char prot_op = scsi_get_prot_op(scmd);
5009 unsigned char prot_type = scsi_get_prot_type(scmd);
5010 Mpi25SCSIIORequest_t *mpi_request_3v =
5011 (Mpi25SCSIIORequest_t *)mpi_request;
5012
5013 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
5014 return;
5015
5016 if (prot_op == SCSI_PROT_READ_STRIP)
5017 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5018 else if (prot_op == SCSI_PROT_WRITE_INSERT)
5019 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5020 else
5021 return;
5022
5023 switch (prot_type) {
5024 case SCSI_PROT_DIF_TYPE1:
5025 case SCSI_PROT_DIF_TYPE2:
5026
5027 /*
5028 * enable ref/guard checking
5029 * auto increment ref tag
5030 */
5031 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
5032 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
5033 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5034 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5035 cpu_to_be32(t10_pi_ref_tag(scmd->request));
5036 break;
5037
5038 case SCSI_PROT_DIF_TYPE3:
5039
5040 /*
5041 * enable guard checking
5042 */
5043 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5044
5045 break;
5046 }
5047
5048 mpi_request_3v->EEDPBlockSize =
5049 cpu_to_le16(scmd->device->sector_size);
5050
5051 if (ioc->is_gen35_ioc)
5052 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5053 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5054 }
5055
5056 /**
5057 * _scsih_eedp_error_handling - return sense code for EEDP errors
5058 * @scmd: pointer to scsi command object
5059 * @ioc_status: ioc status
5060 */
5061 static void
_scsih_eedp_error_handling(struct scsi_cmnd * scmd,u16 ioc_status)5062 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5063 {
5064 u8 ascq;
5065
5066 switch (ioc_status) {
5067 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5068 ascq = 0x01;
5069 break;
5070 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5071 ascq = 0x02;
5072 break;
5073 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5074 ascq = 0x03;
5075 break;
5076 default:
5077 ascq = 0x00;
5078 break;
5079 }
5080 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
5081 ascq);
5082 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
5083 SAM_STAT_CHECK_CONDITION;
5084 }
5085
5086 /**
5087 * scsih_qcmd - main scsi request entry point
5088 * @shost: SCSI host pointer
5089 * @scmd: pointer to scsi command object
5090 *
5091 * The callback index is set inside `ioc->scsi_io_cb_idx`.
5092 *
5093 * Return: 0 on success. If there's a failure, return either:
5094 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5095 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5096 */
5097 static int
scsih_qcmd(struct Scsi_Host * shost,struct scsi_cmnd * scmd)5098 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5099 {
5100 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5101 struct MPT3SAS_DEVICE *sas_device_priv_data;
5102 struct MPT3SAS_TARGET *sas_target_priv_data;
5103 struct _raid_device *raid_device;
5104 struct request *rq = scmd->request;
5105 int class;
5106 Mpi25SCSIIORequest_t *mpi_request;
5107 struct _pcie_device *pcie_device = NULL;
5108 u32 mpi_control;
5109 u16 smid;
5110 u16 handle;
5111
5112 if (ioc->logging_level & MPT_DEBUG_SCSI)
5113 scsi_print_command(scmd);
5114
5115 sas_device_priv_data = scmd->device->hostdata;
5116 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5117 scmd->result = DID_NO_CONNECT << 16;
5118 scmd->scsi_done(scmd);
5119 return 0;
5120 }
5121
5122 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5123 scmd->result = DID_NO_CONNECT << 16;
5124 scmd->scsi_done(scmd);
5125 return 0;
5126 }
5127
5128 sas_target_priv_data = sas_device_priv_data->sas_target;
5129
5130 /* invalid device handle */
5131 handle = sas_target_priv_data->handle;
5132 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5133 scmd->result = DID_NO_CONNECT << 16;
5134 scmd->scsi_done(scmd);
5135 return 0;
5136 }
5137
5138
5139 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5140 /* host recovery or link resets sent via IOCTLs */
5141 return SCSI_MLQUEUE_HOST_BUSY;
5142 } else if (sas_target_priv_data->deleted) {
5143 /* device has been deleted */
5144 scmd->result = DID_NO_CONNECT << 16;
5145 scmd->scsi_done(scmd);
5146 return 0;
5147 } else if (sas_target_priv_data->tm_busy ||
5148 sas_device_priv_data->block) {
5149 /* device busy with task management */
5150 return SCSI_MLQUEUE_DEVICE_BUSY;
5151 }
5152
5153 /*
5154 * Bug work around for firmware SATL handling. The loop
5155 * is based on atomic operations and ensures consistency
5156 * since we're lockless at this point
5157 */
5158 do {
5159 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5160 return SCSI_MLQUEUE_DEVICE_BUSY;
5161 } while (_scsih_set_satl_pending(scmd, true));
5162
5163 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5164 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5165 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5166 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5167 else
5168 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5169
5170 /* set tags */
5171 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5172 /* NCQ Prio supported, make sure control indicated high priority */
5173 if (sas_device_priv_data->ncq_prio_enable) {
5174 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5175 if (class == IOPRIO_CLASS_RT)
5176 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5177 }
5178 /* Make sure Device is not raid volume.
5179 * We do not expose raid functionality to upper layer for warpdrive.
5180 */
5181 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5182 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5183 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5184 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5185
5186 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5187 if (!smid) {
5188 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5189 _scsih_set_satl_pending(scmd, false);
5190 goto out;
5191 }
5192 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5193 memset(mpi_request, 0, ioc->request_sz);
5194 _scsih_setup_eedp(ioc, scmd, mpi_request);
5195
5196 if (scmd->cmd_len == 32)
5197 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5198 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5199 if (sas_device_priv_data->sas_target->flags &
5200 MPT_TARGET_FLAGS_RAID_COMPONENT)
5201 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5202 else
5203 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5204 mpi_request->DevHandle = cpu_to_le16(handle);
5205 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5206 mpi_request->Control = cpu_to_le32(mpi_control);
5207 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5208 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5209 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5210 mpi_request->SenseBufferLowAddress =
5211 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5212 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5213 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5214 mpi_request->LUN);
5215 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5216
5217 if (mpi_request->DataLength) {
5218 pcie_device = sas_target_priv_data->pcie_dev;
5219 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5220 mpt3sas_base_free_smid(ioc, smid);
5221 _scsih_set_satl_pending(scmd, false);
5222 goto out;
5223 }
5224 } else
5225 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5226
5227 raid_device = sas_target_priv_data->raid_device;
5228 if (raid_device && raid_device->direct_io_enabled)
5229 mpt3sas_setup_direct_io(ioc, scmd,
5230 raid_device, mpi_request);
5231
5232 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5233 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5234 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5235 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5236 ioc->put_smid_fast_path(ioc, smid, handle);
5237 } else
5238 ioc->put_smid_scsi_io(ioc, smid,
5239 le16_to_cpu(mpi_request->DevHandle));
5240 } else
5241 ioc->put_smid_default(ioc, smid);
5242 return 0;
5243
5244 out:
5245 return SCSI_MLQUEUE_HOST_BUSY;
5246 }
5247
5248 /**
5249 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5250 * @sense_buffer: sense data returned by target
5251 * @data: normalized skey/asc/ascq
5252 */
5253 static void
_scsih_normalize_sense(char * sense_buffer,struct sense_info * data)5254 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5255 {
5256 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5257 /* descriptor format */
5258 data->skey = sense_buffer[1] & 0x0F;
5259 data->asc = sense_buffer[2];
5260 data->ascq = sense_buffer[3];
5261 } else {
5262 /* fixed format */
5263 data->skey = sense_buffer[2] & 0x0F;
5264 data->asc = sense_buffer[12];
5265 data->ascq = sense_buffer[13];
5266 }
5267 }
5268
5269 /**
5270 * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
5271 * @ioc: per adapter object
5272 * @scmd: pointer to scsi command object
5273 * @mpi_reply: reply mf payload returned from firmware
5274 * @smid: ?
5275 *
5276 * scsi_status - SCSI Status code returned from target device
5277 * scsi_state - state info associated with SCSI_IO determined by ioc
5278 * ioc_status - ioc supplied status info
5279 */
5280 static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd,Mpi2SCSIIOReply_t * mpi_reply,u16 smid)5281 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5282 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5283 {
5284 u32 response_info;
5285 u8 *response_bytes;
5286 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5287 MPI2_IOCSTATUS_MASK;
5288 u8 scsi_state = mpi_reply->SCSIState;
5289 u8 scsi_status = mpi_reply->SCSIStatus;
5290 char *desc_ioc_state = NULL;
5291 char *desc_scsi_status = NULL;
5292 char *desc_scsi_state = ioc->tmp_string;
5293 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5294 struct _sas_device *sas_device = NULL;
5295 struct _pcie_device *pcie_device = NULL;
5296 struct scsi_target *starget = scmd->device->sdev_target;
5297 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5298 char *device_str = NULL;
5299
5300 if (!priv_target)
5301 return;
5302 if (ioc->hide_ir_msg)
5303 device_str = "WarpDrive";
5304 else
5305 device_str = "volume";
5306
5307 if (log_info == 0x31170000)
5308 return;
5309
5310 switch (ioc_status) {
5311 case MPI2_IOCSTATUS_SUCCESS:
5312 desc_ioc_state = "success";
5313 break;
5314 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5315 desc_ioc_state = "invalid function";
5316 break;
5317 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5318 desc_ioc_state = "scsi recovered error";
5319 break;
5320 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5321 desc_ioc_state = "scsi invalid dev handle";
5322 break;
5323 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5324 desc_ioc_state = "scsi device not there";
5325 break;
5326 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5327 desc_ioc_state = "scsi data overrun";
5328 break;
5329 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5330 desc_ioc_state = "scsi data underrun";
5331 break;
5332 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5333 desc_ioc_state = "scsi io data error";
5334 break;
5335 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5336 desc_ioc_state = "scsi protocol error";
5337 break;
5338 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5339 desc_ioc_state = "scsi task terminated";
5340 break;
5341 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5342 desc_ioc_state = "scsi residual mismatch";
5343 break;
5344 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5345 desc_ioc_state = "scsi task mgmt failed";
5346 break;
5347 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5348 desc_ioc_state = "scsi ioc terminated";
5349 break;
5350 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5351 desc_ioc_state = "scsi ext terminated";
5352 break;
5353 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5354 desc_ioc_state = "eedp guard error";
5355 break;
5356 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5357 desc_ioc_state = "eedp ref tag error";
5358 break;
5359 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5360 desc_ioc_state = "eedp app tag error";
5361 break;
5362 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5363 desc_ioc_state = "insufficient power";
5364 break;
5365 default:
5366 desc_ioc_state = "unknown";
5367 break;
5368 }
5369
5370 switch (scsi_status) {
5371 case MPI2_SCSI_STATUS_GOOD:
5372 desc_scsi_status = "good";
5373 break;
5374 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5375 desc_scsi_status = "check condition";
5376 break;
5377 case MPI2_SCSI_STATUS_CONDITION_MET:
5378 desc_scsi_status = "condition met";
5379 break;
5380 case MPI2_SCSI_STATUS_BUSY:
5381 desc_scsi_status = "busy";
5382 break;
5383 case MPI2_SCSI_STATUS_INTERMEDIATE:
5384 desc_scsi_status = "intermediate";
5385 break;
5386 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5387 desc_scsi_status = "intermediate condmet";
5388 break;
5389 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5390 desc_scsi_status = "reservation conflict";
5391 break;
5392 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5393 desc_scsi_status = "command terminated";
5394 break;
5395 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5396 desc_scsi_status = "task set full";
5397 break;
5398 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5399 desc_scsi_status = "aca active";
5400 break;
5401 case MPI2_SCSI_STATUS_TASK_ABORTED:
5402 desc_scsi_status = "task aborted";
5403 break;
5404 default:
5405 desc_scsi_status = "unknown";
5406 break;
5407 }
5408
5409 desc_scsi_state[0] = '\0';
5410 if (!scsi_state)
5411 desc_scsi_state = " ";
5412 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5413 strcat(desc_scsi_state, "response info ");
5414 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5415 strcat(desc_scsi_state, "state terminated ");
5416 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5417 strcat(desc_scsi_state, "no status ");
5418 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5419 strcat(desc_scsi_state, "autosense failed ");
5420 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5421 strcat(desc_scsi_state, "autosense valid ");
5422
5423 scsi_print_command(scmd);
5424
5425 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5426 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5427 device_str, (u64)priv_target->sas_address);
5428 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5429 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5430 if (pcie_device) {
5431 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5432 (u64)pcie_device->wwid, pcie_device->port_num);
5433 if (pcie_device->enclosure_handle != 0)
5434 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5435 (u64)pcie_device->enclosure_logical_id,
5436 pcie_device->slot);
5437 if (pcie_device->connector_name[0])
5438 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5439 pcie_device->enclosure_level,
5440 pcie_device->connector_name);
5441 pcie_device_put(pcie_device);
5442 }
5443 } else {
5444 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5445 if (sas_device) {
5446 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5447 (u64)sas_device->sas_address, sas_device->phy);
5448
5449 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5450 NULL, NULL);
5451
5452 sas_device_put(sas_device);
5453 }
5454 }
5455
5456 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5457 le16_to_cpu(mpi_reply->DevHandle),
5458 desc_ioc_state, ioc_status, smid);
5459 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5460 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5461 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5462 le16_to_cpu(mpi_reply->TaskTag),
5463 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5464 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5465 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5466
5467 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5468 struct sense_info data;
5469 _scsih_normalize_sense(scmd->sense_buffer, &data);
5470 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5471 data.skey, data.asc, data.ascq,
5472 le32_to_cpu(mpi_reply->SenseCount));
5473 }
5474 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5475 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5476 response_bytes = (u8 *)&response_info;
5477 _scsih_response_code(ioc, response_bytes[0]);
5478 }
5479 }
5480
5481 /**
5482 * _scsih_turn_on_pfa_led - illuminate PFA LED
5483 * @ioc: per adapter object
5484 * @handle: device handle
5485 * Context: process
5486 */
5487 static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5488 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5489 {
5490 Mpi2SepReply_t mpi_reply;
5491 Mpi2SepRequest_t mpi_request;
5492 struct _sas_device *sas_device;
5493
5494 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5495 if (!sas_device)
5496 return;
5497
5498 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5499 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5500 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5501 mpi_request.SlotStatus =
5502 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5503 mpi_request.DevHandle = cpu_to_le16(handle);
5504 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5505 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5506 &mpi_request)) != 0) {
5507 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5508 __FILE__, __LINE__, __func__);
5509 goto out;
5510 }
5511 sas_device->pfa_led_on = 1;
5512
5513 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5514 dewtprintk(ioc,
5515 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5516 le16_to_cpu(mpi_reply.IOCStatus),
5517 le32_to_cpu(mpi_reply.IOCLogInfo)));
5518 goto out;
5519 }
5520 out:
5521 sas_device_put(sas_device);
5522 }
5523
5524 /**
5525 * _scsih_turn_off_pfa_led - turn off Fault LED
5526 * @ioc: per adapter object
5527 * @sas_device: sas device whose PFA LED has to turned off
5528 * Context: process
5529 */
5530 static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)5531 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5532 struct _sas_device *sas_device)
5533 {
5534 Mpi2SepReply_t mpi_reply;
5535 Mpi2SepRequest_t mpi_request;
5536
5537 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5538 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5539 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5540 mpi_request.SlotStatus = 0;
5541 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5542 mpi_request.DevHandle = 0;
5543 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5544 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5545 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5546 &mpi_request)) != 0) {
5547 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5548 __FILE__, __LINE__, __func__);
5549 return;
5550 }
5551
5552 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5553 dewtprintk(ioc,
5554 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5555 le16_to_cpu(mpi_reply.IOCStatus),
5556 le32_to_cpu(mpi_reply.IOCLogInfo)));
5557 return;
5558 }
5559 }
5560
5561 /**
5562 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5563 * @ioc: per adapter object
5564 * @handle: device handle
5565 * Context: interrupt.
5566 */
5567 static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5568 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5569 {
5570 struct fw_event_work *fw_event;
5571
5572 fw_event = alloc_fw_event_work(0);
5573 if (!fw_event)
5574 return;
5575 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5576 fw_event->device_handle = handle;
5577 fw_event->ioc = ioc;
5578 _scsih_fw_event_add(ioc, fw_event);
5579 fw_event_work_put(fw_event);
5580 }
5581
5582 /**
5583 * _scsih_smart_predicted_fault - process smart errors
5584 * @ioc: per adapter object
5585 * @handle: device handle
5586 * Context: interrupt.
5587 */
5588 static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER * ioc,u16 handle)5589 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5590 {
5591 struct scsi_target *starget;
5592 struct MPT3SAS_TARGET *sas_target_priv_data;
5593 Mpi2EventNotificationReply_t *event_reply;
5594 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5595 struct _sas_device *sas_device;
5596 ssize_t sz;
5597 unsigned long flags;
5598
5599 /* only handle non-raid devices */
5600 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5601 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5602 if (!sas_device)
5603 goto out_unlock;
5604
5605 starget = sas_device->starget;
5606 sas_target_priv_data = starget->hostdata;
5607
5608 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5609 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5610 goto out_unlock;
5611
5612 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5613
5614 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5615
5616 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5617 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5618
5619 /* insert into event log */
5620 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5621 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5622 event_reply = kzalloc(sz, GFP_ATOMIC);
5623 if (!event_reply) {
5624 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5625 __FILE__, __LINE__, __func__);
5626 goto out;
5627 }
5628
5629 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5630 event_reply->Event =
5631 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5632 event_reply->MsgLength = sz/4;
5633 event_reply->EventDataLength =
5634 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5635 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5636 event_reply->EventData;
5637 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5638 event_data->ASC = 0x5D;
5639 event_data->DevHandle = cpu_to_le16(handle);
5640 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5641 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5642 kfree(event_reply);
5643 out:
5644 if (sas_device)
5645 sas_device_put(sas_device);
5646 return;
5647
5648 out_unlock:
5649 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5650 goto out;
5651 }
5652
/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Translates the firmware's IOCStatus/SCSIStatus/SCSIState triple into a
 * SCSI midlayer result code and completes the command.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* no tracked command for this smid: let _base_interrupt free the mf */
	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* no reply frame means the IO completed successfully */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	    != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		/* reissue the same smid against the volume handle; return 0
		 * so the message frame is NOT freed by _base_interrupt */
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* snoop only the first completion per device for a TLR reject */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/* a zero-byte underrun carrying a real SCSI status (busy/reservation
	 * conflict/task set full) is reported as success so the SCSI status
	 * itself drives the retry decision */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/* loginfo 0x31110630: presumably a firmware-specific retryable
		 * termination - NOTE(review): meaning not visible here, confirm
		 * against the MPI loginfo codes */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		/* valid autosense already carries the error information */
		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/* fabricate ILLEGAL REQUEST / invalid opcode sense for
			 * an empty REPORT LUNS so the midlayer falls back to
			 * sequential LUN scanning */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scmd->result = (DRIVER_SENSE << 24) |
			    SAM_STAT_CHECK_CONDITION;
			scmd->sense_buffer[0] = 0x70;
			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
			scmd->sense_buffer[12] = 0x20;
			scmd->sense_buffer[13] = 0;
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scmd->scsi_done(scmd);
	return 0;
}
5895
/**
 * _scsih_update_vphys_after_reset - update the Port's
 *			vphys_list after reset
 * @ioc: per adapter object
 *
 * After a host reset the firmware may renumber ports; re-associate each
 * virtual phy (vSES) with the hba_port matching its new Port ID and drop
 * the dirty mark from every vphy that was re-discovered.
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
	    (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is conformed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equals
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
						&ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirt.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			/* matched vphy handled; move on to the next HBA phy */
			if (found)
				break;
		}
	}
 out:
	kfree(sas_iounit_pg0);
}
6093
6094 /**
6095 * _scsih_get_port_table_after_reset - Construct temporary port table
6096 * @ioc: per adapter object
6097 * @port_table: address where port table needs to be constructed
6098 *
6099 * return number of HBA port entries available after reset.
6100 */
6101 static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table)6102 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6103 struct hba_port *port_table)
6104 {
6105 u16 sz, ioc_status;
6106 int i, j;
6107 Mpi2ConfigReply_t mpi_reply;
6108 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6109 u16 attached_handle;
6110 u64 attached_sas_addr;
6111 u8 found = 0, port_count = 0, port_id;
6112
6113 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6114 * sizeof(Mpi2SasIOUnit0PhyData_t));
6115 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6116 if (!sas_iounit_pg0) {
6117 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6118 __FILE__, __LINE__, __func__);
6119 return port_count;
6120 }
6121
6122 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6123 sas_iounit_pg0, sz)) != 0)
6124 goto out;
6125 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6126 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6127 goto out;
6128 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6129 found = 0;
6130 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6131 MPI2_SAS_NEG_LINK_RATE_1_5)
6132 continue;
6133 attached_handle =
6134 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6135 if (_scsih_get_sas_address(
6136 ioc, attached_handle, &attached_sas_addr) != 0) {
6137 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6138 __FILE__, __LINE__, __func__);
6139 continue;
6140 }
6141
6142 for (j = 0; j < port_count; j++) {
6143 port_id = sas_iounit_pg0->PhyData[i].Port;
6144 if (port_table[j].port_id == port_id &&
6145 port_table[j].sas_address == attached_sas_addr) {
6146 port_table[j].phy_mask |= (1 << i);
6147 found = 1;
6148 break;
6149 }
6150 }
6151
6152 if (found)
6153 continue;
6154
6155 port_id = sas_iounit_pg0->PhyData[i].Port;
6156 port_table[port_count].port_id = port_id;
6157 port_table[port_count].phy_mask = (1 << i);
6158 port_table[port_count].sas_address = attached_sas_addr;
6159 port_count++;
6160 }
6161 out:
6162 kfree(sas_iounit_pg0);
6163 return port_count;
6164 }
6165
/*
 * Result codes of _scsih_look_and_get_matched_port_entry(), ordered from
 * no match to the strongest match criteria.
 */
enum hba_port_matched_codes {
	NOT_MATCHED				= 0,	/* no entry matched */
	MATCHED_WITH_ADDR_AND_PHYMASK		= 1,	/* address + exact phy mask */
	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT	= 2,	/* address + phy subset + port id */
	MATCHED_WITH_ADDR_AND_SUBPHYMASK	= 3,	/* address + phy subset */
	MATCHED_WITH_ADDR			= 4,	/* address only */
};
6173
/**
 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
 *					from HBA port table
 * @ioc: per adapter object
 * @port_entry: hba port entry from temporary port table which needs to be
 *		searched for matched entry in the HBA port table
 * @matched_port_entry: save matched hba port entry here
 * @count: count of matched entries
 *
 * Scans the dirty entries of ioc->port_table_list and keeps the best match
 * in decreasing strength: exact addr+phymask (stops immediately), then
 * addr+sub-phymask+port id, then addr+sub-phymask, then addr only. @count
 * is only written for the weakest (addr-only) match, where multiple
 * candidates are possible.
 *
 * return type of matched entry found.
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;
	*matched_port_entry = NULL;

	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		/* only dirty (not yet re-claimed) entries are candidates */
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* strongest match: same address and identical phy mask */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* same address, overlapping phy mask and same port id */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* same address and overlapping phy mask; do not downgrade a
		 * stronger match already found */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* weakest match: address only; count how many such entries */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code ==  MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}
6241
6242 /**
6243 * _scsih_del_phy_part_of_anther_port - remove phy if it
6244 * is a part of anther port
6245 *@ioc: per adapter object
6246 *@port_table: port table after reset
6247 *@index: hba port entry index
6248 *@port_count: number of ports available after host reset
6249 *@offset: HBA phy bit offset
6250 *
6251 */
6252 static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table,int index,u8 port_count,int offset)6253 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6254 struct hba_port *port_table,
6255 int index, u8 port_count, int offset)
6256 {
6257 struct _sas_node *sas_node = &ioc->sas_hba;
6258 u32 i, found = 0;
6259
6260 for (i = 0; i < port_count; i++) {
6261 if (i == index)
6262 continue;
6263
6264 if (port_table[i].phy_mask & (1 << offset)) {
6265 mpt3sas_transport_del_phy_from_an_existing_port(
6266 ioc, sas_node, &sas_node->phy[offset]);
6267 found = 1;
6268 break;
6269 }
6270 }
6271 if (!found)
6272 port_table[index].phy_mask |= (1 << offset);
6273 }
6274
6275 /**
6276 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6277 * right port
6278 *@ioc: per adapter object
6279 *@hba_port_entry: hba port table entry
6280 *@port_table: temporary port table
6281 *@index: hba port entry index
6282 *@port_count: number of ports available after host reset
6283 *
6284 */
6285 static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * hba_port_entry,struct hba_port * port_table,int index,int port_count)6286 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6287 struct hba_port *hba_port_entry, struct hba_port *port_table,
6288 int index, int port_count)
6289 {
6290 u32 phy_mask, offset = 0;
6291 struct _sas_node *sas_node = &ioc->sas_hba;
6292
6293 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6294
6295 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6296 if (phy_mask & (1 << offset)) {
6297 if (!(port_table[index].phy_mask & (1 << offset))) {
6298 _scsih_del_phy_part_of_anther_port(
6299 ioc, port_table, index, port_count,
6300 offset);
6301 continue;
6302 }
6303 if (sas_node->phy[offset].phy_belongs_to_port)
6304 mpt3sas_transport_del_phy_from_an_existing_port(
6305 ioc, sas_node, &sas_node->phy[offset]);
6306 mpt3sas_transport_add_phy_to_an_existing_port(
6307 ioc, sas_node, &sas_node->phy[offset],
6308 hba_port_entry->sas_address,
6309 hba_port_entry);
6310 }
6311 }
6312 }
6313
/**
 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
 * @ioc: per adapter object
 *
 * Frees every vphy still marked MPT_VPHY_FLAG_DIRTY_PHY (i.e. not
 * re-discovered after reset) and marks a port dirty when it ends up with
 * no vphys and no SAS address.
 *
 * Returns nothing.
 */
static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
{
	struct hba_port *port, *port_next;
	struct virtual_phy *vphy, *vphy_next;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		/* _safe variant: entries may be deleted while iterating */
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
				drsprintk(ioc, ioc_info(ioc,
				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
				    vphy, port->port_id,
				    vphy->phy_mask));
				port->vphys_mask &= ~vphy->phy_mask;
				list_del(&vphy->list);
				kfree(vphy);
			}
		}
		/* an empty, address-less port is itself stale now */
		if (!port->vphys_mask && !port->sas_address)
			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
	}
}
6346
/**
 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
 *					after host reset
 *@ioc: per adapter object
 *
 * Frees hba_port entries still marked dirty after reset processing;
 * entries flagged HBA_PORT_FLAG_NEW_PORT are kept.
 */
static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
{
	struct hba_port *port, *port_next;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		/* keep clean entries and ports created during this recovery */
		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
		    port->flags & HBA_PORT_FLAG_NEW_PORT)
			continue;

		drsprintk(ioc, ioc_info(ioc,
		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
		    port, port->port_id, port->phy_mask));
		list_del(&port->list);
		kfree(port);
	}
}
6371
6372 /**
6373 * _scsih_sas_port_refresh - Update HBA port table after host reset
6374 * @ioc: per adapter object
6375 */
6376 static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER * ioc)6377 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6378 {
6379 u32 port_count = 0;
6380 struct hba_port *port_table;
6381 struct hba_port *port_table_entry;
6382 struct hba_port *port_entry = NULL;
6383 int i, j, count = 0, lcount = 0;
6384 int ret;
6385 u64 sas_addr;
6386
6387 drsprintk(ioc, ioc_info(ioc,
6388 "updating ports for sas_host(0x%016llx)\n",
6389 (unsigned long long)ioc->sas_hba.sas_address));
6390
6391 port_table = kcalloc(ioc->sas_hba.num_phys,
6392 sizeof(struct hba_port), GFP_KERNEL);
6393 if (!port_table)
6394 return;
6395
6396 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6397 if (!port_count)
6398 return;
6399
6400 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6401 for (j = 0; j < port_count; j++)
6402 drsprintk(ioc, ioc_info(ioc,
6403 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6404 port_table[j].port_id,
6405 port_table[j].phy_mask, port_table[j].sas_address));
6406
6407 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6408 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6409
6410 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6411 port_table_entry = NULL;
6412 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6413 drsprintk(ioc, ioc_info(ioc,
6414 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6415 port_table_entry->port_id,
6416 port_table_entry->phy_mask,
6417 port_table_entry->sas_address));
6418 }
6419
6420 for (j = 0; j < port_count; j++) {
6421 ret = _scsih_look_and_get_matched_port_entry(ioc,
6422 &port_table[j], &port_entry, &count);
6423 if (!port_entry) {
6424 drsprintk(ioc, ioc_info(ioc,
6425 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6426 port_table[j].sas_address,
6427 port_table[j].port_id));
6428 continue;
6429 }
6430
6431 switch (ret) {
6432 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6433 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6434 _scsih_add_or_del_phys_from_existing_port(ioc,
6435 port_entry, port_table, j, port_count);
6436 break;
6437 case MATCHED_WITH_ADDR:
6438 sas_addr = port_table[j].sas_address;
6439 for (i = 0; i < port_count; i++) {
6440 if (port_table[i].sas_address == sas_addr)
6441 lcount++;
6442 }
6443
6444 if (count > 1 || lcount > 1)
6445 port_entry = NULL;
6446 else
6447 _scsih_add_or_del_phys_from_existing_port(ioc,
6448 port_entry, port_table, j, port_count);
6449 }
6450
6451 if (!port_entry)
6452 continue;
6453
6454 if (port_entry->port_id != port_table[j].port_id)
6455 port_entry->port_id = port_table[j].port_id;
6456 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6457 port_entry->phy_mask = port_table[j].phy_mask;
6458 }
6459
6460 port_table_entry = NULL;
6461 }
6462
6463 /**
6464 * _scsih_alloc_vphy - allocate virtual_phy object
6465 * @ioc: per adapter object
6466 * @port_id: Port ID number
6467 * @phy_num: HBA Phy number
6468 *
6469 * Returns allocated virtual_phy object.
6470 */
6471 static struct virtual_phy *
_scsih_alloc_vphy(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 phy_num)6472 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6473 {
6474 struct virtual_phy *vphy;
6475 struct hba_port *port;
6476
6477 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6478 if (!port)
6479 return NULL;
6480
6481 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6482 if (!vphy) {
6483 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6484 if (!vphy)
6485 return NULL;
6486
6487 if (!port->vphys_mask)
6488 INIT_LIST_HEAD(&port->vphys_list);
6489
6490 /*
6491 * Enable bit corresponding to HBA phy number on its
6492 * parent hba_port object's vphys_mask field.
6493 */
6494 port->vphys_mask |= (1 << phy_num);
6495 vphy->phy_mask |= (1 << phy_num);
6496
6497 list_add_tail(&vphy->list, &port->vphys_list);
6498
6499 ioc_info(ioc,
6500 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6501 vphy, port->port_id, phy_num);
6502 }
6503 return vphy;
6504 }
6505
/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, fw will send topology events for every device. Its
 * possible that the handles may change from the previous setting, so this
 * code keeping handles updating if changed.
 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		/* the controller handle is common to all phys; read it once */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* create a hba_port entry for any Port ID seen for the
		 * first time */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >=  MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* clamp: an attached device implies at least a 1.5G link */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
 out:
	kfree(sas_iounit_pg0);
}
6605
6606 /**
6607 * _scsih_sas_host_add - create sas host object
6608 * @ioc: per adapter object
6609 *
6610 * Creating host side data object, stored in ioc->sas_hba
6611 */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	/* The phy count sizes every per-phy table built below; with zero
	 * phys there is nothing to add.
	 */
	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	ioc->sas_hba.phy = kcalloc(num_phys,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0: variable-length page, one PhyData entry per
	 * HBA phy.
	 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1: same layout idea, carries the device missing
	 * delay configuration.
	 */
	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/* Cache the missing-delay settings; the UNIT_16 flag means the
	 * timeout field is expressed in 16-second units.
	 */
	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		/* Per-phy page 0 read; any config failure aborts host add. */
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}

		/* Controller handle is identical for all phys; latch it from
		 * phy 0 only.
		 */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		/* Create an hba_port entry the first time each firmware port
		 * id is seen.
		 */
		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	/* Resolve the host's own SAS address and enclosure from the device
	 * page of the controller handle latched above.
	 */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 ioc->sas_hba.handle,
		 (u64)ioc->sas_hba.sas_address,
		 ioc->sas_hba.num_phys);

	/* Optional: pull the enclosure logical id if the host sits in one. */
	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		    ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}
6782
6783 /**
6784 * _scsih_expander_add - creating expander object
6785 * @ioc: per adapter object
6786 * @handle: expander handle
6787 *
6788 * Creating expander object, stored in ioc->sas_expander_list.
6789 *
6790 * Return: 0 for success, else error.
6791 */
6792 static int
_scsih_expander_add(struct MPT3SAS_ADAPTER * ioc,u16 handle)6793 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6794 {
6795 struct _sas_node *sas_expander;
6796 struct _enclosure_node *enclosure_dev;
6797 Mpi2ConfigReply_t mpi_reply;
6798 Mpi2ExpanderPage0_t expander_pg0;
6799 Mpi2ExpanderPage1_t expander_pg1;
6800 u32 ioc_status;
6801 u16 parent_handle;
6802 u64 sas_address, sas_address_parent = 0;
6803 int i;
6804 unsigned long flags;
6805 struct _sas_port *mpt3sas_port = NULL;
6806 u8 port_id;
6807
6808 int rc = 0;
6809
6810 if (!handle)
6811 return -1;
6812
6813 if (ioc->shost_recovery || ioc->pci_error_recovery)
6814 return -1;
6815
6816 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6817 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6818 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6819 __FILE__, __LINE__, __func__);
6820 return -1;
6821 }
6822
6823 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6824 MPI2_IOCSTATUS_MASK;
6825 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6826 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6827 __FILE__, __LINE__, __func__);
6828 return -1;
6829 }
6830
6831 /* handle out of order topology events */
6832 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6833 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6834 != 0) {
6835 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6836 __FILE__, __LINE__, __func__);
6837 return -1;
6838 }
6839
6840 port_id = expander_pg0.PhysicalPort;
6841 if (sas_address_parent != ioc->sas_hba.sas_address) {
6842 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6843 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6844 sas_address_parent,
6845 mpt3sas_get_port_by_id(ioc, port_id, 0));
6846 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6847 if (!sas_expander) {
6848 rc = _scsih_expander_add(ioc, parent_handle);
6849 if (rc != 0)
6850 return rc;
6851 }
6852 }
6853
6854 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6855 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6856 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6857 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6858 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6859
6860 if (sas_expander)
6861 return 0;
6862
6863 sas_expander = kzalloc(sizeof(struct _sas_node),
6864 GFP_KERNEL);
6865 if (!sas_expander) {
6866 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6867 __FILE__, __LINE__, __func__);
6868 return -1;
6869 }
6870
6871 sas_expander->handle = handle;
6872 sas_expander->num_phys = expander_pg0.NumPhys;
6873 sas_expander->sas_address_parent = sas_address_parent;
6874 sas_expander->sas_address = sas_address;
6875 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6876 if (!sas_expander->port) {
6877 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6878 __FILE__, __LINE__, __func__);
6879 rc = -1;
6880 goto out_fail;
6881 }
6882
6883 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6884 handle, parent_handle,
6885 (u64)sas_expander->sas_address, sas_expander->num_phys);
6886
6887 if (!sas_expander->num_phys)
6888 goto out_fail;
6889 sas_expander->phy = kcalloc(sas_expander->num_phys,
6890 sizeof(struct _sas_phy), GFP_KERNEL);
6891 if (!sas_expander->phy) {
6892 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6893 __FILE__, __LINE__, __func__);
6894 rc = -1;
6895 goto out_fail;
6896 }
6897
6898 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6899 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6900 sas_address_parent, sas_expander->port);
6901 if (!mpt3sas_port) {
6902 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6903 __FILE__, __LINE__, __func__);
6904 rc = -1;
6905 goto out_fail;
6906 }
6907 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6908 sas_expander->rphy = mpt3sas_port->rphy;
6909
6910 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6911 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6912 &expander_pg1, i, handle))) {
6913 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6914 __FILE__, __LINE__, __func__);
6915 rc = -1;
6916 goto out_fail;
6917 }
6918 sas_expander->phy[i].handle = handle;
6919 sas_expander->phy[i].phy_id = i;
6920 sas_expander->phy[i].port =
6921 mpt3sas_get_port_by_id(ioc, port_id, 0);
6922
6923 if ((mpt3sas_transport_add_expander_phy(ioc,
6924 &sas_expander->phy[i], expander_pg1,
6925 sas_expander->parent_dev))) {
6926 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6927 __FILE__, __LINE__, __func__);
6928 rc = -1;
6929 goto out_fail;
6930 }
6931 }
6932
6933 if (sas_expander->enclosure_handle) {
6934 enclosure_dev =
6935 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6936 sas_expander->enclosure_handle);
6937 if (enclosure_dev)
6938 sas_expander->enclosure_logical_id =
6939 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6940 }
6941
6942 _scsih_expander_node_add(ioc, sas_expander);
6943 return 0;
6944
6945 out_fail:
6946
6947 if (mpt3sas_port)
6948 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6949 sas_address_parent, sas_expander->port);
6950 kfree(sas_expander);
6951 return rc;
6952 }
6953
6954 /**
6955 * mpt3sas_expander_remove - removing expander object
6956 * @ioc: per adapter object
6957 * @sas_address: expander sas_address
6958 * @port: hba port entry
6959 */
6960 void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)6961 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6962 struct hba_port *port)
6963 {
6964 struct _sas_node *sas_expander;
6965 unsigned long flags;
6966
6967 if (ioc->shost_recovery)
6968 return;
6969
6970 if (!port)
6971 return;
6972
6973 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6974 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6975 sas_address, port);
6976 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6977 if (sas_expander)
6978 _scsih_expander_node_remove(ioc, sas_expander);
6979 }
6980
6981 /**
6982 * _scsih_done - internal SCSI_IO callback handler.
6983 * @ioc: per adapter object
6984 * @smid: system request message index
6985 * @msix_index: MSIX table index supplied by the OS
6986 * @reply: reply message frame(lower 32bit addr)
6987 *
6988 * Callback handler when sending internal generated SCSI_IO.
6989 * The callback index passed is `ioc->scsih_cb_idx`
6990 *
6991 * Return: 1 meaning mf should be freed from _base_interrupt
6992 * 0 means the mf is freed from this function.
6993 */
6994 static u8
_scsih_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)6995 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6996 {
6997 MPI2DefaultReply_t *mpi_reply;
6998
6999 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7000 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7001 return 1;
7002 if (ioc->scsih_cmds.smid != smid)
7003 return 1;
7004 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7005 if (mpi_reply) {
7006 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7007 mpi_reply->MsgLength*4);
7008 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7009 }
7010 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7011 complete(&ioc->scsih_cmds.done);
7012 return 1;
7013 }
7014
7015
7016
7017
7018 #define MPT3_MAX_LUNS (255)
7019
7020
7021 /**
7022 * _scsih_check_access_status - check access flags
7023 * @ioc: per adapter object
7024 * @sas_address: sas address
7025 * @handle: sas device handle
7026 * @access_status: errors returned during discovery of the device
7027 *
7028 * Return: 0 for success, else failure
7029 */
7030 static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,u16 handle,u8 access_status)7031 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7032 u16 handle, u8 access_status)
7033 {
7034 u8 rc = 1;
7035 char *desc = NULL;
7036
7037 switch (access_status) {
7038 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7039 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7040 rc = 0;
7041 break;
7042 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7043 desc = "sata capability failed";
7044 break;
7045 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7046 desc = "sata affiliation conflict";
7047 break;
7048 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7049 desc = "route not addressable";
7050 break;
7051 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7052 desc = "smp error not addressable";
7053 break;
7054 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7055 desc = "device blocked";
7056 break;
7057 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7058 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7059 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7060 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7061 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7062 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7063 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7064 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7065 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7066 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7067 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7068 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7069 desc = "sata initialization failed";
7070 break;
7071 default:
7072 desc = "unknown";
7073 break;
7074 }
7075
7076 if (!rc)
7077 return 0;
7078
7079 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7080 desc, (u64)sas_address, handle);
7081 return rc;
7082 }
7083
7084 /**
7085 * _scsih_check_device - checking device responsiveness
7086 * @ioc: per adapter object
7087 * @parent_sas_address: sas address of parent expander or sas host
7088 * @handle: attached device handle
7089 * @phy_number: phy number
7090 * @link_rate: new link rate
7091 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	/* Read SAS Device Page 0 for this handle; silently bail if the
	 * firmware no longer knows the device.
	 */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	/* Everything below walks driver-private device state: take the
	 * sas_device_lock and keep it until the unblock step.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	/* The firmware handle can change across resets while the SAS
	 * address stays the same; resync handle and enclosure info.
	 */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
			"handle changed from(0x%04x) to (0x%04x)!!!\n",
			sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
				sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
				sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
				le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
						sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	/* Device responded: drop the lock before resuming I/O, then
	 * release the reference taken by __mpt3sas_get_sdev_by_addr().
	 */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}
7200
7201 /**
7202 * _scsih_add_device - creating sas device object
7203 * @ioc: per adapter object
7204 * @handle: sas device handle
7205 * @phy_num: phy number end device attached to
7206 * @is_pd: is this hidden raid component
7207 *
7208 * Creating end device object, stored in ioc->sas_device_list.
7209 *
7210 * Return: 0 for success, non-zero for failure.
7211 */
7212 static int
_scsih_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 phy_num,u8 is_pd)7213 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7214 u8 is_pd)
7215 {
7216 Mpi2ConfigReply_t mpi_reply;
7217 Mpi2SasDevicePage0_t sas_device_pg0;
7218 struct _sas_device *sas_device;
7219 struct _enclosure_node *enclosure_dev = NULL;
7220 u32 ioc_status;
7221 u64 sas_address;
7222 u32 device_info;
7223 u8 port_id;
7224
7225 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7226 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7227 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7228 __FILE__, __LINE__, __func__);
7229 return -1;
7230 }
7231
7232 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7233 MPI2_IOCSTATUS_MASK;
7234 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7235 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7236 __FILE__, __LINE__, __func__);
7237 return -1;
7238 }
7239
7240 /* check if this is end device */
7241 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7242 if (!(_scsih_is_end_device(device_info)))
7243 return -1;
7244 set_bit(handle, ioc->pend_os_device_add);
7245 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7246
7247 /* check if device is present */
7248 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7249 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7250 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7251 handle);
7252 return -1;
7253 }
7254
7255 /* check if there were any issues with discovery */
7256 if (_scsih_check_access_status(ioc, sas_address, handle,
7257 sas_device_pg0.AccessStatus))
7258 return -1;
7259
7260 port_id = sas_device_pg0.PhysicalPort;
7261 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7262 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7263 if (sas_device) {
7264 clear_bit(handle, ioc->pend_os_device_add);
7265 sas_device_put(sas_device);
7266 return -1;
7267 }
7268
7269 if (sas_device_pg0.EnclosureHandle) {
7270 enclosure_dev =
7271 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7272 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7273 if (enclosure_dev == NULL)
7274 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7275 sas_device_pg0.EnclosureHandle);
7276 }
7277
7278 sas_device = kzalloc(sizeof(struct _sas_device),
7279 GFP_KERNEL);
7280 if (!sas_device) {
7281 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7282 __FILE__, __LINE__, __func__);
7283 return 0;
7284 }
7285
7286 kref_init(&sas_device->refcount);
7287 sas_device->handle = handle;
7288 if (_scsih_get_sas_address(ioc,
7289 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7290 &sas_device->sas_address_parent) != 0)
7291 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7292 __FILE__, __LINE__, __func__);
7293 sas_device->enclosure_handle =
7294 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7295 if (sas_device->enclosure_handle != 0)
7296 sas_device->slot =
7297 le16_to_cpu(sas_device_pg0.Slot);
7298 sas_device->device_info = device_info;
7299 sas_device->sas_address = sas_address;
7300 sas_device->phy = sas_device_pg0.PhyNum;
7301 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7302 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7303 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7304 if (!sas_device->port) {
7305 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7306 __FILE__, __LINE__, __func__);
7307 goto out;
7308 }
7309
7310 if (le16_to_cpu(sas_device_pg0.Flags)
7311 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7312 sas_device->enclosure_level =
7313 sas_device_pg0.EnclosureLevel;
7314 memcpy(sas_device->connector_name,
7315 sas_device_pg0.ConnectorName, 4);
7316 sas_device->connector_name[4] = '\0';
7317 } else {
7318 sas_device->enclosure_level = 0;
7319 sas_device->connector_name[0] = '\0';
7320 }
7321 /* get enclosure_logical_id & chassis_slot*/
7322 sas_device->is_chassis_slot_valid = 0;
7323 if (enclosure_dev) {
7324 sas_device->enclosure_logical_id =
7325 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7326 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7327 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7328 sas_device->is_chassis_slot_valid = 1;
7329 sas_device->chassis_slot =
7330 enclosure_dev->pg0.ChassisSlot;
7331 }
7332 }
7333
7334 /* get device name */
7335 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7336
7337 if (ioc->wait_for_discovery_to_complete)
7338 _scsih_sas_device_init_add(ioc, sas_device);
7339 else
7340 _scsih_sas_device_add(ioc, sas_device);
7341
7342 out:
7343 sas_device_put(sas_device);
7344 return 0;
7345 }
7346
7347 /**
7348 * _scsih_remove_device - removing sas device object
7349 * @ioc: per adapter object
7350 * @sas_device: the sas_device object
7351 */
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* IBM-branded HBAs drive a predictive-failure LED; make sure it is
	 * switched off before the device object goes away.
	 */
	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
	     (sas_device->pfa_led_on)) {
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Mark the target deleted and unblock queued I/O so outstanding
	 * commands can complete/fail before the port is torn down; the
	 * handle is invalidated to stop new firmware requests.
	 */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* Hidden RAID members were never exposed through the transport
	 * layer, so there is no port to remove for them.
	 */
	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}
7399
7400 /**
7401 * _scsih_sas_topology_change_event_debug - debug for topology event
7402 * @ioc: per adapter object
7403 * @event_data: event data payload
7404 * Context: user.
7405 */
7406 static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)7407 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7408 Mpi2EventDataSasTopologyChangeList_t *event_data)
7409 {
7410 int i;
7411 u16 handle;
7412 u16 reason_code;
7413 u8 phy_number;
7414 char *status_str = NULL;
7415 u8 link_rate, prev_link_rate;
7416
7417 switch (event_data->ExpStatus) {
7418 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7419 status_str = "add";
7420 break;
7421 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7422 status_str = "remove";
7423 break;
7424 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7425 case 0:
7426 status_str = "responding";
7427 break;
7428 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7429 status_str = "remove delay";
7430 break;
7431 default:
7432 status_str = "unknown status";
7433 break;
7434 }
7435 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7436 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7437 "start_phy(%02d), count(%d)\n",
7438 le16_to_cpu(event_data->ExpanderDevHandle),
7439 le16_to_cpu(event_data->EnclosureHandle),
7440 event_data->StartPhyNum, event_data->NumEntries);
7441 for (i = 0; i < event_data->NumEntries; i++) {
7442 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7443 if (!handle)
7444 continue;
7445 phy_number = event_data->StartPhyNum + i;
7446 reason_code = event_data->PHY[i].PhyStatus &
7447 MPI2_EVENT_SAS_TOPO_RC_MASK;
7448 switch (reason_code) {
7449 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7450 status_str = "target add";
7451 break;
7452 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7453 status_str = "target remove";
7454 break;
7455 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7456 status_str = "delay target remove";
7457 break;
7458 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7459 status_str = "link rate change";
7460 break;
7461 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7462 status_str = "target responding";
7463 break;
7464 default:
7465 status_str = "unknown";
7466 break;
7467 }
7468 link_rate = event_data->PHY[i].LinkRate >> 4;
7469 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7470 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7471 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7472 handle, status_str, link_rate, prev_link_rate);
7473
7474 }
7475 }
7476
7477 /**
7478 * _scsih_sas_topology_change_event - handle topology changes
7479 * @ioc: per adapter object
7480 * @fw_event: The fw_event_work object
7481 * Context: user.
7482 *
7483 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	struct hba_port *port;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	/* Topology events are meaningless while the host is being
	 * removed or is recovering.
	 */
	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	/* Make sure the sas_hba object exists/is current before walking
	 * the event entries.
	 */
	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	/* Resolve the parent: either a known expander or the HBA itself
	 * (direct-attached phys have parent_handle < num_phys).
	 */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
		port = sas_expander->port;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* fw_event->ignore may be set asynchronously when a
		 * superseding event arrives; re-check each iteration.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* Vacant phys carry no device unless the entry reports a
		 * target-not-responding removal.
		 */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
				continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		/* LinkRate: new rate in high nibble, previous in low. */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			/* If _scsih_check_device left the handle pending
			 * (device not yet exposed to the OS), fall through
			 * and treat it as a target add.
			 */
			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address, port);

	return 0;
}
7612
7613 /**
7614 * _scsih_sas_device_status_change_event_debug - debug for device event
7615 * @ioc: ?
7616 * @event_data: event data payload
7617 * Context: user.
7618 */
7619 static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)7620 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7621 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7622 {
7623 char *reason_str = NULL;
7624
7625 switch (event_data->ReasonCode) {
7626 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7627 reason_str = "smart data";
7628 break;
7629 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7630 reason_str = "unsupported device discovered";
7631 break;
7632 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7633 reason_str = "internal device reset";
7634 break;
7635 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7636 reason_str = "internal task abort";
7637 break;
7638 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7639 reason_str = "internal task abort set";
7640 break;
7641 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7642 reason_str = "internal clear task set";
7643 break;
7644 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7645 reason_str = "internal query task";
7646 break;
7647 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7648 reason_str = "sata init failure";
7649 break;
7650 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7651 reason_str = "internal device reset complete";
7652 break;
7653 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7654 reason_str = "internal task abort complete";
7655 break;
7656 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7657 reason_str = "internal async notification";
7658 break;
7659 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7660 reason_str = "expander reduced functionality";
7661 break;
7662 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7663 reason_str = "expander reduced functionality complete";
7664 break;
7665 default:
7666 reason_str = "unknown reason";
7667 break;
7668 }
7669 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7670 reason_str, le16_to_cpu(event_data->DevHandle),
7671 (u64)le64_to_cpu(event_data->SASAddress),
7672 le16_to_cpu(event_data->TaskTag));
7673 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7674 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7675 event_data->ASC, event_data->ASCQ);
7676 pr_cont("\n");
7677 }
7678
7679 /**
7680 * _scsih_sas_device_status_change_event - handle device status change
7681 * @ioc: per adapter object
7682 * @event_data: The fw event
7683 * Context: user.
7684 */
7685 static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)7686 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7687 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7688 {
7689 struct MPT3SAS_TARGET *target_priv_data;
7690 struct _sas_device *sas_device;
7691 u64 sas_address;
7692 unsigned long flags;
7693
7694 /* In MPI Revision K (0xC), the internal device reset complete was
7695 * implemented, so avoid setting tm_busy flag for older firmware.
7696 */
7697 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7698 return;
7699
7700 if (event_data->ReasonCode !=
7701 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7702 event_data->ReasonCode !=
7703 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7704 return;
7705
7706 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7707 sas_address = le64_to_cpu(event_data->SASAddress);
7708 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7709 sas_address,
7710 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7711
7712 if (!sas_device || !sas_device->starget)
7713 goto out;
7714
7715 target_priv_data = sas_device->starget->hostdata;
7716 if (!target_priv_data)
7717 goto out;
7718
7719 if (event_data->ReasonCode ==
7720 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7721 target_priv_data->tm_busy = 1;
7722 else
7723 target_priv_data->tm_busy = 0;
7724
7725 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7726 ioc_info(ioc,
7727 "%s tm_busy flag for handle(0x%04x)\n",
7728 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7729 target_priv_data->handle);
7730
7731 out:
7732 if (sas_device)
7733 sas_device_put(sas_device);
7734
7735 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7736 }
7737
7738
7739 /**
7740 * _scsih_check_pcie_access_status - check access flags
7741 * @ioc: per adapter object
7742 * @wwid: wwid
7743 * @handle: sas device handle
7744 * @access_status: errors returned during discovery of the device
7745 *
7746 * Return: 0 for success, else failure
7747 */
7748 static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER * ioc,u64 wwid,u16 handle,u8 access_status)7749 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7750 u16 handle, u8 access_status)
7751 {
7752 u8 rc = 1;
7753 char *desc = NULL;
7754
7755 switch (access_status) {
7756 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7757 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7758 rc = 0;
7759 break;
7760 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7761 desc = "PCIe device capability failed";
7762 break;
7763 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7764 desc = "PCIe device blocked";
7765 ioc_info(ioc,
7766 "Device with Access Status (%s): wwid(0x%016llx), "
7767 "handle(0x%04x)\n ll only be added to the internal list",
7768 desc, (u64)wwid, handle);
7769 rc = 0;
7770 break;
7771 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7772 desc = "PCIe device mem space access failed";
7773 break;
7774 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7775 desc = "PCIe device unsupported";
7776 break;
7777 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7778 desc = "PCIe device MSIx Required";
7779 break;
7780 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7781 desc = "PCIe device init fail max";
7782 break;
7783 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7784 desc = "PCIe device status unknown";
7785 break;
7786 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7787 desc = "nvme ready timeout";
7788 break;
7789 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7790 desc = "nvme device configuration unsupported";
7791 break;
7792 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7793 desc = "nvme identify failed";
7794 break;
7795 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7796 desc = "nvme qconfig failed";
7797 break;
7798 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7799 desc = "nvme qcreation failed";
7800 break;
7801 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7802 desc = "nvme eventcfg failed";
7803 break;
7804 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7805 desc = "nvme get feature stat failed";
7806 break;
7807 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7808 desc = "nvme idle timeout";
7809 break;
7810 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7811 desc = "nvme failure status";
7812 break;
7813 default:
7814 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7815 access_status, (u64)wwid, handle);
7816 return rc;
7817 }
7818
7819 if (!rc)
7820 return rc;
7821
7822 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7823 desc, (u64)wwid, handle);
7824 return rc;
7825 }
7826
7827 /**
7828 * _scsih_pcie_device_remove_from_sml - removing pcie device
7829 * from SML and free up associated memory
7830 * @ioc: per adapter object
7831 * @pcie_device: the pcie_device object
7832 */
7833 static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)7834 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7835 struct _pcie_device *pcie_device)
7836 {
7837 struct MPT3SAS_TARGET *sas_target_priv_data;
7838
7839 dewtprintk(ioc,
7840 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7841 __func__,
7842 pcie_device->handle, (u64)pcie_device->wwid));
7843 if (pcie_device->enclosure_handle != 0)
7844 dewtprintk(ioc,
7845 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7846 __func__,
7847 (u64)pcie_device->enclosure_logical_id,
7848 pcie_device->slot));
7849 if (pcie_device->connector_name[0] != '\0')
7850 dewtprintk(ioc,
7851 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7852 __func__,
7853 pcie_device->enclosure_level,
7854 pcie_device->connector_name));
7855
7856 if (pcie_device->starget && pcie_device->starget->hostdata) {
7857 sas_target_priv_data = pcie_device->starget->hostdata;
7858 sas_target_priv_data->deleted = 1;
7859 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7860 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7861 }
7862
7863 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7864 pcie_device->handle, (u64)pcie_device->wwid);
7865 if (pcie_device->enclosure_handle != 0)
7866 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7867 (u64)pcie_device->enclosure_logical_id,
7868 pcie_device->slot);
7869 if (pcie_device->connector_name[0] != '\0')
7870 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7871 pcie_device->enclosure_level,
7872 pcie_device->connector_name);
7873
7874 if (pcie_device->starget && (pcie_device->access_status !=
7875 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7876 scsi_remove_target(&pcie_device->starget->dev);
7877 dewtprintk(ioc,
7878 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7879 __func__,
7880 pcie_device->handle, (u64)pcie_device->wwid));
7881 if (pcie_device->enclosure_handle != 0)
7882 dewtprintk(ioc,
7883 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7884 __func__,
7885 (u64)pcie_device->enclosure_logical_id,
7886 pcie_device->slot));
7887 if (pcie_device->connector_name[0] != '\0')
7888 dewtprintk(ioc,
7889 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7890 __func__,
7891 pcie_device->enclosure_level,
7892 pcie_device->connector_name));
7893
7894 kfree(pcie_device->serial_number);
7895 }
7896
7897
7898 /**
7899 * _scsih_pcie_check_device - checking device responsiveness
7900 * @ioc: per adapter object
7901 * @handle: attached device handle
7902 */
7903 static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)7904 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7905 {
7906 Mpi2ConfigReply_t mpi_reply;
7907 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7908 u32 ioc_status;
7909 struct _pcie_device *pcie_device;
7910 u64 wwid;
7911 unsigned long flags;
7912 struct scsi_target *starget;
7913 struct MPT3SAS_TARGET *sas_target_priv_data;
7914 u32 device_info;
7915
7916 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7917 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
7918 return;
7919
7920 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7921 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7922 return;
7923
7924 /* check if this is end device */
7925 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7926 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
7927 return;
7928
7929 wwid = le64_to_cpu(pcie_device_pg0.WWID);
7930 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7931 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7932
7933 if (!pcie_device) {
7934 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7935 return;
7936 }
7937
7938 if (unlikely(pcie_device->handle != handle)) {
7939 starget = pcie_device->starget;
7940 sas_target_priv_data = starget->hostdata;
7941 pcie_device->access_status = pcie_device_pg0.AccessStatus;
7942 starget_printk(KERN_INFO, starget,
7943 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7944 pcie_device->handle, handle);
7945 sas_target_priv_data->handle = handle;
7946 pcie_device->handle = handle;
7947
7948 if (le32_to_cpu(pcie_device_pg0.Flags) &
7949 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7950 pcie_device->enclosure_level =
7951 pcie_device_pg0.EnclosureLevel;
7952 memcpy(&pcie_device->connector_name[0],
7953 &pcie_device_pg0.ConnectorName[0], 4);
7954 } else {
7955 pcie_device->enclosure_level = 0;
7956 pcie_device->connector_name[0] = '\0';
7957 }
7958 }
7959
7960 /* check if device is present */
7961 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7962 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7963 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
7964 handle);
7965 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7966 pcie_device_put(pcie_device);
7967 return;
7968 }
7969
7970 /* check if there were any issues with discovery */
7971 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7972 pcie_device_pg0.AccessStatus)) {
7973 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7974 pcie_device_put(pcie_device);
7975 return;
7976 }
7977
7978 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7979 pcie_device_put(pcie_device);
7980
7981 _scsih_ublock_io_device(ioc, wwid, NULL);
7982
7983 return;
7984 }
7985
7986 /**
7987 * _scsih_pcie_add_device - creating pcie device object
7988 * @ioc: per adapter object
7989 * @handle: pcie device handle
7990 *
7991 * Creating end device object, stored in ioc->pcie_device_list.
7992 *
7993 * Return: 1 means queue the event later, 0 means complete the event
7994 */
7995 static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)7996 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7997 {
7998 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7999 Mpi26PCIeDevicePage2_t pcie_device_pg2;
8000 Mpi2ConfigReply_t mpi_reply;
8001 struct _pcie_device *pcie_device;
8002 struct _enclosure_node *enclosure_dev;
8003 u32 ioc_status;
8004 u64 wwid;
8005
8006 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8007 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8008 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8009 __FILE__, __LINE__, __func__);
8010 return 0;
8011 }
8012 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8013 MPI2_IOCSTATUS_MASK;
8014 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8015 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8016 __FILE__, __LINE__, __func__);
8017 return 0;
8018 }
8019
8020 set_bit(handle, ioc->pend_os_device_add);
8021 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8022
8023 /* check if device is present */
8024 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8025 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8026 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8027 handle);
8028 return 0;
8029 }
8030
8031 /* check if there were any issues with discovery */
8032 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8033 pcie_device_pg0.AccessStatus))
8034 return 0;
8035
8036 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8037 (pcie_device_pg0.DeviceInfo))))
8038 return 0;
8039
8040 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8041 if (pcie_device) {
8042 clear_bit(handle, ioc->pend_os_device_add);
8043 pcie_device_put(pcie_device);
8044 return 0;
8045 }
8046
8047 /* PCIe Device Page 2 contains read-only information about a
8048 * specific NVMe device; therefore, this page is only
8049 * valid for NVMe devices and skip for pcie devices of type scsi.
8050 */
8051 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8052 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8053 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8054 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8055 handle)) {
8056 ioc_err(ioc,
8057 "failure at %s:%d/%s()!\n", __FILE__,
8058 __LINE__, __func__);
8059 return 0;
8060 }
8061
8062 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8063 MPI2_IOCSTATUS_MASK;
8064 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8065 ioc_err(ioc,
8066 "failure at %s:%d/%s()!\n", __FILE__,
8067 __LINE__, __func__);
8068 return 0;
8069 }
8070 }
8071
8072 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8073 if (!pcie_device) {
8074 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8075 __FILE__, __LINE__, __func__);
8076 return 0;
8077 }
8078
8079 kref_init(&pcie_device->refcount);
8080 pcie_device->id = ioc->pcie_target_id++;
8081 pcie_device->channel = PCIE_CHANNEL;
8082 pcie_device->handle = handle;
8083 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8084 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8085 pcie_device->wwid = wwid;
8086 pcie_device->port_num = pcie_device_pg0.PortNum;
8087 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8088 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8089
8090 pcie_device->enclosure_handle =
8091 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8092 if (pcie_device->enclosure_handle != 0)
8093 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8094
8095 if (le32_to_cpu(pcie_device_pg0.Flags) &
8096 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8097 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8098 memcpy(&pcie_device->connector_name[0],
8099 &pcie_device_pg0.ConnectorName[0], 4);
8100 } else {
8101 pcie_device->enclosure_level = 0;
8102 pcie_device->connector_name[0] = '\0';
8103 }
8104
8105 /* get enclosure_logical_id */
8106 if (pcie_device->enclosure_handle) {
8107 enclosure_dev =
8108 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8109 pcie_device->enclosure_handle);
8110 if (enclosure_dev)
8111 pcie_device->enclosure_logical_id =
8112 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8113 }
8114 /* TODO -- Add device name once FW supports it */
8115 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8116 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8117 pcie_device->nvme_mdts =
8118 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8119 pcie_device->shutdown_latency =
8120 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8121 /*
8122 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8123 * if drive's RTD3 Entry Latency is greater then IOC's
8124 * max_shutdown_latency.
8125 */
8126 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8127 ioc->max_shutdown_latency =
8128 pcie_device->shutdown_latency;
8129 if (pcie_device_pg2.ControllerResetTO)
8130 pcie_device->reset_timeout =
8131 pcie_device_pg2.ControllerResetTO;
8132 else
8133 pcie_device->reset_timeout = 30;
8134 } else
8135 pcie_device->reset_timeout = 30;
8136
8137 if (ioc->wait_for_discovery_to_complete)
8138 _scsih_pcie_device_init_add(ioc, pcie_device);
8139 else
8140 _scsih_pcie_device_add(ioc, pcie_device);
8141
8142 pcie_device_put(pcie_device);
8143 return 0;
8144 }
8145
8146 /**
8147 * _scsih_pcie_topology_change_event_debug - debug for topology
8148 * event
8149 * @ioc: per adapter object
8150 * @event_data: event data payload
8151 * Context: user.
8152 */
8153 static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)8154 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8155 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8156 {
8157 int i;
8158 u16 handle;
8159 u16 reason_code;
8160 u8 port_number;
8161 char *status_str = NULL;
8162 u8 link_rate, prev_link_rate;
8163
8164 switch (event_data->SwitchStatus) {
8165 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8166 status_str = "add";
8167 break;
8168 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8169 status_str = "remove";
8170 break;
8171 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8172 case 0:
8173 status_str = "responding";
8174 break;
8175 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8176 status_str = "remove delay";
8177 break;
8178 default:
8179 status_str = "unknown status";
8180 break;
8181 }
8182 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8183 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8184 "start_port(%02d), count(%d)\n",
8185 le16_to_cpu(event_data->SwitchDevHandle),
8186 le16_to_cpu(event_data->EnclosureHandle),
8187 event_data->StartPortNum, event_data->NumEntries);
8188 for (i = 0; i < event_data->NumEntries; i++) {
8189 handle =
8190 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8191 if (!handle)
8192 continue;
8193 port_number = event_data->StartPortNum + i;
8194 reason_code = event_data->PortEntry[i].PortStatus;
8195 switch (reason_code) {
8196 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8197 status_str = "target add";
8198 break;
8199 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8200 status_str = "target remove";
8201 break;
8202 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8203 status_str = "delay target remove";
8204 break;
8205 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8206 status_str = "link rate change";
8207 break;
8208 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8209 status_str = "target responding";
8210 break;
8211 default:
8212 status_str = "unknown";
8213 break;
8214 }
8215 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8216 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8217 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8218 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8219 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8220 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8221 handle, status_str, link_rate, prev_link_rate);
8222 }
8223 }
8224
8225 /**
8226 * _scsih_pcie_topology_change_event - handle PCIe topology
8227 * changes
8228 * @ioc: per adapter object
8229 * @fw_event: The fw_event_work object
8230 * Context: user.
8231 *
8232 */
8233 static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8234 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8235 struct fw_event_work *fw_event)
8236 {
8237 int i;
8238 u16 handle;
8239 u16 reason_code;
8240 u8 link_rate, prev_link_rate;
8241 unsigned long flags;
8242 int rc;
8243 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8244 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8245 struct _pcie_device *pcie_device;
8246
8247 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8248 _scsih_pcie_topology_change_event_debug(ioc, event_data);
8249
8250 if (ioc->shost_recovery || ioc->remove_host ||
8251 ioc->pci_error_recovery)
8252 return;
8253
8254 if (fw_event->ignore) {
8255 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8256 return;
8257 }
8258
8259 /* handle siblings events */
8260 for (i = 0; i < event_data->NumEntries; i++) {
8261 if (fw_event->ignore) {
8262 dewtprintk(ioc,
8263 ioc_info(ioc, "ignoring switch event\n"));
8264 return;
8265 }
8266 if (ioc->remove_host || ioc->pci_error_recovery)
8267 return;
8268 reason_code = event_data->PortEntry[i].PortStatus;
8269 handle =
8270 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8271 if (!handle)
8272 continue;
8273
8274 link_rate = event_data->PortEntry[i].CurrentPortInfo
8275 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8276 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8277 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8278
8279 switch (reason_code) {
8280 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8281 if (ioc->shost_recovery)
8282 break;
8283 if (link_rate == prev_link_rate)
8284 break;
8285 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8286 break;
8287
8288 _scsih_pcie_check_device(ioc, handle);
8289
8290 /* This code after this point handles the test case
8291 * where a device has been added, however its returning
8292 * BUSY for sometime. Then before the Device Missing
8293 * Delay expires and the device becomes READY, the
8294 * device is removed and added back.
8295 */
8296 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8297 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8298 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8299
8300 if (pcie_device) {
8301 pcie_device_put(pcie_device);
8302 break;
8303 }
8304
8305 if (!test_bit(handle, ioc->pend_os_device_add))
8306 break;
8307
8308 dewtprintk(ioc,
8309 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8310 handle));
8311 event_data->PortEntry[i].PortStatus &= 0xF0;
8312 event_data->PortEntry[i].PortStatus |=
8313 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8314 fallthrough;
8315 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8316 if (ioc->shost_recovery)
8317 break;
8318 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8319 break;
8320
8321 rc = _scsih_pcie_add_device(ioc, handle);
8322 if (!rc) {
8323 /* mark entry vacant */
8324 /* TODO This needs to be reviewed and fixed,
8325 * we dont have an entry
8326 * to make an event void like vacant
8327 */
8328 event_data->PortEntry[i].PortStatus |=
8329 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8330 }
8331 break;
8332 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8333 _scsih_pcie_device_remove_by_handle(ioc, handle);
8334 break;
8335 }
8336 }
8337 }
8338
8339 /**
8340 * _scsih_pcie_device_status_change_event_debug - debug for device event
8341 * @ioc: ?
8342 * @event_data: event data payload
8343 * Context: user.
8344 */
8345 static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeDeviceStatusChange_t * event_data)8346 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8347 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8348 {
8349 char *reason_str = NULL;
8350
8351 switch (event_data->ReasonCode) {
8352 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8353 reason_str = "smart data";
8354 break;
8355 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8356 reason_str = "unsupported device discovered";
8357 break;
8358 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8359 reason_str = "internal device reset";
8360 break;
8361 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8362 reason_str = "internal task abort";
8363 break;
8364 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8365 reason_str = "internal task abort set";
8366 break;
8367 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8368 reason_str = "internal clear task set";
8369 break;
8370 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8371 reason_str = "internal query task";
8372 break;
8373 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8374 reason_str = "device init failure";
8375 break;
8376 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8377 reason_str = "internal device reset complete";
8378 break;
8379 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8380 reason_str = "internal task abort complete";
8381 break;
8382 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8383 reason_str = "internal async notification";
8384 break;
8385 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8386 reason_str = "pcie hot reset failed";
8387 break;
8388 default:
8389 reason_str = "unknown reason";
8390 break;
8391 }
8392
8393 ioc_info(ioc, "PCIE device status change: (%s)\n"
8394 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8395 reason_str, le16_to_cpu(event_data->DevHandle),
8396 (u64)le64_to_cpu(event_data->WWID),
8397 le16_to_cpu(event_data->TaskTag));
8398 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8399 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8400 event_data->ASC, event_data->ASCQ);
8401 pr_cont("\n");
8402 }
8403
8404 /**
8405 * _scsih_pcie_device_status_change_event - handle device status
8406 * change
8407 * @ioc: per adapter object
8408 * @fw_event: The fw_event_work object
8409 * Context: user.
8410 */
8411 static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8412 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8413 struct fw_event_work *fw_event)
8414 {
8415 struct MPT3SAS_TARGET *target_priv_data;
8416 struct _pcie_device *pcie_device;
8417 u64 wwid;
8418 unsigned long flags;
8419 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8420 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8421 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8422 _scsih_pcie_device_status_change_event_debug(ioc,
8423 event_data);
8424
8425 if (event_data->ReasonCode !=
8426 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8427 event_data->ReasonCode !=
8428 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8429 return;
8430
8431 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8432 wwid = le64_to_cpu(event_data->WWID);
8433 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8434
8435 if (!pcie_device || !pcie_device->starget)
8436 goto out;
8437
8438 target_priv_data = pcie_device->starget->hostdata;
8439 if (!target_priv_data)
8440 goto out;
8441
8442 if (event_data->ReasonCode ==
8443 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8444 target_priv_data->tm_busy = 1;
8445 else
8446 target_priv_data->tm_busy = 0;
8447 out:
8448 if (pcie_device)
8449 pcie_device_put(pcie_device);
8450
8451 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8452 }
8453
8454 /**
8455 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8456 * event
8457 * @ioc: per adapter object
8458 * @event_data: event data payload
8459 * Context: user.
8460 */
8461 static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasEnclDevStatusChange_t * event_data)8462 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8463 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8464 {
8465 char *reason_str = NULL;
8466
8467 switch (event_data->ReasonCode) {
8468 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8469 reason_str = "enclosure add";
8470 break;
8471 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8472 reason_str = "enclosure remove";
8473 break;
8474 default:
8475 reason_str = "unknown reason";
8476 break;
8477 }
8478
8479 ioc_info(ioc, "enclosure status change: (%s)\n"
8480 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8481 reason_str,
8482 le16_to_cpu(event_data->EnclosureHandle),
8483 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8484 le16_to_cpu(event_data->StartSlot));
8485 }
8486
8487 /**
8488 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8489 * @ioc: per adapter object
8490 * @fw_event: The fw_event_work object
8491 * Context: user.
8492 */
8493 static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8494 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8495 struct fw_event_work *fw_event)
8496 {
8497 Mpi2ConfigReply_t mpi_reply;
8498 struct _enclosure_node *enclosure_dev = NULL;
8499 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8500 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8501 int rc;
8502 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8503
8504 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8505 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8506 (Mpi2EventDataSasEnclDevStatusChange_t *)
8507 fw_event->event_data);
8508 if (ioc->shost_recovery)
8509 return;
8510
8511 if (enclosure_handle)
8512 enclosure_dev =
8513 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8514 enclosure_handle);
8515 switch (event_data->ReasonCode) {
8516 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8517 if (!enclosure_dev) {
8518 enclosure_dev =
8519 kzalloc(sizeof(struct _enclosure_node),
8520 GFP_KERNEL);
8521 if (!enclosure_dev) {
8522 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8523 __FILE__, __LINE__, __func__);
8524 return;
8525 }
8526 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8527 &enclosure_dev->pg0,
8528 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8529 enclosure_handle);
8530
8531 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8532 MPI2_IOCSTATUS_MASK)) {
8533 kfree(enclosure_dev);
8534 return;
8535 }
8536
8537 list_add_tail(&enclosure_dev->list,
8538 &ioc->enclosure_list);
8539 }
8540 break;
8541 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8542 if (enclosure_dev) {
8543 list_del(&enclosure_dev->list);
8544 kfree(enclosure_dev);
8545 }
8546 break;
8547 default:
8548 break;
8549 }
8550 }
8551
/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks every outstanding SCSI I/O, issues a QUERY_TASK TM to learn
 * whether the firmware/target still owns it, and aborts the I/Os that
 * are no longer owned.  The whole pass restarts (bounded to 5 attempts)
 * whenever a TM fails or another broadcast AEN arrives meanwhile.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	/* serialize against all other task-management users */
	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* issuing a TM can sleep; drop the lookup lock around it */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			/* retry labels expect the lock to be held */
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
			    "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
			    ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		/* not owned anymore: abort it, retrying up to 60 times */
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* another broadcast AEN arrived while we were scanning: rescan */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
8716
8717 /**
8718 * _scsih_sas_discovery_event - handle discovery events
8719 * @ioc: per adapter object
8720 * @fw_event: The fw_event_work object
8721 * Context: user.
8722 */
8723 static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8724 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8725 struct fw_event_work *fw_event)
8726 {
8727 Mpi2EventDataSasDiscovery_t *event_data =
8728 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8729
8730 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8731 ioc_info(ioc, "discovery event: (%s)",
8732 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8733 "start" : "stop");
8734 if (event_data->DiscoveryStatus)
8735 pr_cont("discovery_status(0x%08x)",
8736 le32_to_cpu(event_data->DiscoveryStatus));
8737 pr_cont("\n");
8738 }
8739
8740 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8741 !ioc->sas_hba.num_phys) {
8742 if (disable_discovery > 0 && ioc->shost_recovery) {
8743 /* Wait for the reset to complete */
8744 while (ioc->shost_recovery)
8745 ssleep(1);
8746 }
8747 _scsih_sas_host_add(ioc);
8748 }
8749 }
8750
8751 /**
8752 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8753 * events
8754 * @ioc: per adapter object
8755 * @fw_event: The fw_event_work object
8756 * Context: user.
8757 */
8758 static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8759 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8760 struct fw_event_work *fw_event)
8761 {
8762 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8763 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8764
8765 switch (event_data->ReasonCode) {
8766 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8767 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8768 le16_to_cpu(event_data->DevHandle),
8769 (u64)le64_to_cpu(event_data->SASAddress),
8770 event_data->PhysicalPort);
8771 break;
8772 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8773 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8774 le16_to_cpu(event_data->DevHandle),
8775 (u64)le64_to_cpu(event_data->SASAddress),
8776 event_data->PhysicalPort);
8777 break;
8778 default:
8779 break;
8780 }
8781 }
8782
8783 /**
8784 * _scsih_pcie_enumeration_event - handle enumeration events
8785 * @ioc: per adapter object
8786 * @fw_event: The fw_event_work object
8787 * Context: user.
8788 */
8789 static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8790 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8791 struct fw_event_work *fw_event)
8792 {
8793 Mpi26EventDataPCIeEnumeration_t *event_data =
8794 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8795
8796 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8797 return;
8798
8799 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8800 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8801 "started" : "completed",
8802 event_data->Flags);
8803 if (event_data->EnumerationStatus)
8804 pr_cont("enumeration_status(0x%08x)",
8805 le32_to_cpu(event_data->EnumerationStatus));
8806 pr_cont("\n");
8807 }
8808
/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Sends a RAID_ACTION (PHYSDISK_HIDDEN) request through the scsih
 * internal command channel and waits up to 10 seconds for the reply.
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* not applicable to MPI2 (SAS2) generation controllers */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	/* scsih_cmds allows only one outstanding internal command */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * NOTE(review): issue_reset is presumably updated here —
		 * mpt3sas_check_cmd_timeout() looks like a macro that writes
		 * through its last argument; confirm against mpt3sas_base.h.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		/* log info field is valid only when the flag bit is set */
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	/* release the internal command slot before any reset */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
8899
8900 /**
8901 * _scsih_reprobe_lun - reprobing lun
8902 * @sdev: scsi device struct
8903 * @no_uld_attach: sdev->no_uld_attach flag setting
8904 *
8905 **/
8906 static void
_scsih_reprobe_lun(struct scsi_device * sdev,void * no_uld_attach)8907 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8908 {
8909 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8910 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8911 sdev->no_uld_attach ? "hiding" : "exposing");
8912 WARN_ON(scsi_device_reprobe(sdev));
8913 }
8914
8915 /**
8916 * _scsih_sas_volume_add - add new volume
8917 * @ioc: per adapter object
8918 * @element: IR config element data
8919 * Context: user.
8920 */
8921 static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)8922 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8923 Mpi2EventIrConfigElement_t *element)
8924 {
8925 struct _raid_device *raid_device;
8926 unsigned long flags;
8927 u64 wwid;
8928 u16 handle = le16_to_cpu(element->VolDevHandle);
8929 int rc;
8930
8931 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8932 if (!wwid) {
8933 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8934 __FILE__, __LINE__, __func__);
8935 return;
8936 }
8937
8938 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8939 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8940 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8941
8942 if (raid_device)
8943 return;
8944
8945 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8946 if (!raid_device) {
8947 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8948 __FILE__, __LINE__, __func__);
8949 return;
8950 }
8951
8952 raid_device->id = ioc->sas_id++;
8953 raid_device->channel = RAID_CHANNEL;
8954 raid_device->handle = handle;
8955 raid_device->wwid = wwid;
8956 _scsih_raid_device_add(ioc, raid_device);
8957 if (!ioc->wait_for_discovery_to_complete) {
8958 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8959 raid_device->id, 0);
8960 if (rc)
8961 _scsih_raid_device_remove(ioc, raid_device);
8962 } else {
8963 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8964 _scsih_determine_boot_device(ioc, raid_device, 1);
8965 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8966 }
8967 }
8968
8969 /**
8970 * _scsih_sas_volume_delete - delete volume
8971 * @ioc: per adapter object
8972 * @handle: volume device handle
8973 * Context: user.
8974 */
8975 static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER * ioc,u16 handle)8976 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8977 {
8978 struct _raid_device *raid_device;
8979 unsigned long flags;
8980 struct MPT3SAS_TARGET *sas_target_priv_data;
8981 struct scsi_target *starget = NULL;
8982
8983 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8984 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8985 if (raid_device) {
8986 if (raid_device->starget) {
8987 starget = raid_device->starget;
8988 sas_target_priv_data = starget->hostdata;
8989 sas_target_priv_data->deleted = 1;
8990 }
8991 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
8992 raid_device->handle, (u64)raid_device->wwid);
8993 list_del(&raid_device->list);
8994 kfree(raid_device);
8995 }
8996 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8997 if (starget)
8998 scsi_remove_target(&starget->dev);
8999 }
9000
9001 /**
9002 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9003 * @ioc: per adapter object
9004 * @element: IR config element data
9005 * Context: user.
9006 */
9007 static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9008 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9009 Mpi2EventIrConfigElement_t *element)
9010 {
9011 struct _sas_device *sas_device;
9012 struct scsi_target *starget = NULL;
9013 struct MPT3SAS_TARGET *sas_target_priv_data;
9014 unsigned long flags;
9015 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9016
9017 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9018 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9019 if (sas_device) {
9020 sas_device->volume_handle = 0;
9021 sas_device->volume_wwid = 0;
9022 clear_bit(handle, ioc->pd_handles);
9023 if (sas_device->starget && sas_device->starget->hostdata) {
9024 starget = sas_device->starget;
9025 sas_target_priv_data = starget->hostdata;
9026 sas_target_priv_data->flags &=
9027 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9028 }
9029 }
9030 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9031 if (!sas_device)
9032 return;
9033
9034 /* exposing raid component */
9035 if (starget)
9036 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9037
9038 sas_device_put(sas_device);
9039 }
9040
9041 /**
9042 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9043 * @ioc: per adapter object
9044 * @element: IR config element data
9045 * Context: user.
9046 */
9047 static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9048 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9049 Mpi2EventIrConfigElement_t *element)
9050 {
9051 struct _sas_device *sas_device;
9052 struct scsi_target *starget = NULL;
9053 struct MPT3SAS_TARGET *sas_target_priv_data;
9054 unsigned long flags;
9055 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9056 u16 volume_handle = 0;
9057 u64 volume_wwid = 0;
9058
9059 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9060 if (volume_handle)
9061 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9062 &volume_wwid);
9063
9064 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9065 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9066 if (sas_device) {
9067 set_bit(handle, ioc->pd_handles);
9068 if (sas_device->starget && sas_device->starget->hostdata) {
9069 starget = sas_device->starget;
9070 sas_target_priv_data = starget->hostdata;
9071 sas_target_priv_data->flags |=
9072 MPT_TARGET_FLAGS_RAID_COMPONENT;
9073 sas_device->volume_handle = volume_handle;
9074 sas_device->volume_wwid = volume_wwid;
9075 }
9076 }
9077 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9078 if (!sas_device)
9079 return;
9080
9081 /* hiding raid component */
9082 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9083
9084 if (starget)
9085 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9086
9087 sas_device_put(sas_device);
9088 }
9089
9090 /**
9091 * _scsih_sas_pd_delete - delete pd component
9092 * @ioc: per adapter object
9093 * @element: IR config element data
9094 * Context: user.
9095 */
9096 static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9097 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9098 Mpi2EventIrConfigElement_t *element)
9099 {
9100 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9101
9102 _scsih_device_remove_by_handle(ioc, handle);
9103 }
9104
/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * (kernel-doc previously said "remove pd component" — copy/paste error;
 * this path adds/re-enables a physical disk.)  Marks the handle in
 * ioc->pd_handles, turns on the IR fastpath, and if the device is not
 * already known, reads its SAS Device Page 0, updates the transport
 * links and adds the device.
 */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	set_bit(handle, ioc->pd_handles);

	/* device already known: just re-enable the fastpath */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	_scsih_add_device(ioc, handle, 0, 1);
}
9157
9158 /**
9159 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9160 * @ioc: per adapter object
9161 * @event_data: event data payload
9162 * Context: user.
9163 */
9164 static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrConfigChangeList_t * event_data)9165 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9166 Mpi2EventDataIrConfigChangeList_t *event_data)
9167 {
9168 Mpi2EventIrConfigElement_t *element;
9169 u8 element_type;
9170 int i;
9171 char *reason_str = NULL, *element_str = NULL;
9172
9173 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9174
9175 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9176 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9177 "foreign" : "native",
9178 event_data->NumElements);
9179 for (i = 0; i < event_data->NumElements; i++, element++) {
9180 switch (element->ReasonCode) {
9181 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9182 reason_str = "add";
9183 break;
9184 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9185 reason_str = "remove";
9186 break;
9187 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9188 reason_str = "no change";
9189 break;
9190 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9191 reason_str = "hide";
9192 break;
9193 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9194 reason_str = "unhide";
9195 break;
9196 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9197 reason_str = "volume_created";
9198 break;
9199 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9200 reason_str = "volume_deleted";
9201 break;
9202 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9203 reason_str = "pd_created";
9204 break;
9205 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9206 reason_str = "pd_deleted";
9207 break;
9208 default:
9209 reason_str = "unknown reason";
9210 break;
9211 }
9212 element_type = le16_to_cpu(element->ElementFlags) &
9213 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9214 switch (element_type) {
9215 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9216 element_str = "volume";
9217 break;
9218 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9219 element_str = "phys disk";
9220 break;
9221 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9222 element_str = "hot spare";
9223 break;
9224 default:
9225 element_str = "unknown element";
9226 break;
9227 }
9228 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9229 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9230 reason_str, le16_to_cpu(element->VolDevHandle),
9231 le16_to_cpu(element->PhysDiskDevHandle),
9232 element->PhysDiskNum);
9233 }
9234 }
9235
9236 /**
9237 * _scsih_sas_ir_config_change_event - handle ir configuration change events
9238 * @ioc: per adapter object
9239 * @fw_event: The fw_event_work object
9240 * Context: user.
9241 */
9242 static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9243 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9244 struct fw_event_work *fw_event)
9245 {
9246 Mpi2EventIrConfigElement_t *element;
9247 int i;
9248 u8 foreign_config;
9249 Mpi2EventDataIrConfigChangeList_t *event_data =
9250 (Mpi2EventDataIrConfigChangeList_t *)
9251 fw_event->event_data;
9252
9253 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9254 (!ioc->hide_ir_msg))
9255 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9256
9257 foreign_config = (le32_to_cpu(event_data->Flags) &
9258 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9259
9260 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9261 if (ioc->shost_recovery &&
9262 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9263 for (i = 0; i < event_data->NumElements; i++, element++) {
9264 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9265 _scsih_ir_fastpath(ioc,
9266 le16_to_cpu(element->PhysDiskDevHandle),
9267 element->PhysDiskNum);
9268 }
9269 return;
9270 }
9271
9272 for (i = 0; i < event_data->NumElements; i++, element++) {
9273
9274 switch (element->ReasonCode) {
9275 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9276 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9277 if (!foreign_config)
9278 _scsih_sas_volume_add(ioc, element);
9279 break;
9280 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9281 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9282 if (!foreign_config)
9283 _scsih_sas_volume_delete(ioc,
9284 le16_to_cpu(element->VolDevHandle));
9285 break;
9286 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9287 if (!ioc->is_warpdrive)
9288 _scsih_sas_pd_hide(ioc, element);
9289 break;
9290 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9291 if (!ioc->is_warpdrive)
9292 _scsih_sas_pd_expose(ioc, element);
9293 break;
9294 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9295 if (!ioc->is_warpdrive)
9296 _scsih_sas_pd_add(ioc, element);
9297 break;
9298 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9299 if (!ioc->is_warpdrive)
9300 _scsih_sas_pd_delete(ioc, element);
9301 break;
9302 }
9303 }
9304 }
9305
/**
 * _scsih_sas_ir_volume_event - IR volume event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles IR volume state changes: deletes the volume on
 * MISSING/FAILED, and adds an unknown volume on ONLINE/DEGRADED/OPTIMAL.
 */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	/* events are not processed while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		/* volume already known: nothing to add */
		if (raid_device)
			break;

		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		/* roll back the list entry if scsi registration failed */
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}
9386
/**
 * _scsih_sas_ir_physical_disk_event - PD event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles IR physical-disk state changes: when a disk becomes usable
 * (online/degraded/rebuilding/optimal/hot-spare) and is not yet known,
 * reads its SAS Device Page 0, updates transport links and adds it.
 */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	/* events are not processed while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		/* device already known: nothing further to do */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			return;
		}

		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}
9473
9474 /**
9475 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9476 * @ioc: per adapter object
9477 * @event_data: event data payload
9478 * Context: user.
9479 */
9480 static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrOperationStatus_t * event_data)9481 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9482 Mpi2EventDataIrOperationStatus_t *event_data)
9483 {
9484 char *reason_str = NULL;
9485
9486 switch (event_data->RAIDOperation) {
9487 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9488 reason_str = "resync";
9489 break;
9490 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9491 reason_str = "online capacity expansion";
9492 break;
9493 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9494 reason_str = "consistency check";
9495 break;
9496 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9497 reason_str = "background init";
9498 break;
9499 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9500 reason_str = "make data consistent";
9501 break;
9502 }
9503
9504 if (!reason_str)
9505 return;
9506
9507 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9508 reason_str,
9509 le16_to_cpu(event_data->VolDevHandle),
9510 event_data->PercentComplete);
9511 }
9512
9513 /**
9514 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9515 * @ioc: per adapter object
9516 * @fw_event: The fw_event_work object
9517 * Context: user.
9518 */
9519 static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9520 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9521 struct fw_event_work *fw_event)
9522 {
9523 Mpi2EventDataIrOperationStatus_t *event_data =
9524 (Mpi2EventDataIrOperationStatus_t *)
9525 fw_event->event_data;
9526 static struct _raid_device *raid_device;
9527 unsigned long flags;
9528 u16 handle;
9529
9530 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9531 (!ioc->hide_ir_msg))
9532 _scsih_sas_ir_operation_status_event_debug(ioc,
9533 event_data);
9534
9535 /* code added for raid transport support */
9536 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9537
9538 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9539 handle = le16_to_cpu(event_data->VolDevHandle);
9540 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9541 if (raid_device)
9542 raid_device->percent_complete =
9543 event_data->PercentComplete;
9544 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9545 }
9546 }
9547
9548 /**
9549 * _scsih_prep_device_scan - initialize parameters prior to device scan
9550 * @ioc: per adapter object
9551 *
9552 * Set the deleted flag prior to device scan. If the device is found during
9553 * the scan, then we clear the deleted flag.
9554 */
9555 static void
_scsih_prep_device_scan(struct MPT3SAS_ADAPTER * ioc)9556 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9557 {
9558 struct MPT3SAS_DEVICE *sas_device_priv_data;
9559 struct scsi_device *sdev;
9560
9561 shost_for_each_device(sdev, ioc->shost) {
9562 sas_device_priv_data = sdev->hostdata;
9563 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9564 sas_device_priv_data->sas_target->deleted = 1;
9565 }
9566 }
9567
9568 /**
9569 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
9570 * @ioc: per adapter object
9571 * @sas_device_pg0: SAS Device page 0
9572 *
9573 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
9575 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	/* Look up the cached enclosure object for the handle the firmware
	 * reported in device page 0, if any.
	 */
	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* Match on SAS address, slot and hba_port. */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			sas_target_priv_data = starget->hostdata;
			/* Device answered after the reset: clear TM-busy and
			 * the deleted mark set by _scsih_prep_device_scan().
			 */
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    sas_device->enclosure_logical_id,
				    sas_device->slot);
		}
		/* Refresh enclosure level / connector name from page 0;
		 * clear them when the page marks them invalid.
		 */
		if (le16_to_cpu(sas_device_pg0->Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			    sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
			    &sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
		    le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
			    enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
				    enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* The firmware may assign a new handle across a reset;
		 * propagate it to the cached device and its target data.
		 */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
		    sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
9667
9668 /**
9669 * _scsih_create_enclosure_list_after_reset - Free Existing list,
9670 * And create enclosure list by scanning all Enclosure Page(0)s
9671 * @ioc: per adapter object
9672 */
9673 static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER * ioc)9674 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9675 {
9676 struct _enclosure_node *enclosure_dev;
9677 Mpi2ConfigReply_t mpi_reply;
9678 u16 enclosure_handle;
9679 int rc;
9680
9681 /* Free existing enclosure list */
9682 mpt3sas_free_enclosure_list(ioc);
9683
9684 /* Re constructing enclosure list after reset*/
9685 enclosure_handle = 0xFFFF;
9686 do {
9687 enclosure_dev =
9688 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9689 if (!enclosure_dev) {
9690 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9691 __FILE__, __LINE__, __func__);
9692 return;
9693 }
9694 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9695 &enclosure_dev->pg0,
9696 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9697 enclosure_handle);
9698
9699 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9700 MPI2_IOCSTATUS_MASK)) {
9701 kfree(enclosure_dev);
9702 return;
9703 }
9704 list_add_tail(&enclosure_dev->list,
9705 &ioc->enclosure_list);
9706 enclosure_handle =
9707 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9708 } while (1);
9709 }
9710
9711 /**
 * _scsih_search_responding_sas_devices - mark still-responding SAS end-devices
9713 * @ioc: per adapter object
9714 *
9715 * After host reset, find out whether devices are still responding.
9716 * If not remove.
9717 */
9718 static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER * ioc)9719 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9720 {
9721 Mpi2SasDevicePage0_t sas_device_pg0;
9722 Mpi2ConfigReply_t mpi_reply;
9723 u16 ioc_status;
9724 u16 handle;
9725 u32 device_info;
9726
9727 ioc_info(ioc, "search for end-devices: start\n");
9728
9729 if (list_empty(&ioc->sas_device_list))
9730 goto out;
9731
9732 handle = 0xFFFF;
9733 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9734 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9735 handle))) {
9736 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9737 MPI2_IOCSTATUS_MASK;
9738 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9739 break;
9740 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9741 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9742 if (!(_scsih_is_end_device(device_info)))
9743 continue;
9744 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9745 }
9746
9747 out:
9748 ioc_info(ioc, "search for end-devices: complete\n");
9749 }
9750
9751 /**
9752 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9753 * @ioc: per adapter object
9754 * @pcie_device_pg0: PCIe Device page 0
9755 *
9756 * After host reset, find out whether devices are still responding.
9757 * Used in _scsih_remove_unresponding_devices.
9758 */
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		/* Match on WWID and slot. */
		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
		    && (pcie_device->slot == le16_to_cpu(
		    pcie_device_pg0->Slot))) {
			pcie_device->access_status =
			    pcie_device_pg0->AccessStatus;
			pcie_device->responding = 1;
			starget = pcie_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* Device answered after the reset: clear
				 * TM-busy and the deleted mark.
				 */
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), wwid(0x%016llx) ",
				    pcie_device->handle,
				    (unsigned long long)pcie_device->wwid);
				if (pcie_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
					    "enclosure logical id(0x%016llx), "
					    "slot(%d)\n",
					    (unsigned long long)
					    pcie_device->enclosure_logical_id,
					    pcie_device->slot);
			}

			/* Enclosure level/connector name are only valid on
			 * non-MPI2 (SAS3+) controllers with the flag set.
			 */
			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
				pcie_device->enclosure_level =
				    pcie_device_pg0->EnclosureLevel;
				memcpy(&pcie_device->connector_name[0],
				    &pcie_device_pg0->ConnectorName[0], 4);
			} else {
				pcie_device->enclosure_level = 0;
				pcie_device->connector_name[0] = '\0';
			}

			/* Propagate a firmware-assigned handle change to
			 * the cached device and its target private data.
			 */
			if (pcie_device->handle == le16_to_cpu(
			    pcie_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    pcie_device->handle);
			pcie_device->handle = le16_to_cpu(
			    pcie_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
				    le16_to_cpu(pcie_device_pg0->DevHandle);
			goto out;
		}
	}

 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
9826
9827 /**
9828 * _scsih_search_responding_pcie_devices -
9829 * @ioc: per adapter object
9830 *
9831 * After host reset, find out whether devices are still responding.
9832 * If not remove.
9833 */
9834 static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER * ioc)9835 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9836 {
9837 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9838 Mpi2ConfigReply_t mpi_reply;
9839 u16 ioc_status;
9840 u16 handle;
9841 u32 device_info;
9842
9843 ioc_info(ioc, "search for end-devices: start\n");
9844
9845 if (list_empty(&ioc->pcie_device_list))
9846 goto out;
9847
9848 handle = 0xFFFF;
9849 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9850 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9851 handle))) {
9852 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9853 MPI2_IOCSTATUS_MASK;
9854 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9855 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9856 __func__, ioc_status,
9857 le32_to_cpu(mpi_reply.IOCLogInfo));
9858 break;
9859 }
9860 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9861 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9862 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9863 continue;
9864 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9865 }
9866 out:
9867 ioc_info(ioc, "search for PCIe end-devices: complete\n");
9868 }
9869
9870 /**
9871 * _scsih_mark_responding_raid_device - mark a raid_device as responding
9872 * @ioc: per adapter object
9873 * @wwid: world wide identifier for raid volume
9874 * @handle: device handle
9875 *
9876 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
9878 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		/* Match on volume WWID; only volumes already exposed to
		 * the SCSI midlayer (starget set) are considered.
		 */
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* Volume answered: clear the deleted mark. */
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/* Drop the lock: starget_printk and the warpdrive
			 * re-init below may sleep / must not run under a
			 * spinlock.  NOTE(review): raid_device is used after
			 * the unlock on the assumption nothing removes it
			 * concurrently during reset recovery — confirm.
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			/* Re-take the lock to update the (possibly changed)
			 * firmware handle; every path below returns, so the
			 * list iteration never resumes after the unlock.
			 */
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
9926
9927 /**
 * _scsih_search_responding_raid_devices - mark still-responding RAID volumes
9929 * @ioc: per adapter object
9930 *
9931 * After host reset, find out whether devices are still responding.
9932 * If not remove.
9933 */
9934 static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER * ioc)9935 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9936 {
9937 Mpi2RaidVolPage1_t volume_pg1;
9938 Mpi2RaidVolPage0_t volume_pg0;
9939 Mpi2RaidPhysDiskPage0_t pd_pg0;
9940 Mpi2ConfigReply_t mpi_reply;
9941 u16 ioc_status;
9942 u16 handle;
9943 u8 phys_disk_num;
9944
9945 if (!ioc->ir_firmware)
9946 return;
9947
9948 ioc_info(ioc, "search for raid volumes: start\n");
9949
9950 if (list_empty(&ioc->raid_device_list))
9951 goto out;
9952
9953 handle = 0xFFFF;
9954 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9955 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9956 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9957 MPI2_IOCSTATUS_MASK;
9958 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9959 break;
9960 handle = le16_to_cpu(volume_pg1.DevHandle);
9961
9962 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9963 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9964 sizeof(Mpi2RaidVolPage0_t)))
9965 continue;
9966
9967 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9968 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9969 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
9970 _scsih_mark_responding_raid_device(ioc,
9971 le64_to_cpu(volume_pg1.WWID), handle);
9972 }
9973
9974 /* refresh the pd_handles */
9975 if (!ioc->is_warpdrive) {
9976 phys_disk_num = 0xFF;
9977 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
9978 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9979 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9980 phys_disk_num))) {
9981 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9982 MPI2_IOCSTATUS_MASK;
9983 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9984 break;
9985 phys_disk_num = pd_pg0.PhysDiskNum;
9986 handle = le16_to_cpu(pd_pg0.DevHandle);
9987 set_bit(handle, ioc->pd_handles);
9988 }
9989 }
9990 out:
9991 ioc_info(ioc, "search for responding raid volumes: complete\n");
9992 }
9993
9994 /**
9995 * _scsih_mark_responding_expander - mark a expander as responding
9996 * @ioc: per adapter object
9997 * @expander_pg0:SAS Expander Config Page0
9998 *
9999 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
10001 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
	Mpi2ExpanderPage0_t *expander_pg0)
{
	struct _sas_node *sas_expander = NULL;
	unsigned long flags;
	int i;
	struct _enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, expander_pg0->PhysicalPort, 0);

	/* Look up the cached enclosure object for the reported handle. */
	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    enclosure_handle);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		/* Match on SAS address and hba_port. */
		if (sas_expander->sas_address != sas_address)
			continue;
		if (sas_expander->port != port)
			continue;
		sas_expander->responding = 1;

		/* Refresh enclosure association from the config page. */
		if (enclosure_dev) {
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			sas_expander->enclosure_handle =
			    le16_to_cpu(expander_pg0->EnclosureHandle);
		}

		/* The firmware may assign a new handle across a reset;
		 * propagate it to the expander and all of its phys.
		 */
		if (sas_expander->handle == handle)
			goto out;
		pr_info("\texpander(0x%016llx): handle changed" \
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}
10050
10051 /**
 * _scsih_search_responding_expanders - mark still-responding expanders
10053 * @ioc: per adapter object
10054 *
10055 * After host reset, find out whether devices are still responding.
10056 * If not remove.
10057 */
10058 static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER * ioc)10059 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10060 {
10061 Mpi2ExpanderPage0_t expander_pg0;
10062 Mpi2ConfigReply_t mpi_reply;
10063 u16 ioc_status;
10064 u64 sas_address;
10065 u16 handle;
10066 u8 port;
10067
10068 ioc_info(ioc, "search for expanders: start\n");
10069
10070 if (list_empty(&ioc->sas_expander_list))
10071 goto out;
10072
10073 handle = 0xFFFF;
10074 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10075 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10076
10077 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10078 MPI2_IOCSTATUS_MASK;
10079 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10080 break;
10081
10082 handle = le16_to_cpu(expander_pg0.DevHandle);
10083 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10084 port = expander_pg0.PhysicalPort;
10085 pr_info(
10086 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10087 handle, (unsigned long long)sas_address,
10088 (ioc->multipath_on_hba ?
10089 port : MULTIPATH_DISABLED_PORT_ID));
10090 _scsih_mark_responding_expander(ioc, &expander_pg0);
10091 }
10092
10093 out:
10094 ioc_info(ioc, "search for expanders: complete\n");
10095 }
10096
10097 /**
10098 * _scsih_remove_unresponding_devices - removing unresponding devices
10099 * @ioc: per adapter object
10100 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			/* Reset the flag for the next scan cycle. */
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		/* Drop the reference we took ownership of above. */
		sas_device_put(sas_device);
	}

	/* Same prune-outside-the-lock pattern for PCIe end-devices;
	 * 'head' is re-initialized since it was drained above.
	 */
	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}
10192
10193 static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander,u16 handle)10194 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10195 struct _sas_node *sas_expander, u16 handle)
10196 {
10197 Mpi2ExpanderPage1_t expander_pg1;
10198 Mpi2ConfigReply_t mpi_reply;
10199 int i;
10200
10201 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10202 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10203 &expander_pg1, i, handle))) {
10204 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10205 __FILE__, __LINE__, __func__);
10206 return;
10207 }
10208
10209 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10210 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10211 expander_pg1.NegotiatedLinkRate >> 4,
10212 sas_expander->port);
10213 }
10214 }
10215
10216 /**
10217 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10218 * @ioc: per adapter object
10219 */
10220 static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER * ioc)10221 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10222 {
10223 Mpi2ExpanderPage0_t expander_pg0;
10224 Mpi2SasDevicePage0_t sas_device_pg0;
10225 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10226 Mpi2RaidVolPage1_t *volume_pg1;
10227 Mpi2RaidVolPage0_t *volume_pg0;
10228 Mpi2RaidPhysDiskPage0_t pd_pg0;
10229 Mpi2EventIrConfigElement_t element;
10230 Mpi2ConfigReply_t mpi_reply;
10231 u8 phys_disk_num, port_id;
10232 u16 ioc_status;
10233 u16 handle, parent_handle;
10234 u64 sas_address;
10235 struct _sas_device *sas_device;
10236 struct _pcie_device *pcie_device;
10237 struct _sas_node *expander_device;
10238 static struct _raid_device *raid_device;
10239 u8 retry_count;
10240 unsigned long flags;
10241
10242 volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
10243 if (!volume_pg0)
10244 return;
10245
10246 volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
10247 if (!volume_pg1) {
10248 kfree(volume_pg0);
10249 return;
10250 }
10251
10252 ioc_info(ioc, "scan devices: start\n");
10253
10254 _scsih_sas_host_refresh(ioc);
10255
10256 ioc_info(ioc, "\tscan devices: expanders start\n");
10257
10258 /* expanders */
10259 handle = 0xFFFF;
10260 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10261 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10262 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10263 MPI2_IOCSTATUS_MASK;
10264 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10265 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10266 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10267 break;
10268 }
10269 handle = le16_to_cpu(expander_pg0.DevHandle);
10270 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10271 port_id = expander_pg0.PhysicalPort;
10272 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10273 ioc, le64_to_cpu(expander_pg0.SASAddress),
10274 mpt3sas_get_port_by_id(ioc, port_id, 0));
10275 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10276 if (expander_device)
10277 _scsih_refresh_expander_links(ioc, expander_device,
10278 handle);
10279 else {
10280 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10281 handle,
10282 (u64)le64_to_cpu(expander_pg0.SASAddress));
10283 _scsih_expander_add(ioc, handle);
10284 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10285 handle,
10286 (u64)le64_to_cpu(expander_pg0.SASAddress));
10287 }
10288 }
10289
10290 ioc_info(ioc, "\tscan devices: expanders complete\n");
10291
10292 if (!ioc->ir_firmware)
10293 goto skip_to_sas;
10294
10295 ioc_info(ioc, "\tscan devices: phys disk start\n");
10296
10297 /* phys disk */
10298 phys_disk_num = 0xFF;
10299 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10300 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10301 phys_disk_num))) {
10302 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10303 MPI2_IOCSTATUS_MASK;
10304 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10305 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10306 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10307 break;
10308 }
10309 phys_disk_num = pd_pg0.PhysDiskNum;
10310 handle = le16_to_cpu(pd_pg0.DevHandle);
10311 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10312 if (sas_device) {
10313 sas_device_put(sas_device);
10314 continue;
10315 }
10316 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10317 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10318 handle) != 0)
10319 continue;
10320 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10321 MPI2_IOCSTATUS_MASK;
10322 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10323 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10324 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10325 break;
10326 }
10327 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10328 if (!_scsih_get_sas_address(ioc, parent_handle,
10329 &sas_address)) {
10330 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10331 handle,
10332 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10333 port_id = sas_device_pg0.PhysicalPort;
10334 mpt3sas_transport_update_links(ioc, sas_address,
10335 handle, sas_device_pg0.PhyNum,
10336 MPI2_SAS_NEG_LINK_RATE_1_5,
10337 mpt3sas_get_port_by_id(ioc, port_id, 0));
10338 set_bit(handle, ioc->pd_handles);
10339 retry_count = 0;
10340 /* This will retry adding the end device.
10341 * _scsih_add_device() will decide on retries and
10342 * return "1" when it should be retried
10343 */
10344 while (_scsih_add_device(ioc, handle, retry_count++,
10345 1)) {
10346 ssleep(1);
10347 }
10348 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10349 handle,
10350 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10351 }
10352 }
10353
10354 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10355
10356 ioc_info(ioc, "\tscan devices: volumes start\n");
10357
10358 /* volumes */
10359 handle = 0xFFFF;
10360 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10361 volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10362 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10363 MPI2_IOCSTATUS_MASK;
10364 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10365 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10366 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10367 break;
10368 }
10369 handle = le16_to_cpu(volume_pg1->DevHandle);
10370 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10371 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10372 le64_to_cpu(volume_pg1->WWID));
10373 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10374 if (raid_device)
10375 continue;
10376 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10377 volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10378 sizeof(Mpi2RaidVolPage0_t)))
10379 continue;
10380 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10381 MPI2_IOCSTATUS_MASK;
10382 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10383 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10384 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10385 break;
10386 }
10387 if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10388 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10389 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10390 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10391 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10392 element.VolDevHandle = volume_pg1->DevHandle;
10393 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10394 volume_pg1->DevHandle);
10395 _scsih_sas_volume_add(ioc, &element);
10396 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10397 volume_pg1->DevHandle);
10398 }
10399 }
10400
10401 ioc_info(ioc, "\tscan devices: volumes complete\n");
10402
10403 skip_to_sas:
10404
10405 ioc_info(ioc, "\tscan devices: end devices start\n");
10406
10407 /* sas devices */
10408 handle = 0xFFFF;
10409 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10410 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10411 handle))) {
10412 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10413 MPI2_IOCSTATUS_MASK;
10414 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10415 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10416 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10417 break;
10418 }
10419 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10420 if (!(_scsih_is_end_device(
10421 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10422 continue;
10423 port_id = sas_device_pg0.PhysicalPort;
10424 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10425 le64_to_cpu(sas_device_pg0.SASAddress),
10426 mpt3sas_get_port_by_id(ioc, port_id, 0));
10427 if (sas_device) {
10428 sas_device_put(sas_device);
10429 continue;
10430 }
10431 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10432 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10433 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10434 handle,
10435 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10436 mpt3sas_transport_update_links(ioc, sas_address, handle,
10437 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10438 mpt3sas_get_port_by_id(ioc, port_id, 0));
10439 retry_count = 0;
10440 /* This will retry adding the end device.
10441 * _scsih_add_device() will decide on retries and
10442 * return "1" when it should be retried
10443 */
10444 while (_scsih_add_device(ioc, handle, retry_count++,
10445 0)) {
10446 ssleep(1);
10447 }
10448 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10449 handle,
10450 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10451 }
10452 }
10453 ioc_info(ioc, "\tscan devices: end devices complete\n");
10454 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10455
10456 /* pcie devices */
10457 handle = 0xFFFF;
10458 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10459 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10460 handle))) {
10461 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10462 & MPI2_IOCSTATUS_MASK;
10463 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10464 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10465 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10466 break;
10467 }
10468 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10469 if (!(_scsih_is_nvme_pciescsi_device(
10470 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10471 continue;
10472 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10473 le64_to_cpu(pcie_device_pg0.WWID));
10474 if (pcie_device) {
10475 pcie_device_put(pcie_device);
10476 continue;
10477 }
10478 retry_count = 0;
10479 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10480 _scsih_pcie_add_device(ioc, handle);
10481
10482 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10483 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10484 }
10485
10486 kfree(volume_pg0);
10487 kfree(volume_pg1);
10488
10489 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10490 ioc_info(ioc, "scan devices: complete\n");
10491 }
10492
/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 * Called by the base driver when a hard reset is about to begin; the
 * scsih layer currently has nothing to tear down at this stage.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	/* Only emit a task-management debug trace marking the reset phase. */
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
10503
/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 *					scsi & tm cmds.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 * Invoked during host reset: any internal scsih or task-management
 * command still pending is marked as reset-terminated, its message
 * frame is returned to the pool, and its waiter is woken.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/* Abort the internal scsih command, if one is in flight. */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		/* Free the smid before waking the waiter so the frame is
		 * reusable as soon as the waiter observes MPT3_CMD_RESET.
		 */
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* Same treatment for an in-flight task-management command. */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* Reset won't complete these operations; clear the tracking bitmaps. */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);
	/* Drop queued firmware events and fail back all outstanding scsi cmds. */
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}
10533
10534 /**
10535 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10536 * @ioc: per adapter object
10537 *
10538 * The handler for doing any required cleanup or initialization.
10539 */
10540 void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER * ioc)10541 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10542 {
10543 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10544 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
10545 !ioc->sas_hba.num_phys)) {
10546 if (ioc->multipath_on_hba) {
10547 _scsih_sas_port_refresh(ioc);
10548 _scsih_update_vphys_after_reset(ioc);
10549 }
10550 _scsih_prep_device_scan(ioc);
10551 _scsih_create_enclosure_list_after_reset(ioc);
10552 _scsih_search_responding_sas_devices(ioc);
10553 _scsih_search_responding_pcie_devices(ioc);
10554 _scsih_search_responding_raid_devices(ioc);
10555 _scsih_search_responding_expanders(ioc);
10556 _scsih_error_recovery_delete_devices(ioc);
10557 }
10558 }
10559
10560 /**
10561 * _mpt3sas_fw_work - delayed task for processing firmware events
10562 * @ioc: per adapter object
10563 * @fw_event: The fw_event_work object
10564 * Context: user.
10565 */
10566 static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)10567 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10568 {
10569 ioc->current_event = fw_event;
10570 _scsih_fw_event_del_from_list(ioc, fw_event);
10571
10572 /* the queue is being flushed so ignore this event */
10573 if (ioc->remove_host || ioc->pci_error_recovery) {
10574 fw_event_work_put(fw_event);
10575 ioc->current_event = NULL;
10576 return;
10577 }
10578
10579 switch (fw_event->event) {
10580 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10581 mpt3sas_process_trigger_data(ioc,
10582 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10583 fw_event->event_data);
10584 break;
10585 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10586 while (scsi_host_in_recovery(ioc->shost) ||
10587 ioc->shost_recovery) {
10588 /*
10589 * If we're unloading or cancelling the work, bail.
10590 * Otherwise, this can become an infinite loop.
10591 */
10592 if (ioc->remove_host || ioc->fw_events_cleanup)
10593 goto out;
10594 ssleep(1);
10595 }
10596 _scsih_remove_unresponding_devices(ioc);
10597 _scsih_del_dirty_vphy(ioc);
10598 _scsih_del_dirty_port_entries(ioc);
10599 _scsih_scan_for_devices_after_reset(ioc);
10600 _scsih_set_nvme_max_shutdown_latency(ioc);
10601 break;
10602 case MPT3SAS_PORT_ENABLE_COMPLETE:
10603 ioc->start_scan = 0;
10604 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10605 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10606 missing_delay[1]);
10607 dewtprintk(ioc,
10608 ioc_info(ioc, "port enable: complete from worker thread\n"));
10609 break;
10610 case MPT3SAS_TURN_ON_PFA_LED:
10611 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10612 break;
10613 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10614 _scsih_sas_topology_change_event(ioc, fw_event);
10615 break;
10616 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10617 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10618 _scsih_sas_device_status_change_event_debug(ioc,
10619 (Mpi2EventDataSasDeviceStatusChange_t *)
10620 fw_event->event_data);
10621 break;
10622 case MPI2_EVENT_SAS_DISCOVERY:
10623 _scsih_sas_discovery_event(ioc, fw_event);
10624 break;
10625 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10626 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10627 break;
10628 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10629 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10630 break;
10631 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10632 _scsih_sas_enclosure_dev_status_change_event(ioc,
10633 fw_event);
10634 break;
10635 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10636 _scsih_sas_ir_config_change_event(ioc, fw_event);
10637 break;
10638 case MPI2_EVENT_IR_VOLUME:
10639 _scsih_sas_ir_volume_event(ioc, fw_event);
10640 break;
10641 case MPI2_EVENT_IR_PHYSICAL_DISK:
10642 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10643 break;
10644 case MPI2_EVENT_IR_OPERATION_STATUS:
10645 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10646 break;
10647 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10648 _scsih_pcie_device_status_change_event(ioc, fw_event);
10649 break;
10650 case MPI2_EVENT_PCIE_ENUMERATION:
10651 _scsih_pcie_enumeration_event(ioc, fw_event);
10652 break;
10653 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10654 _scsih_pcie_topology_change_event(ioc, fw_event);
10655 ioc->current_event = NULL;
10656 return;
10657 break;
10658 }
10659 out:
10660 fw_event_work_put(fw_event);
10661 ioc->current_event = NULL;
10662 }
10663
10664 /**
10665 * _firmware_event_work
10666 * @work: The fw_event_work object
10667 * Context: user.
10668 *
10669 * wrappers for the work thread handling firmware events
10670 */
10671
10672 static void
_firmware_event_work(struct work_struct * work)10673 _firmware_event_work(struct work_struct *work)
10674 {
10675 struct fw_event_work *fw_event = container_of(work,
10676 struct fw_event_work, work);
10677
10678 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10679 }
10680
/**
 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Events the driver does not care about are filtered out here (default
 * case); a few events are handled inline because they need immediate,
 * interrupt-time action before the deferred worker runs.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	/* Feed the diag-trigger engine for all but log-entry events. */
	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		/* Only async-event primitives are queued for processing. */
		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/* Coalesce: if one AEN is already being worked, just count
		 * this one instead of queueing another work item.
		 */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* Inline pre-processing: flag deleted handles right away. */
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 *log_code;

		/* Log entries only matter on WarpDrive controllers. */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = (u32 *)log_entry->LogData;

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		/* Translate WarpDrive log codes into kernel log messages. */
		switch (le32_to_cpu(*log_code)) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	/* These need no inline handling; just queue the deferred work. */
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
			(Mpi2EventDataTemperature_t *)
			mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		/* Reported fully here; still queued so triggers can fire. */
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
				  le32_to_cpu(
				  ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* EventDataLength is in 32-bit dwords; convert to bytes. */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* Snapshot the event payload; the reply frame is recycled after
	 * this callback returns (we always return 1).
	 */
	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	/* The event list now holds its own reference; drop ours. */
	fw_event_work_put(fw_event);
	return 1;
}
10858
10859 /**
10860 * _scsih_expander_node_remove - removing expander device from list.
10861 * @ioc: per adapter object
10862 * @sas_expander: the sas_device object
10863 *
10864 * Removing object and freeing associated memory from the
10865 * ioc->sas_expander_list.
10866 */
10867 static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)10868 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
10869 struct _sas_node *sas_expander)
10870 {
10871 struct _sas_port *mpt3sas_port, *next;
10872 unsigned long flags;
10873
10874 /* remove sibling ports attached to this expander */
10875 list_for_each_entry_safe(mpt3sas_port, next,
10876 &sas_expander->sas_port_list, port_list) {
10877 if (ioc->shost_recovery)
10878 return;
10879 if (mpt3sas_port->remote_identify.device_type ==
10880 SAS_END_DEVICE)
10881 mpt3sas_device_remove_by_sas_address(ioc,
10882 mpt3sas_port->remote_identify.sas_address,
10883 mpt3sas_port->hba_port);
10884 else if (mpt3sas_port->remote_identify.device_type ==
10885 SAS_EDGE_EXPANDER_DEVICE ||
10886 mpt3sas_port->remote_identify.device_type ==
10887 SAS_FANOUT_EXPANDER_DEVICE)
10888 mpt3sas_expander_remove(ioc,
10889 mpt3sas_port->remote_identify.sas_address,
10890 mpt3sas_port->hba_port);
10891 }
10892
10893 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
10894 sas_expander->sas_address_parent, sas_expander->port);
10895
10896 ioc_info(ioc,
10897 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10898 sas_expander->handle, (unsigned long long)
10899 sas_expander->sas_address,
10900 sas_expander->port->port_id);
10901
10902 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10903 list_del(&sas_expander->list);
10904 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10905
10906 kfree(sas_expander->phy);
10907 kfree(sas_expander);
10908 }
10909
/**
 * _scsih_nvme_shutdown - NVMe shutdown notification
 * @ioc: per adapter object
 *
 * Sending IoUnitControl request with shutdown operation code to alert IOC that
 * the host system is shutting down so that IOC can issue NVMe shutdown to
 * NVMe drives attached to it.
 *
 * Uses the shared internal scsih command slot; waits up to
 * ioc->max_shutdown_latency seconds for the IOC to acknowledge.
 */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	/* The internal command slot is single-use; bail if occupied. */
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* Build the IO_UNIT_CONTROL / SHUTDOWN request in the frame. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
		ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
			ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	/* Log the IOC's verdict if a reply frame was delivered. */
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo));
	}
out:
	/* Always release the command slot and the mutex. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
10977
10978
/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
 * the host system is shutting down.
 *
 * Uses the shared internal scsih command slot; waits up to 10 seconds
 * for the firmware to acknowledge the RAID action.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	/* The internal command slot is single-use; bail if occupied. */
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* Build the RAID_ACTION / SYSTEM_SHUTDOWN_INITIATED request. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/* hide_ir_msg suppresses IR chatter (e.g. on WarpDrive). */
	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	/* Log the IOC's verdict if a reply frame was delivered. */
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	/* Always release the command slot and the mutex. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
11046
11047 /**
11048 * _scsih_get_shost_and_ioc - get shost and ioc
11049 * and verify whether they are NULL or not
11050 * @pdev: PCI device struct
11051 * @shost: address of scsi host pointer
11052 * @ioc: address of HBA adapter pointer
11053 *
11054 * Return zero if *shost and *ioc are not NULL otherwise return error number.
11055 */
11056 static int
_scsih_get_shost_and_ioc(struct pci_dev * pdev,struct Scsi_Host ** shost,struct MPT3SAS_ADAPTER ** ioc)11057 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11058 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11059 {
11060 *shost = pci_get_drvdata(pdev);
11061 if (*shost == NULL) {
11062 dev_err(&pdev->dev, "pdev's driver data is null\n");
11063 return -ENXIO;
11064 }
11065
11066 *ioc = shost_priv(*shost);
11067 if (*ioc == NULL) {
11068 dev_err(&pdev->dev, "shost's private data is null\n");
11069 return -ENXIO;
11070 }
11071
11072 return 0;
11073 }
11074
/**
 * scsih_remove - detach and remove add host
 * @pdev: PCI device struct
 *
 * Routine called when unloading the driver.
 * Tear-down order: stop event processing, restore ioc page1 (Aero),
 * notify IR of shutdown, detach the SAS transport, remove volumes,
 * PCIe devices, host ports/phys, then detach the base driver.
 */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	/* Tell all async paths (events, resets) that we are going away. */
	ioc->remove_host = 1;

	/* Surprise removal: fail outstanding commands immediately. */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* Detach the event workqueue under the lock, destroy it outside. */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1. so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	/* Remove RAID volume targets and their bookkeeping. */
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			/* Stop new I/O to this target before removal. */
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	/* Remove NVMe/PCIe devices and drop the list's references. */
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
	    list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	/* Free the hba_port table entries. */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	/* Detach from firmware, unlink from the global ioc list, and drop
	 * the final Scsi_Host reference.
	 */
	mpt3sas_base_detach(ioc);
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}
11176
/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 *
 * Quiesce the adapter for power-off: stop event processing, restore
 * ioc page1 (Aero), notify IR and NVMe devices of shutdown, then
 * detach the base driver.  Unlike scsih_remove(), no SML objects are
 * torn down — the system is going away anyway.
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	/* Tell all async paths (events, resets) that we are going away. */
	ioc->remove_host = 1;

	/* Surprise removal: fail outstanding commands immediately. */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* Detach the event workqueue under the lock, destroy it outside. */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
				&ioc->ioc_pg1_copy);

	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_detach(ioc);
}
11218
11219
11220 /**
11221 * _scsih_probe_boot_devices - reports 1st device
11222 * @ioc: per adapter object
11223 *
11224 * If specified in bios page 2, this routine reports the 1st
11225 * device scsi-ml or sas transport for persistent boot device
11226 * purposes. Please refer to function _scsih_determine_boot_device()
11227 */
11228 static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER * ioc)11229 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11230 {
11231 u32 channel;
11232 void *device;
11233 struct _sas_device *sas_device;
11234 struct _raid_device *raid_device;
11235 struct _pcie_device *pcie_device;
11236 u16 handle;
11237 u64 sas_address_parent;
11238 u64 sas_address;
11239 unsigned long flags;
11240 int rc;
11241 int tid;
11242 struct hba_port *port;
11243
11244 /* no Bios, return immediately */
11245 if (!ioc->bios_pg3.BiosVersion)
11246 return;
11247
11248 device = NULL;
11249 if (ioc->req_boot_device.device) {
11250 device = ioc->req_boot_device.device;
11251 channel = ioc->req_boot_device.channel;
11252 } else if (ioc->req_alt_boot_device.device) {
11253 device = ioc->req_alt_boot_device.device;
11254 channel = ioc->req_alt_boot_device.channel;
11255 } else if (ioc->current_boot_device.device) {
11256 device = ioc->current_boot_device.device;
11257 channel = ioc->current_boot_device.channel;
11258 }
11259
11260 if (!device)
11261 return;
11262
11263 if (channel == RAID_CHANNEL) {
11264 raid_device = device;
11265 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11266 raid_device->id, 0);
11267 if (rc)
11268 _scsih_raid_device_remove(ioc, raid_device);
11269 } else if (channel == PCIE_CHANNEL) {
11270 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11271 pcie_device = device;
11272 tid = pcie_device->id;
11273 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11274 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11275 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11276 if (rc)
11277 _scsih_pcie_device_remove(ioc, pcie_device);
11278 } else {
11279 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11280 sas_device = device;
11281 handle = sas_device->handle;
11282 sas_address_parent = sas_device->sas_address_parent;
11283 sas_address = sas_device->sas_address;
11284 port = sas_device->port;
11285 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11286 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11287
11288 if (ioc->hide_drives)
11289 return;
11290
11291 if (!port)
11292 return;
11293
11294 if (!mpt3sas_transport_port_add(ioc, handle,
11295 sas_address_parent, port)) {
11296 _scsih_sas_device_remove(ioc, sas_device);
11297 } else if (!sas_device->starget) {
11298 if (!ioc->is_driver_loading) {
11299 mpt3sas_transport_port_remove(ioc,
11300 sas_address,
11301 sas_address_parent, port);
11302 _scsih_sas_device_remove(ioc, sas_device);
11303 }
11304 }
11305 }
11306 }
11307
11308 /**
11309 * _scsih_probe_raid - reporting raid volumes to scsi-ml
11310 * @ioc: per adapter object
11311 *
11312 * Called during initial loading of the driver.
11313 */
11314 static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER * ioc)11315 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11316 {
11317 struct _raid_device *raid_device, *raid_next;
11318 int rc;
11319
11320 list_for_each_entry_safe(raid_device, raid_next,
11321 &ioc->raid_device_list, list) {
11322 if (raid_device->starget)
11323 continue;
11324 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11325 raid_device->id, 0);
11326 if (rc)
11327 _scsih_raid_device_remove(ioc, raid_device);
11328 }
11329 }
11330
get_next_sas_device(struct MPT3SAS_ADAPTER * ioc)11331 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11332 {
11333 struct _sas_device *sas_device = NULL;
11334 unsigned long flags;
11335
11336 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11337 if (!list_empty(&ioc->sas_device_init_list)) {
11338 sas_device = list_first_entry(&ioc->sas_device_init_list,
11339 struct _sas_device, list);
11340 sas_device_get(sas_device);
11341 }
11342 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11343
11344 return sas_device;
11345 }
11346
/**
 * sas_device_make_active - move a sas device onto the active list
 * @ioc: per adapter object
 * @sas_device: sas device object (caller holds its own reference)
 *
 * Moves @sas_device to ioc->sas_device_list under sas_device_lock,
 * transferring the list-held reference from whatever list (if any) the
 * device was still on.
 */
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had. Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		/* still on some list: drop that list's reference */
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* take a reference on behalf of sas_device_list */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
11372
11373 /**
11374 * _scsih_probe_sas - reporting sas devices to sas transport
11375 * @ioc: per adapter object
11376 *
11377 * Called during initial loading of the driver.
11378 */
11379 static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER * ioc)11380 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11381 {
11382 struct _sas_device *sas_device;
11383
11384 if (ioc->hide_drives)
11385 return;
11386
11387 while ((sas_device = get_next_sas_device(ioc))) {
11388 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11389 sas_device->sas_address_parent, sas_device->port)) {
11390 _scsih_sas_device_remove(ioc, sas_device);
11391 sas_device_put(sas_device);
11392 continue;
11393 } else if (!sas_device->starget) {
11394 /*
11395 * When asyn scanning is enabled, its not possible to
11396 * remove devices while scanning is turned on due to an
11397 * oops in scsi_sysfs_add_sdev()->add_device()->
11398 * sysfs_addrm_start()
11399 */
11400 if (!ioc->is_driver_loading) {
11401 mpt3sas_transport_port_remove(ioc,
11402 sas_device->sas_address,
11403 sas_device->sas_address_parent,
11404 sas_device->port);
11405 _scsih_sas_device_remove(ioc, sas_device);
11406 sas_device_put(sas_device);
11407 continue;
11408 }
11409 }
11410 sas_device_make_active(ioc, sas_device);
11411 sas_device_put(sas_device);
11412 }
11413 }
11414
11415 /**
11416 * get_next_pcie_device - Get the next pcie device
11417 * @ioc: per adapter object
11418 *
11419 * Get the next pcie device from pcie_device_init_list list.
11420 *
11421 * Return: pcie device structure if pcie_device_init_list list is not empty
11422 * otherwise returns NULL
11423 */
get_next_pcie_device(struct MPT3SAS_ADAPTER * ioc)11424 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11425 {
11426 struct _pcie_device *pcie_device = NULL;
11427 unsigned long flags;
11428
11429 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11430 if (!list_empty(&ioc->pcie_device_init_list)) {
11431 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11432 struct _pcie_device, list);
11433 pcie_device_get(pcie_device);
11434 }
11435 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11436
11437 return pcie_device;
11438 }
11439
11440 /**
11441 * pcie_device_make_active - Add pcie device to pcie_device_list list
11442 * @ioc: per adapter object
11443 * @pcie_device: pcie device object
11444 *
 * Add the pcie device, which has been registered with the SCSI Transport
 * Layer, to the pcie_device_list list.
11447 */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/*
	 * If the device is still on another list (e.g. the init list),
	 * drop that list's reference before moving it.
	 */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	/* take a reference on behalf of pcie_device_list */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
11464
11465 /**
11466 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11467 * @ioc: per adapter object
11468 *
11469 * Called during initial loading of the driver.
11470 */
11471 static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER * ioc)11472 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11473 {
11474 struct _pcie_device *pcie_device;
11475 int rc;
11476
11477 /* PCIe Device List */
11478 while ((pcie_device = get_next_pcie_device(ioc))) {
11479 if (pcie_device->starget) {
11480 pcie_device_put(pcie_device);
11481 continue;
11482 }
11483 if (pcie_device->access_status ==
11484 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11485 pcie_device_make_active(ioc, pcie_device);
11486 pcie_device_put(pcie_device);
11487 continue;
11488 }
11489 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11490 pcie_device->id, 0);
11491 if (rc) {
11492 _scsih_pcie_device_remove(ioc, pcie_device);
11493 pcie_device_put(pcie_device);
11494 continue;
11495 } else if (!pcie_device->starget) {
11496 /*
11497 * When async scanning is enabled, its not possible to
11498 * remove devices while scanning is turned on due to an
11499 * oops in scsi_sysfs_add_sdev()->add_device()->
11500 * sysfs_addrm_start()
11501 */
11502 if (!ioc->is_driver_loading) {
11503 /* TODO-- Need to find out whether this condition will
11504 * occur or not
11505 */
11506 _scsih_pcie_device_remove(ioc, pcie_device);
11507 pcie_device_put(pcie_device);
11508 continue;
11509 }
11510 }
11511 pcie_device_make_active(ioc, pcie_device);
11512 pcie_device_put(pcie_device);
11513 }
11514 }
11515
11516 /**
11517 * _scsih_probe_devices - probing for devices
11518 * @ioc: per adapter object
11519 *
11520 * Called during initial loading of the driver.
11521 */
static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u16 volume_mapping_flags;

	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
		return; /* return when IOC doesn't support initiator mode */

	_scsih_probe_boot_devices(ioc);

	if (ioc->ir_firmware) {
		/* IR firmware: the volume mapping mode decides whether RAID
		 * volumes or bare SAS devices get the lower target ids, so
		 * it dictates the probe order. */
		volume_mapping_flags =
		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
		if (volume_mapping_flags ==
		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
			_scsih_probe_raid(ioc);
			_scsih_probe_sas(ioc);
		} else {
			_scsih_probe_sas(ioc);
			_scsih_probe_raid(ioc);
		}
	} else {
		/* NOTE(review): PCIe/NVMe devices are probed only on the
		 * non-IR path - presumably IR firmware never exposes NVMe
		 * drives; confirm against firmware capabilities. */
		_scsih_probe_sas(ioc);
		_scsih_probe_pcie(ioc);
	}
}
11549
11550 /**
11551 * scsih_scan_start - scsi lld callback for .scan_start
11552 * @shost: SCSI host pointer
11553 *
11554 * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus. In our implementation, we will kick off
11556 * firmware discovery.
11557 */
11558 static void
scsih_scan_start(struct Scsi_Host * shost)11559 scsih_scan_start(struct Scsi_Host *shost)
11560 {
11561 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11562 int rc;
11563 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11564 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11565 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11566 mpt3sas_enable_diag_buffer(ioc, 1);
11567
11568 if (disable_discovery > 0)
11569 return;
11570
11571 ioc->start_scan = 1;
11572 rc = mpt3sas_port_enable(ioc);
11573
11574 if (rc != 0)
11575 ioc_info(ioc, "port enable: FAILED\n");
11576 }
11577
11578 /**
11579 * scsih_scan_finished - scsi lld callback for .scan_finished
11580 * @shost: SCSI host pointer
11581 * @time: elapsed time of the scan in jiffies
11582 *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
11585 * we wait for firmware discovery to complete, then return 1.
11586 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* discovery disabled via module parameter: finish immediately */
	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* give port enable at most 300 seconds */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* port enable still in progress: ask to be polled again */
	if (ioc->start_scan)
		return 0;

	/* port enable completed with an error from firmware */
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* expose everything the firmware scan discovered */
	if (ioc->wait_for_discovery_to_complete) {
		ioc->wait_for_discovery_to_complete = 0;
		_scsih_probe_devices(ioc);
	}
	mpt3sas_base_start_watchdog(ioc);
	ioc->is_driver_loading = 0;
	return 1;
}
11628
11629 /**
11630 * scsih_map_queues - map reply queues with request queues
11631 * @shost: SCSI host pointer
11632 */
scsih_map_queues(struct Scsi_Host * shost)11633 static int scsih_map_queues(struct Scsi_Host *shost)
11634 {
11635 struct MPT3SAS_ADAPTER *ioc =
11636 (struct MPT3SAS_ADAPTER *)shost->hostdata;
11637
11638 if (ioc->shost->nr_hw_queues == 1)
11639 return 0;
11640
11641 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
11642 ioc->pdev, ioc->high_iops_queues);
11643 }
11644
11645 /* shost template for SAS 2.0 HBA devices */
static struct scsi_host_template mpt2sas_driver_template = {
	.module = THIS_MODULE,
	.name = "Fusion MPT SAS Host",
	.proc_name = MPT2SAS_DRIVER_NAME,
	.queuecommand = scsih_qcmd,
	.target_alloc = scsih_target_alloc,
	.slave_alloc = scsih_slave_alloc,
	.slave_configure = scsih_slave_configure,
	.target_destroy = scsih_target_destroy,
	.slave_destroy = scsih_slave_destroy,
	.scan_finished = scsih_scan_finished,
	.scan_start = scsih_scan_start,
	.change_queue_depth = scsih_change_queue_depth,
	.eh_abort_handler = scsih_abort,
	.eh_device_reset_handler = scsih_dev_reset,
	.eh_target_reset_handler = scsih_target_reset,
	.eh_host_reset_handler = scsih_host_reset,
	.bios_param = scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder -
	 * presumably raised once IOC facts are known; confirm. */
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = MPT2SAS_SG_DEPTH,
	.max_sectors = 32767,
	.cmd_per_lun = 7,
	.shost_attrs = mpt3sas_host_attrs,
	.sdev_attrs = mpt3sas_dev_attrs,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct scsiio_tracker),
};
11674
11675 /* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	/* cookie ties this raid transport instance to the SAS 2.0 template */
	.cookie = &mpt2sas_driver_template,
	.is_raid = scsih_is_raid,
	.get_resync = scsih_get_resync,
	.get_state = scsih_get_state,
};
11682
11683 /* shost template for SAS 3.0 HBA devices */
static struct scsi_host_template mpt3sas_driver_template = {
	.module = THIS_MODULE,
	.name = "Fusion MPT SAS Host",
	.proc_name = MPT3SAS_DRIVER_NAME,
	.queuecommand = scsih_qcmd,
	.target_alloc = scsih_target_alloc,
	.slave_alloc = scsih_slave_alloc,
	.slave_configure = scsih_slave_configure,
	.target_destroy = scsih_target_destroy,
	.slave_destroy = scsih_slave_destroy,
	.scan_finished = scsih_scan_finished,
	.scan_start = scsih_scan_start,
	.change_queue_depth = scsih_change_queue_depth,
	.eh_abort_handler = scsih_abort,
	.eh_device_reset_handler = scsih_dev_reset,
	.eh_target_reset_handler = scsih_target_reset,
	.eh_host_reset_handler = scsih_host_reset,
	.bios_param = scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder -
	 * presumably raised once IOC facts are known; confirm. */
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = MPT3SAS_SG_DEPTH,
	.max_sectors = 32767,
	.max_segment_size = 0xffffffff,
	.cmd_per_lun = 7,
	.shost_attrs = mpt3sas_host_attrs,
	.sdev_attrs = mpt3sas_dev_attrs,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct scsiio_tracker),
	/* only the SAS 3.0 template supports multiple hw queues */
	.map_queues = scsih_map_queues,
};
11714
11715 /* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	/* cookie ties this raid transport instance to the SAS 3.0 template */
	.cookie = &mpt3sas_driver_template,
	.is_raid = scsih_is_raid,
	.get_resync = scsih_get_resync,
	.get_state = scsih_get_state,
};
11722
11723 /**
11724 * _scsih_determine_hba_mpi_version - determine in which MPI version class
11725 * this device belongs to.
11726 * @pdev: PCI device struct
11727 *
11728 * return MPI2_VERSION for SAS 2.0 HBA devices,
11729 * MPI25_VERSION for SAS 3.0 HBA devices, and
11730 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
11731 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	/* SAS 2.0 generation (incl. SSS6200 WarpDrive and MPI EP switches) */
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 generation */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* MPI 2.6 generation, including secure and invalid/tampered ids */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	return 0;	/* unrecognized device id */
}
11794
11795 /**
11796 * _scsih_probe - attach and add scsi host
11797 * @pdev: PCI device struct
11798 * @id: pci device id
11799 *
11800 * Return: 0 success, anything else error.
11801 */
11802 static int
_scsih_probe(struct pci_dev * pdev,const struct pci_device_id * id)11803 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
11804 {
11805 struct MPT3SAS_ADAPTER *ioc;
11806 struct Scsi_Host *shost = NULL;
11807 int rv;
11808 u16 hba_mpi_version;
11809
11810 /* Determine in which MPI version class this pci device belongs */
11811 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
11812 if (hba_mpi_version == 0)
11813 return -ENODEV;
11814
11815 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
11816 * for other generation HBA's return with -ENODEV
11817 */
11818 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
11819 return -ENODEV;
11820
11821 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
11822 * for other generation HBA's return with -ENODEV
11823 */
11824 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
11825 || hba_mpi_version == MPI26_VERSION)))
11826 return -ENODEV;
11827
11828 switch (hba_mpi_version) {
11829 case MPI2_VERSION:
11830 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
11831 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
11832 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
11833 shost = scsi_host_alloc(&mpt2sas_driver_template,
11834 sizeof(struct MPT3SAS_ADAPTER));
11835 if (!shost)
11836 return -ENODEV;
11837 ioc = shost_priv(shost);
11838 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11839 ioc->hba_mpi_version_belonged = hba_mpi_version;
11840 ioc->id = mpt2_ids++;
11841 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
11842 switch (pdev->device) {
11843 case MPI2_MFGPAGE_DEVID_SSS6200:
11844 ioc->is_warpdrive = 1;
11845 ioc->hide_ir_msg = 1;
11846 break;
11847 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11848 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11849 ioc->is_mcpu_endpoint = 1;
11850 break;
11851 default:
11852 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
11853 break;
11854 }
11855
11856 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11857 ioc->multipath_on_hba = 0;
11858 else
11859 ioc->multipath_on_hba = 1;
11860
11861 break;
11862 case MPI25_VERSION:
11863 case MPI26_VERSION:
11864 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
11865 shost = scsi_host_alloc(&mpt3sas_driver_template,
11866 sizeof(struct MPT3SAS_ADAPTER));
11867 if (!shost)
11868 return -ENODEV;
11869 ioc = shost_priv(shost);
11870 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11871 ioc->hba_mpi_version_belonged = hba_mpi_version;
11872 ioc->id = mpt3_ids++;
11873 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
11874 switch (pdev->device) {
11875 case MPI26_MFGPAGE_DEVID_SAS3508:
11876 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11877 case MPI26_MFGPAGE_DEVID_SAS3408:
11878 case MPI26_MFGPAGE_DEVID_SAS3516:
11879 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11880 case MPI26_MFGPAGE_DEVID_SAS3416:
11881 case MPI26_MFGPAGE_DEVID_SAS3616:
11882 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11883 ioc->is_gen35_ioc = 1;
11884 break;
11885 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11886 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11887 dev_err(&pdev->dev,
11888 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
11889 pdev->device, pdev->subsystem_vendor,
11890 pdev->subsystem_device);
11891 return 1;
11892 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11893 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11894 dev_err(&pdev->dev,
11895 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
11896 pdev->device, pdev->subsystem_vendor,
11897 pdev->subsystem_device);
11898 return 1;
11899 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11900 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11901 dev_info(&pdev->dev,
11902 "HBA is in Configurable Secure mode\n");
11903 fallthrough;
11904 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11905 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11906 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
11907 break;
11908 default:
11909 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
11910 }
11911 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
11912 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
11913 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
11914 ioc->combined_reply_queue = 1;
11915 if (ioc->is_gen35_ioc)
11916 ioc->combined_reply_index_count =
11917 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
11918 else
11919 ioc->combined_reply_index_count =
11920 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
11921 }
11922
11923 switch (ioc->is_gen35_ioc) {
11924 case 0:
11925 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11926 ioc->multipath_on_hba = 0;
11927 else
11928 ioc->multipath_on_hba = 1;
11929 break;
11930 case 1:
11931 if (multipath_on_hba == -1 || multipath_on_hba > 0)
11932 ioc->multipath_on_hba = 1;
11933 else
11934 ioc->multipath_on_hba = 0;
11935 default:
11936 break;
11937 }
11938
11939 break;
11940 default:
11941 return -ENODEV;
11942 }
11943
11944 INIT_LIST_HEAD(&ioc->list);
11945 spin_lock(&gioc_lock);
11946 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
11947 spin_unlock(&gioc_lock);
11948 ioc->shost = shost;
11949 ioc->pdev = pdev;
11950 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
11951 ioc->tm_cb_idx = tm_cb_idx;
11952 ioc->ctl_cb_idx = ctl_cb_idx;
11953 ioc->base_cb_idx = base_cb_idx;
11954 ioc->port_enable_cb_idx = port_enable_cb_idx;
11955 ioc->transport_cb_idx = transport_cb_idx;
11956 ioc->scsih_cb_idx = scsih_cb_idx;
11957 ioc->config_cb_idx = config_cb_idx;
11958 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
11959 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
11960 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
11961 ioc->logging_level = logging_level;
11962 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
11963 /* Host waits for minimum of six seconds */
11964 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
11965 /*
11966 * Enable MEMORY MOVE support flag.
11967 */
11968 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
11969 /* Enable ADDITIONAL QUERY support flag. */
11970 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
11971
11972 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
11973
11974 /* misc semaphores and spin locks */
11975 mutex_init(&ioc->reset_in_progress_mutex);
11976 /* initializing pci_access_mutex lock */
11977 mutex_init(&ioc->pci_access_mutex);
11978 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
11979 spin_lock_init(&ioc->scsi_lookup_lock);
11980 spin_lock_init(&ioc->sas_device_lock);
11981 spin_lock_init(&ioc->sas_node_lock);
11982 spin_lock_init(&ioc->fw_event_lock);
11983 spin_lock_init(&ioc->raid_device_lock);
11984 spin_lock_init(&ioc->pcie_device_lock);
11985 spin_lock_init(&ioc->diag_trigger_lock);
11986
11987 INIT_LIST_HEAD(&ioc->sas_device_list);
11988 INIT_LIST_HEAD(&ioc->sas_device_init_list);
11989 INIT_LIST_HEAD(&ioc->sas_expander_list);
11990 INIT_LIST_HEAD(&ioc->enclosure_list);
11991 INIT_LIST_HEAD(&ioc->pcie_device_list);
11992 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
11993 INIT_LIST_HEAD(&ioc->fw_event_list);
11994 INIT_LIST_HEAD(&ioc->raid_device_list);
11995 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
11996 INIT_LIST_HEAD(&ioc->delayed_tr_list);
11997 INIT_LIST_HEAD(&ioc->delayed_sc_list);
11998 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
11999 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12000 INIT_LIST_HEAD(&ioc->reply_queue_list);
12001 INIT_LIST_HEAD(&ioc->port_table_list);
12002
12003 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12004
12005 /* init shost parameters */
12006 shost->max_cmd_len = 32;
12007 shost->max_lun = max_lun;
12008 shost->transportt = mpt3sas_transport_template;
12009 shost->unique_id = ioc->id;
12010
12011 if (ioc->is_mcpu_endpoint) {
12012 /* mCPU MPI support 64K max IO */
12013 shost->max_sectors = 128;
12014 ioc_info(ioc, "The max_sectors value is set to %d\n",
12015 shost->max_sectors);
12016 } else {
12017 if (max_sectors != 0xFFFF) {
12018 if (max_sectors < 64) {
12019 shost->max_sectors = 64;
12020 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12021 max_sectors);
12022 } else if (max_sectors > 32767) {
12023 shost->max_sectors = 32767;
12024 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12025 max_sectors);
12026 } else {
12027 shost->max_sectors = max_sectors & 0xFFFE;
12028 ioc_info(ioc, "The max_sectors value is set to %d\n",
12029 shost->max_sectors);
12030 }
12031 }
12032 }
12033 /* register EEDP capabilities with SCSI layer */
12034 if (prot_mask >= 0)
12035 scsi_host_set_prot(shost, (prot_mask & 0x07));
12036 else
12037 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12038 | SHOST_DIF_TYPE2_PROTECTION
12039 | SHOST_DIF_TYPE3_PROTECTION);
12040
12041 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12042
12043 /* event thread */
12044 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12045 "fw_event_%s%d", ioc->driver_name, ioc->id);
12046 ioc->firmware_event_thread = alloc_ordered_workqueue(
12047 ioc->firmware_event_name, 0);
12048 if (!ioc->firmware_event_thread) {
12049 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12050 __FILE__, __LINE__, __func__);
12051 rv = -ENODEV;
12052 goto out_thread_fail;
12053 }
12054
12055 ioc->is_driver_loading = 1;
12056 if ((mpt3sas_base_attach(ioc))) {
12057 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12058 __FILE__, __LINE__, __func__);
12059 rv = -ENODEV;
12060 goto out_attach_fail;
12061 }
12062
12063 if (ioc->is_warpdrive) {
12064 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12065 ioc->hide_drives = 0;
12066 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12067 ioc->hide_drives = 1;
12068 else {
12069 if (mpt3sas_get_num_volumes(ioc))
12070 ioc->hide_drives = 1;
12071 else
12072 ioc->hide_drives = 0;
12073 }
12074 } else
12075 ioc->hide_drives = 0;
12076
12077 shost->host_tagset = 0;
12078 shost->nr_hw_queues = 1;
12079
12080 if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
12081 host_tagset_enable && ioc->smp_affinity_enable) {
12082
12083 shost->host_tagset = 1;
12084 shost->nr_hw_queues =
12085 ioc->reply_queue_count - ioc->high_iops_queues;
12086
12087 dev_info(&ioc->pdev->dev,
12088 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12089 shost->can_queue, shost->nr_hw_queues);
12090 }
12091
12092 rv = scsi_add_host(shost, &pdev->dev);
12093 if (rv) {
12094 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12095 __FILE__, __LINE__, __func__);
12096 goto out_add_shost_fail;
12097 }
12098
12099 scsi_scan_host(shost);
12100 mpt3sas_setup_debugfs(ioc);
12101 return 0;
12102 out_add_shost_fail:
12103 mpt3sas_base_detach(ioc);
12104 out_attach_fail:
12105 destroy_workqueue(ioc->firmware_event_thread);
12106 out_thread_fail:
12107 spin_lock(&gioc_lock);
12108 list_del(&ioc->list);
12109 spin_unlock(&gioc_lock);
12110 scsi_host_put(shost);
12111 return rv;
12112 }
12113
12114 /**
12115 * scsih_suspend - power management suspend main entry point
12116 * @dev: Device struct
12117 *
12118 * Return: 0 success, anything else error.
12119 */
static int __maybe_unused
scsih_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	int rc;

	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (rc)
		return rc;

	/* quiesce the adapter before releasing its resources */
	mpt3sas_base_stop_watchdog(ioc);
	flush_scheduled_work();
	scsi_block_requests(shost);
	_scsih_nvme_shutdown(ioc);
	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
		 pdev, pci_name(pdev));

	mpt3sas_base_free_resources(ioc);
	return 0;
}
12142
12143 /**
12144 * scsih_resume - power management resume main entry point
12145 * @dev: Device struct
12146 *
12147 * Return: 0 success, anything else error.
12148 */
static int __maybe_unused
scsih_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (r)
		return r;

	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
		 pdev, pci_name(pdev), device_state);

	ioc->pdev = pdev;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		return r;
	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
	/* NOTE(review): the log says "Hard Reset" but a SOFT_RESET type is
	 * requested here - confirm the mismatch is intentional. */
	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
	scsi_unblock_requests(shost);
	mpt3sas_base_start_watchdog(ioc);
	return 0;
}
12175
12176 /**
12177 * scsih_pci_error_detected - Called when a PCI error is detected.
12178 * @pdev: PCI device struct
12179 * @state: PCI channel state
12180 *
12181 * Description: Called when a PCI error is detected.
12182 *
12183 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12184 */
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);

	switch (state) {
	case pci_channel_io_normal:
		/* non-fatal: no recovery action required from us */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		ioc->pci_error_recovery = 1;
		scsi_block_requests(ioc->shost);
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_free_resources(ioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		ioc->pci_error_recovery = 1;
		mpt3sas_base_stop_watchdog(ioc);
		_scsih_flush_running_cmds(ioc);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* unknown channel state: request a reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
12215
12216 /**
12217 * scsih_pci_slot_reset - Called when PCI slot has been reset.
12218 * @pdev: PCI device struct
12219 *
12220 * Description: This routine is called by the pci error recovery
12221 * code after the PCI slot has been reset, just before we
12222 * should resume normal operations.
12223 */
12224 static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev * pdev)12225 scsih_pci_slot_reset(struct pci_dev *pdev)
12226 {
12227 struct Scsi_Host *shost;
12228 struct MPT3SAS_ADAPTER *ioc;
12229 int rc;
12230
12231 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12232 return PCI_ERS_RESULT_DISCONNECT;
12233
12234 ioc_info(ioc, "PCI error: slot reset callback!!\n");
12235
12236 ioc->pci_error_recovery = 0;
12237 ioc->pdev = pdev;
12238 pci_restore_state(pdev);
12239 rc = mpt3sas_base_map_resources(ioc);
12240 if (rc)
12241 return PCI_ERS_RESULT_DISCONNECT;
12242
12243 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12244 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12245
12246 ioc_warn(ioc, "hard reset: %s\n",
12247 (rc == 0) ? "success" : "failed");
12248
12249 if (!rc)
12250 return PCI_ERS_RESULT_RECOVERED;
12251 else
12252 return PCI_ERS_RESULT_DISCONNECT;
12253 }
12254
12255 /**
12256 * scsih_pci_resume() - resume normal ops after PCI reset
12257 * @pdev: pointer to PCI device
12258 *
12259 * Called when the error recovery driver tells us that its
12260 * OK to resume normal operation. Use completion to allow
12261 * halted scsi ops to resume.
12262 */
static void
scsih_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc_info(ioc, "PCI error: resume callback!!\n");

	/* restart monitoring and let blocked scsi ops flow again */
	mpt3sas_base_start_watchdog(ioc);
	scsi_unblock_requests(ioc->shost);
}
12277
12278 /**
12279 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12280 * @pdev: pointer to PCI device
12281 */
12282 static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev * pdev)12283 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12284 {
12285 struct Scsi_Host *shost;
12286 struct MPT3SAS_ADAPTER *ioc;
12287
12288 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12289 return PCI_ERS_RESULT_DISCONNECT;
12290
12291 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12292
12293 /* TODO - dump whatever for debugging purposes */
12294
12295 /* This called only if scsih_pci_error_detected returns
12296 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12297 * works, no need to reset slot.
12298 */
12299 return PCI_ERS_RESULT_RECOVERED;
12300 }
12301
12302 /**
12303 * scsih_ncq_prio_supp - Check for NCQ command priority support
12304 * @sdev: scsi device struct
12305 *
12306 * This is called when a user indicates they would like to enable
12307 * ncq command priorities. This works only on SATA devices.
12308 */
scsih_ncq_prio_supp(struct scsi_device * sdev)12309 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12310 {
12311 unsigned char *buf;
12312 bool ncq_prio_supp = false;
12313
12314 if (!scsi_device_supports_vpd(sdev))
12315 return ncq_prio_supp;
12316
12317 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12318 if (!buf)
12319 return ncq_prio_supp;
12320
12321 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12322 ncq_prio_supp = (buf[213] >> 4) & 1;
12323
12324 kfree(buf);
12325 return ncq_prio_supp;
12326 }
/*
 * The pci device ids are defined in mpi/mpi2_cnfg.h.
 * Subvendor/subdevice are wildcarded (PCI_ANY_ID) for every entry.
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12468
12469 static struct pci_error_handlers _mpt3sas_err_handler = {
12470 .error_detected = scsih_pci_error_detected,
12471 .mmio_enabled = scsih_pci_mmio_enabled,
12472 .slot_reset = scsih_pci_slot_reset,
12473 .resume = scsih_pci_resume,
12474 };
12475
/* System-sleep PM callbacks (suspend/resume only; no runtime PM). */
static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

/* PCI driver glue: probe/remove/shutdown plus the AER error handlers
 * and PM ops declared above.
 */
static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};
12487
/**
 * scsih_init - main entry point for this driver.
 *
 * Registers a reply callback handler with the base driver for each
 * request type this module issues; the returned indices are stored in
 * file-scope variables and used to dispatch firmware replies.
 * NOTE(review): index values presumably depend on registration order —
 * keep the sequence as-is.
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback handler */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* callback handlers for driver-internal task management replies */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}
12538
/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases every callback handler registered by scsih_init() and the
 * raid/transport templates attached in _mpt3sas_init().  The raid
 * template release conditions mirror the attach conditions (both keyed
 * on the hbas_to_enumerate module parameter).
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}
12569
12570 /**
12571 * _mpt3sas_init - main entry point for this driver.
12572 *
12573 * Return: 0 success, anything else error.
12574 */
12575 static int __init
_mpt3sas_init(void)12576 _mpt3sas_init(void)
12577 {
12578 int error;
12579
12580 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12581 MPT3SAS_DRIVER_VERSION);
12582
12583 mpt3sas_transport_template =
12584 sas_attach_transport(&mpt3sas_transport_functions);
12585 if (!mpt3sas_transport_template)
12586 return -ENODEV;
12587
12588 /* No need attach mpt3sas raid functions template
12589 * if hbas_to_enumarate value is one.
12590 */
12591 if (hbas_to_enumerate != 1) {
12592 mpt3sas_raid_template =
12593 raid_class_attach(&mpt3sas_raid_functions);
12594 if (!mpt3sas_raid_template) {
12595 sas_release_transport(mpt3sas_transport_template);
12596 return -ENODEV;
12597 }
12598 }
12599
12600 /* No need to attach mpt2sas raid functions template
12601 * if hbas_to_enumarate value is two
12602 */
12603 if (hbas_to_enumerate != 2) {
12604 mpt2sas_raid_template =
12605 raid_class_attach(&mpt2sas_raid_functions);
12606 if (!mpt2sas_raid_template) {
12607 sas_release_transport(mpt3sas_transport_template);
12608 return -ENODEV;
12609 }
12610 }
12611
12612 error = scsih_init();
12613 if (error) {
12614 scsih_exit();
12615 return error;
12616 }
12617
12618 mpt3sas_ctl_init(hbas_to_enumerate);
12619
12620 error = pci_register_driver(&mpt3sas_driver);
12621 if (error)
12622 scsih_exit();
12623
12624 return error;
12625 }
12626
/**
 * _mpt3sas_exit - exit point for this driver (when it is a module).
 *
 * Teardown mirrors _mpt3sas_init(): the ctl (ioctl) interface is
 * removed first, then the PCI driver is unregistered, and finally
 * scsih_exit() releases the callback handlers and the
 * transport/raid templates.
 */
static void __exit
_mpt3sas_exit(void)
{
	pr_info("mpt3sas version %s unloading\n",
				MPT3SAS_DRIVER_VERSION);

	mpt3sas_ctl_exit(hbas_to_enumerate);

	pci_unregister_driver(&mpt3sas_driver);

	scsih_exit();
}

/* Module entry/exit points. */
module_init(_mpt3sas_init);
module_exit(_mpt3sas_exit);
12646