1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4 *
5 * This driver supports the newer, SCSI-based firmware interface only.
6 *
7 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
8 *
9 * Based on the original DAC960 driver, which has
10 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
11 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
12 */
13
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/delay.h>
17 #include <linux/interrupt.h>
18 #include <linux/pci.h>
19 #include <linux/raid_class.h>
20 #include <asm/unaligned.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_device.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_tcq.h>
26 #include "myrs.h"
27
28 static struct raid_template *myrs_raid_template;
29
30 static struct myrs_devstate_name_entry {
31 enum myrs_devstate state;
32 char *name;
33 } myrs_devstate_name_list[] = {
34 { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
35 { MYRS_DEVICE_ONLINE, "Online" },
36 { MYRS_DEVICE_REBUILD, "Rebuild" },
37 { MYRS_DEVICE_MISSING, "Missing" },
38 { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
39 { MYRS_DEVICE_OFFLINE, "Offline" },
40 { MYRS_DEVICE_CRITICAL, "Critical" },
41 { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
42 { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
43 { MYRS_DEVICE_STANDBY, "Standby" },
44 { MYRS_DEVICE_INVALID_STATE, "Invalid" },
45 };
46
47 static char *myrs_devstate_name(enum myrs_devstate state)
48 {
49 struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
50 int i;
51
52 for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
53 if (entry[i].state == state)
54 return entry[i].name;
55 }
56 return NULL;
57 }
58
59 static struct myrs_raid_level_name_entry {
60 enum myrs_raid_level level;
61 char *name;
62 } myrs_raid_level_name_list[] = {
63 { MYRS_RAID_LEVEL0, "RAID0" },
64 { MYRS_RAID_LEVEL1, "RAID1" },
65 { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
66 { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
67 { MYRS_RAID_LEVEL6, "RAID6" },
68 { MYRS_RAID_JBOD, "JBOD" },
69 { MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
70 { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
71 { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
72 { MYRS_RAID_SPAN, "Mylex SPAN" },
73 { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
74 { MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
75 { MYRS_RAID_PHYSICAL, "Physical device" },
76 };
77
78 static char *myrs_raid_level_name(enum myrs_raid_level level)
79 {
80 struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
81 int i;
82
83 for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
84 if (entry[i].level == level)
85 return entry[i].name;
86 }
87 return NULL;
88 }
89
90 /*
91 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
92 */
93 static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
94 {
95 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
96
97 memset(mbox, 0, sizeof(union myrs_cmd_mbox));
98 cmd_blk->status = 0;
99 }
100
101 /*
102 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
103 */
104 static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
105 {
106 void __iomem *base = cs->io_base;
107 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
108 union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;
109
110 cs->write_cmd_mbox(next_mbox, mbox);
111
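/*
 * If the controller has not yet consumed either of the two most recently
 * posted mailboxes, ring the memory mailbox doorbell so it picks up the
 * entry that was just written.
 */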
112 if (cs->prev_cmd_mbox1->words[0] == 0 ||
113 cs->prev_cmd_mbox2->words[0] == 0)
114 cs->get_cmd_mbox(base);
115
116 cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
117 cs->prev_cmd_mbox1 = next_mbox;
118
119 if (++next_mbox > cs->last_cmd_mbox)
120 next_mbox = cs->first_cmd_mbox;
121
122 cs->next_cmd_mbox = next_mbox;
123 }
124
125 /*
126 * myrs_exec_cmd - executes V2 Command and waits for completion.
127 */
128 static void myrs_exec_cmd(struct myrs_hba *cs,
129 struct myrs_cmdblk *cmd_blk)
130 {
131 DECLARE_COMPLETION_ONSTACK(complete);
132 unsigned long flags;
133
134 cmd_blk->complete = &complete;
135 spin_lock_irqsave(&cs->queue_lock, flags);
136 myrs_qcmd(cs, cmd_blk);
137 spin_unlock_irqrestore(&cs->queue_lock, flags);
138
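/*
 * The interrupt handler signals this completion once the controller posts
 * a status mailbox for the command.
 */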
139 wait_for_completion(&complete);
140 }
141
142 /*
143 * myrs_report_progress - prints progress message
144 */
145 static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
146 unsigned char *msg, unsigned long blocks,
147 unsigned long size)
148 {
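/*
 * Both counts are scaled down by 128 before the percentage is computed so
 * the 100 * blocks product stays within int range for large devices; the
 * ratio, and hence the reported percentage, is unchanged.
 */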
149 shost_printk(KERN_INFO, cs->host,
150 "Logical Drive %d: %s in Progress: %d%% completed\n",
151 ldev_num, msg,
152 (100 * (int)(blocks >> 7)) / (int)(size >> 7));
153 }
154
155 /*
156 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
157 */
158 static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
159 {
160 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
161 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
162 dma_addr_t ctlr_info_addr;
163 union myrs_sgl *sgl;
164 unsigned char status;
165 unsigned short ldev_present, ldev_critical, ldev_offline;
166
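/*
 * Snapshot the current logical drive counts so any change can be reported
 * once the controller information has been refreshed below.
 */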
167 ldev_present = cs->ctlr_info->ldev_present;
168 ldev_critical = cs->ctlr_info->ldev_critical;
169 ldev_offline = cs->ctlr_info->ldev_offline;
170
171 ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
172 sizeof(struct myrs_ctlr_info),
173 DMA_FROM_DEVICE);
174 if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
175 return MYRS_STATUS_FAILED;
176
177 mutex_lock(&cs->dcmd_mutex);
178 myrs_reset_cmd(cmd_blk);
179 mbox->ctlr_info.id = MYRS_DCMD_TAG;
180 mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
181 mbox->ctlr_info.control.dma_ctrl_to_host = true;
182 mbox->ctlr_info.control.no_autosense = true;
183 mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
184 mbox->ctlr_info.ctlr_num = 0;
185 mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
186 sgl = &mbox->ctlr_info.dma_addr;
187 sgl->sge[0].sge_addr = ctlr_info_addr;
188 sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
189 dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
190 myrs_exec_cmd(cs, cmd_blk);
191 status = cmd_blk->status;
192 mutex_unlock(&cs->dcmd_mutex);
193 dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
194 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
195 if (status == MYRS_STATUS_SUCCESS) {
196 if (cs->ctlr_info->bg_init_active +
197 cs->ctlr_info->ldev_init_active +
198 cs->ctlr_info->pdev_init_active +
199 cs->ctlr_info->cc_active +
200 cs->ctlr_info->rbld_active +
201 cs->ctlr_info->exp_active != 0)
202 cs->needs_update = true;
203 if (cs->ctlr_info->ldev_present != ldev_present ||
204 cs->ctlr_info->ldev_critical != ldev_critical ||
205 cs->ctlr_info->ldev_offline != ldev_offline)
206 shost_printk(KERN_INFO, cs->host,
207 "Logical drive count changes (%d/%d/%d)\n",
208 cs->ctlr_info->ldev_critical,
209 cs->ctlr_info->ldev_offline,
210 cs->ctlr_info->ldev_present);
211 }
212
213 return status;
214 }
215
216 /*
217 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
218 */
219 static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
220 unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
221 {
222 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
223 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
224 dma_addr_t ldev_info_addr;
225 struct myrs_ldev_info ldev_info_orig;
226 union myrs_sgl *sgl;
227 unsigned char status;
228
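/*
 * Keep a copy of the previous logical device state so state transitions,
 * error counters and background operations can be reported after the update.
 */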
229 memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
230 ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
231 sizeof(struct myrs_ldev_info),
232 DMA_FROM_DEVICE);
233 if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
234 return MYRS_STATUS_FAILED;
235
236 mutex_lock(&cs->dcmd_mutex);
237 myrs_reset_cmd(cmd_blk);
238 mbox->ldev_info.id = MYRS_DCMD_TAG;
239 mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
240 mbox->ldev_info.control.dma_ctrl_to_host = true;
241 mbox->ldev_info.control.no_autosense = true;
242 mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
243 mbox->ldev_info.ldev.ldev_num = ldev_num;
244 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
245 sgl = &mbox->ldev_info.dma_addr;
246 sgl->sge[0].sge_addr = ldev_info_addr;
247 sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
248 dev_dbg(&cs->host->shost_gendev,
249 "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
250 myrs_exec_cmd(cs, cmd_blk);
251 status = cmd_blk->status;
252 mutex_unlock(&cs->dcmd_mutex);
253 dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
254 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
255 if (status == MYRS_STATUS_SUCCESS) {
256 unsigned short ldev_num = ldev_info->ldev_num;
257 struct myrs_ldev_info *new = ldev_info;
258 struct myrs_ldev_info *old = &ldev_info_orig;
259 unsigned long ldev_size = new->cfg_devsize;
260
261 if (new->dev_state != old->dev_state) {
262 const char *name;
263
264 name = myrs_devstate_name(new->dev_state);
265 shost_printk(KERN_INFO, cs->host,
266 "Logical Drive %d is now %s\n",
267 ldev_num, name ? name : "Invalid");
268 }
269 if ((new->soft_errs != old->soft_errs) ||
270 (new->cmds_failed != old->cmds_failed) ||
271 (new->deferred_write_errs != old->deferred_write_errs))
272 shost_printk(KERN_INFO, cs->host,
273 "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
274 ldev_num, new->soft_errs,
275 new->cmds_failed,
276 new->deferred_write_errs);
277 if (new->bg_init_active)
278 myrs_report_progress(cs, ldev_num,
279 "Background Initialization",
280 new->bg_init_lba, ldev_size);
281 else if (new->fg_init_active)
282 myrs_report_progress(cs, ldev_num,
283 "Foreground Initialization",
284 new->fg_init_lba, ldev_size);
285 else if (new->migration_active)
286 myrs_report_progress(cs, ldev_num,
287 "Data Migration",
288 new->migration_lba, ldev_size);
289 else if (new->patrol_active)
290 myrs_report_progress(cs, ldev_num,
291 "Patrol Operation",
292 new->patrol_lba, ldev_size);
293 if (old->bg_init_active && !new->bg_init_active)
294 shost_printk(KERN_INFO, cs->host,
295 "Logical Drive %d: Background Initialization %s\n",
296 ldev_num,
297 (new->ldev_control.ldev_init_done ?
298 "Completed" : "Failed"));
299 }
300 return status;
301 }
302
303 /*
304 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
305 */
306 static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
307 unsigned char channel, unsigned char target, unsigned char lun,
308 struct myrs_pdev_info *pdev_info)
309 {
310 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
311 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
312 dma_addr_t pdev_info_addr;
313 union myrs_sgl *sgl;
314 unsigned char status;
315
316 pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
317 sizeof(struct myrs_pdev_info),
318 DMA_FROM_DEVICE);
319 if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
320 return MYRS_STATUS_FAILED;
321
322 mutex_lock(&cs->dcmd_mutex);
323 myrs_reset_cmd(cmd_blk);
324 mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
325 mbox->pdev_info.id = MYRS_DCMD_TAG;
326 mbox->pdev_info.control.dma_ctrl_to_host = true;
327 mbox->pdev_info.control.no_autosense = true;
328 mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
329 mbox->pdev_info.pdev.lun = lun;
330 mbox->pdev_info.pdev.target = target;
331 mbox->pdev_info.pdev.channel = channel;
332 mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
333 sgl = &mbox->pdev_info.dma_addr;
334 sgl->sge[0].sge_addr = pdev_info_addr;
335 sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
336 dev_dbg(&cs->host->shost_gendev,
337 "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
338 channel, target, lun);
339 myrs_exec_cmd(cs, cmd_blk);
340 status = cmd_blk->status;
341 mutex_unlock(&cs->dcmd_mutex);
342 dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
343 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
344 return status;
345 }
346
347 /*
348 * myrs_dev_op - executes a "Device Operation" Command
349 */
350 static unsigned char myrs_dev_op(struct myrs_hba *cs,
351 enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
352 {
353 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
354 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
355 unsigned char status;
356
357 mutex_lock(&cs->dcmd_mutex);
358 myrs_reset_cmd(cmd_blk);
359 mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
360 mbox->dev_op.id = MYRS_DCMD_TAG;
361 mbox->dev_op.control.dma_ctrl_to_host = true;
362 mbox->dev_op.control.no_autosense = true;
363 mbox->dev_op.ioctl_opcode = opcode;
364 mbox->dev_op.opdev = opdev;
365 myrs_exec_cmd(cs, cmd_blk);
366 status = cmd_blk->status;
367 mutex_unlock(&cs->dcmd_mutex);
368 return status;
369 }
370
371 /*
372 * myrs_translate_pdev - translates a Physical Device Channel and
373 * TargetID into a Logical Device.
374 */
375 static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
376 unsigned char channel, unsigned char target, unsigned char lun,
377 struct myrs_devmap *devmap)
378 {
379 struct pci_dev *pdev = cs->pdev;
380 dma_addr_t devmap_addr;
381 struct myrs_cmdblk *cmd_blk;
382 union myrs_cmd_mbox *mbox;
383 union myrs_sgl *sgl;
384 unsigned char status;
385
386 memset(devmap, 0x0, sizeof(struct myrs_devmap));
387 devmap_addr = dma_map_single(&pdev->dev, devmap,
388 sizeof(struct myrs_devmap),
389 DMA_FROM_DEVICE);
390 if (dma_mapping_error(&pdev->dev, devmap_addr))
391 return MYRS_STATUS_FAILED;
392
393 mutex_lock(&cs->dcmd_mutex);
394 cmd_blk = &cs->dcmd_blk;
395 mbox = &cmd_blk->mbox;
396 mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
397 mbox->pdev_info.control.dma_ctrl_to_host = true;
398 mbox->pdev_info.control.no_autosense = true;
399 mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
400 mbox->pdev_info.pdev.target = target;
401 mbox->pdev_info.pdev.channel = channel;
402 mbox->pdev_info.pdev.lun = lun;
403 mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
404 sgl = &mbox->pdev_info.dma_addr;
405 sgl->sge[0].sge_addr = devmap_addr;
406 sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
407
408 myrs_exec_cmd(cs, cmd_blk);
409 status = cmd_blk->status;
410 mutex_unlock(&cs->dcmd_mutex);
411 dma_unmap_single(&pdev->dev, devmap_addr,
412 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
413 return status;
414 }
415
416 /*
417 * myrs_get_event - executes a Get Event Command
418 */
419 static unsigned char myrs_get_event(struct myrs_hba *cs,
420 unsigned int event_num, struct myrs_event *event_buf)
421 {
422 struct pci_dev *pdev = cs->pdev;
423 dma_addr_t event_addr;
424 struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
425 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
426 union myrs_sgl *sgl;
427 unsigned char status;
428
429 event_addr = dma_map_single(&pdev->dev, event_buf,
430 sizeof(struct myrs_event), DMA_FROM_DEVICE);
431 if (dma_mapping_error(&pdev->dev, event_addr))
432 return MYRS_STATUS_FAILED;
433
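/*
 * This uses the monitoring command block without taking dcmd_mutex;
 * presumably the monitoring path is the only caller, so no additional
 * serialization is done here.
 */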
434 mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
435 mbox->get_event.dma_size = sizeof(struct myrs_event);
436 mbox->get_event.evnum_upper = event_num >> 16;
437 mbox->get_event.ctlr_num = 0;
438 mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
439 mbox->get_event.evnum_lower = event_num & 0xFFFF;
440 sgl = &mbox->get_event.dma_addr;
441 sgl->sge[0].sge_addr = event_addr;
442 sgl->sge[0].sge_count = mbox->get_event.dma_size;
443 myrs_exec_cmd(cs, cmd_blk);
444 status = cmd_blk->status;
445 dma_unmap_single(&pdev->dev, event_addr,
446 sizeof(struct myrs_event), DMA_FROM_DEVICE);
447
448 return status;
449 }
450
451 /*
452 * myrs_get_fwstatus - executes a Get Health Status Command
453 */
454 static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
455 {
456 struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
457 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
458 union myrs_sgl *sgl;
459 unsigned char status = cmd_blk->status;
460
461 myrs_reset_cmd(cmd_blk);
462 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
463 mbox->common.id = MYRS_MCMD_TAG;
464 mbox->common.control.dma_ctrl_to_host = true;
465 mbox->common.control.no_autosense = true;
466 mbox->common.dma_size = sizeof(struct myrs_fwstat);
467 mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
468 sgl = &mbox->common.dma_addr;
469 sgl->sge[0].sge_addr = cs->fwstat_addr;
470 sgl->sge[0].sge_count = mbox->common.dma_size;
471 dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
472 myrs_exec_cmd(cs, cmd_blk);
473 status = cmd_blk->status;
474
475 return status;
476 }
477
478 /*
479 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
480 */
481 static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
482 enable_mbox_t enable_mbox_fn)
483 {
484 void __iomem *base = cs->io_base;
485 struct pci_dev *pdev = cs->pdev;
486 union myrs_cmd_mbox *cmd_mbox;
487 struct myrs_stat_mbox *stat_mbox;
488 union myrs_cmd_mbox *mbox;
489 dma_addr_t mbox_addr;
490 unsigned char status = MYRS_STATUS_FAILED;
491
492 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
493 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
494 dev_err(&pdev->dev, "DMA mask out of range\n");
495 return false;
496 }
497
498 /* Temporary dma mapping, used only in the scope of this function */
499 mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
500 &mbox_addr, GFP_KERNEL);
501 if (dma_mapping_error(&pdev->dev, mbox_addr))
502 return false;
503
504 /* These are the base addresses for the command memory mailbox array */
505 cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
506 cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
507 &cs->cmd_mbox_addr, GFP_KERNEL);
508 if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
509 dev_err(&pdev->dev, "Failed to map command mailbox\n");
510 goto out_free;
511 }
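/*
 * The command mailboxes are used as a ring: first/last bound the array,
 * next points at the slot for the upcoming command, and the two "previous"
 * pointers start at the tail to match the wrap-around in myrs_qcmd().
 */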
512 cs->first_cmd_mbox = cmd_mbox;
513 cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
514 cs->last_cmd_mbox = cmd_mbox;
515 cs->next_cmd_mbox = cs->first_cmd_mbox;
516 cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
517 cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
518
519 /* These are the base addresses for the status memory mailbox array */
520 cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
521 stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
522 &cs->stat_mbox_addr, GFP_KERNEL);
523 if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
524 dev_err(&pdev->dev, "Failed to map status mailbox\n");
525 goto out_free;
526 }
527
528 cs->first_stat_mbox = stat_mbox;
529 stat_mbox += MYRS_MAX_STAT_MBOX - 1;
530 cs->last_stat_mbox = stat_mbox;
531 cs->next_stat_mbox = cs->first_stat_mbox;
532
533 cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
534 sizeof(struct myrs_fwstat),
535 &cs->fwstat_addr, GFP_KERNEL);
536 if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
537 dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
538 cs->fwstat_buf = NULL;
539 goto out_free;
540 }
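/*
 * ctlr_info and event_buf are ordinary allocations; they are streaming-mapped
 * with dma_map_single() whenever the corresponding IOCTL is issued (GFP_DMA
 * presumably to stay within the controller's addressing limits).
 */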
541 cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
542 GFP_KERNEL | GFP_DMA);
543 if (!cs->ctlr_info)
544 goto out_free;
545
546 cs->event_buf = kzalloc(sizeof(struct myrs_event),
547 GFP_KERNEL | GFP_DMA);
548 if (!cs->event_buf)
549 goto out_free;
550
551 /* Enable the Memory Mailbox Interface. */
552 memset(mbox, 0, sizeof(union myrs_cmd_mbox));
553 mbox->set_mbox.id = 1;
554 mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
555 mbox->set_mbox.control.no_autosense = true;
556 mbox->set_mbox.first_cmd_mbox_size_kb =
557 (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
558 mbox->set_mbox.first_stat_mbox_size_kb =
559 (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
560 mbox->set_mbox.second_cmd_mbox_size_kb = 0;
561 mbox->set_mbox.second_stat_mbox_size_kb = 0;
562 mbox->set_mbox.sense_len = 0;
563 mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
564 mbox->set_mbox.fwstat_buf_size_kb = 1;
565 mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
566 mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
567 mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
568 status = enable_mbox_fn(base, mbox_addr);
569
570 out_free:
571 dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
572 mbox, mbox_addr);
573 if (status != MYRS_STATUS_SUCCESS)
574 dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
575 status);
576 return (status == MYRS_STATUS_SUCCESS);
577 }
578
579 /*
580 * myrs_get_config - reads the Configuration Information
581 */
582 static int myrs_get_config(struct myrs_hba *cs)
583 {
584 struct myrs_ctlr_info *info = cs->ctlr_info;
585 struct Scsi_Host *shost = cs->host;
586 unsigned char status;
587 unsigned char model[20];
588 unsigned char fw_version[12];
589 int i, model_len;
590
591 /* Get data into dma-able area, then copy into permanent location */
592 mutex_lock(&cs->cinfo_mutex);
593 status = myrs_get_ctlr_info(cs);
594 mutex_unlock(&cs->cinfo_mutex);
595 if (status != MYRS_STATUS_SUCCESS) {
596 shost_printk(KERN_ERR, shost,
597 "Failed to get controller information\n");
598 return -ENODEV;
599 }
600
601 /* Initialize the Controller Model Name and Full Model Name fields. */
602 model_len = sizeof(info->ctlr_name);
603 if (model_len > sizeof(model)-1)
604 model_len = sizeof(model)-1;
605 memcpy(model, info->ctlr_name, model_len);
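/* Strip trailing blanks and NUL padding from the firmware-supplied name. */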
606 model_len--;
607 while (model[model_len] == ' ' || model[model_len] == '\0')
608 model_len--;
609 model[++model_len] = '\0';
610 strcpy(cs->model_name, "DAC960 ");
611 strcat(cs->model_name, model);
612 /* Initialize the Controller Firmware Version field. */
613 sprintf(fw_version, "%d.%02d-%02d",
614 info->fw_major_version, info->fw_minor_version,
615 info->fw_turn_number);
616 if (info->fw_major_version == 6 &&
617 info->fw_minor_version == 0 &&
618 info->fw_turn_number < 1) {
619 shost_printk(KERN_WARNING, shost,
620 "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
621 "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
622 "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
623 fw_version);
624 return -ENODEV;
625 }
626 /* Initialize the Controller Channels and Targets. */
627 shost->max_channel = info->physchan_present + info->virtchan_present;
628 shost->max_id = info->max_targets[0];
629 for (i = 1; i < 16; i++) {
630 if (!info->max_targets[i])
631 continue;
632 if (shost->max_id < info->max_targets[i])
633 shost->max_id = info->max_targets[i];
634 }
635
636 /*
637 * Initialize the Controller Queue Depth, Driver Queue Depth,
638 * Logical Drive Count, Maximum Blocks per Command, Controller
639 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
640 * The Driver Queue Depth must be at most three less than
641 * the Controller Queue Depth; tag '1' is reserved for
642 * direct commands, and tag '2' for monitoring commands.
643 */
644 shost->can_queue = info->max_tcq - 3;
645 if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
646 shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
647 shost->max_sectors = info->max_transfer_size;
648 shost->sg_tablesize = info->max_sge;
649 if (shost->sg_tablesize > MYRS_SG_LIMIT)
650 shost->sg_tablesize = MYRS_SG_LIMIT;
651
652 shost_printk(KERN_INFO, shost,
653 "Configuring %s PCI RAID Controller\n", model);
654 shost_printk(KERN_INFO, shost,
655 " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
656 fw_version, info->physchan_present, info->mem_size_mb);
657
658 shost_printk(KERN_INFO, shost,
659 " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
660 shost->can_queue, shost->max_sectors);
661
662 shost_printk(KERN_INFO, shost,
663 " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
664 shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
665 for (i = 0; i < info->physchan_max; i++) {
666 if (!info->max_targets[i])
667 continue;
668 shost_printk(KERN_INFO, shost,
669 " Device Channel %d: max %d devices\n",
670 i, info->max_targets[i]);
671 }
672 shost_printk(KERN_INFO, shost,
673 " Physical: %d/%d channels, %d disks, %d devices\n",
674 info->physchan_present, info->physchan_max,
675 info->pdisk_present, info->pdev_present);
676
677 shost_printk(KERN_INFO, shost,
678 " Logical: %d/%d channels, %d disks\n",
679 info->virtchan_present, info->virtchan_max,
680 info->ldev_present);
681 return 0;
682 }
683
684 /*
685 * myrs_log_event - prints a Controller Event message
686 */
687 static struct {
688 int ev_code;
689 unsigned char *ev_msg;
690 } myrs_ev_list[] = {
691 /* Physical Device Events (0x0000 - 0x007F) */
692 { 0x0001, "P Online" },
693 { 0x0002, "P Standby" },
694 { 0x0005, "P Automatic Rebuild Started" },
695 { 0x0006, "P Manual Rebuild Started" },
696 { 0x0007, "P Rebuild Completed" },
697 { 0x0008, "P Rebuild Cancelled" },
698 { 0x0009, "P Rebuild Failed for Unknown Reasons" },
699 { 0x000A, "P Rebuild Failed due to New Physical Device" },
700 { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
701 { 0x000C, "S Offline" },
702 { 0x000D, "P Found" },
703 { 0x000E, "P Removed" },
704 { 0x000F, "P Unconfigured" },
705 { 0x0010, "P Expand Capacity Started" },
706 { 0x0011, "P Expand Capacity Completed" },
707 { 0x0012, "P Expand Capacity Failed" },
708 { 0x0013, "P Command Timed Out" },
709 { 0x0014, "P Command Aborted" },
710 { 0x0015, "P Command Retried" },
711 { 0x0016, "P Parity Error" },
712 { 0x0017, "P Soft Error" },
713 { 0x0018, "P Miscellaneous Error" },
714 { 0x0019, "P Reset" },
715 { 0x001A, "P Active Spare Found" },
716 { 0x001B, "P Warm Spare Found" },
717 { 0x001C, "S Sense Data Received" },
718 { 0x001D, "P Initialization Started" },
719 { 0x001E, "P Initialization Completed" },
720 { 0x001F, "P Initialization Failed" },
721 { 0x0020, "P Initialization Cancelled" },
722 { 0x0021, "P Failed because Write Recovery Failed" },
723 { 0x0022, "P Failed because SCSI Bus Reset Failed" },
724 { 0x0023, "P Failed because of Double Check Condition" },
725 { 0x0024, "P Failed because Device Cannot Be Accessed" },
726 { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
727 { 0x0026, "P Failed because of Bad Tag from Device" },
728 { 0x0027, "P Failed because of Command Timeout" },
729 { 0x0028, "P Failed because of System Reset" },
730 { 0x0029, "P Failed because of Busy Status or Parity Error" },
731 { 0x002A, "P Failed because Host Set Device to Failed State" },
732 { 0x002B, "P Failed because of Selection Timeout" },
733 { 0x002C, "P Failed because of SCSI Bus Phase Error" },
734 { 0x002D, "P Failed because Device Returned Unknown Status" },
735 { 0x002E, "P Failed because Device Not Ready" },
736 { 0x002F, "P Failed because Device Not Found at Startup" },
737 { 0x0030, "P Failed because COD Write Operation Failed" },
738 { 0x0031, "P Failed because BDT Write Operation Failed" },
739 { 0x0039, "P Missing at Startup" },
740 { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
741 { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
742 { 0x003D, "P Standby Rebuild Started" },
743 /* Logical Device Events (0x0080 - 0x00FF) */
744 { 0x0080, "M Consistency Check Started" },
745 { 0x0081, "M Consistency Check Completed" },
746 { 0x0082, "M Consistency Check Cancelled" },
747 { 0x0083, "M Consistency Check Completed With Errors" },
748 { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
749 { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
750 { 0x0086, "L Offline" },
751 { 0x0087, "L Critical" },
752 { 0x0088, "L Online" },
753 { 0x0089, "M Automatic Rebuild Started" },
754 { 0x008A, "M Manual Rebuild Started" },
755 { 0x008B, "M Rebuild Completed" },
756 { 0x008C, "M Rebuild Cancelled" },
757 { 0x008D, "M Rebuild Failed for Unknown Reasons" },
758 { 0x008E, "M Rebuild Failed due to New Physical Device" },
759 { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
760 { 0x0090, "M Initialization Started" },
761 { 0x0091, "M Initialization Completed" },
762 { 0x0092, "M Initialization Cancelled" },
763 { 0x0093, "M Initialization Failed" },
764 { 0x0094, "L Found" },
765 { 0x0095, "L Deleted" },
766 { 0x0096, "M Expand Capacity Started" },
767 { 0x0097, "M Expand Capacity Completed" },
768 { 0x0098, "M Expand Capacity Failed" },
769 { 0x0099, "L Bad Block Found" },
770 { 0x009A, "L Size Changed" },
771 { 0x009B, "L Type Changed" },
772 { 0x009C, "L Bad Data Block Found" },
773 { 0x009E, "L Read of Data Block in BDT" },
774 { 0x009F, "L Write Back Data for Disk Block Lost" },
775 { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
776 { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
777 { 0x00A2, "L Standby Rebuild Started" },
778 /* Fault Management Events (0x0100 - 0x017F) */
779 { 0x0140, "E Fan %d Failed" },
780 { 0x0141, "E Fan %d OK" },
781 { 0x0142, "E Fan %d Not Present" },
782 { 0x0143, "E Power Supply %d Failed" },
783 { 0x0144, "E Power Supply %d OK" },
784 { 0x0145, "E Power Supply %d Not Present" },
785 { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
786 { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
787 { 0x0148, "E Temperature Sensor %d Temperature Normal" },
788 { 0x0149, "E Temperature Sensor %d Not Present" },
789 { 0x014A, "E Enclosure Management Unit %d Access Critical" },
790 { 0x014B, "E Enclosure Management Unit %d Access OK" },
791 { 0x014C, "E Enclosure Management Unit %d Access Offline" },
792 /* Controller Events (0x0180 - 0x01FF) */
793 { 0x0181, "C Cache Write Back Error" },
794 { 0x0188, "C Battery Backup Unit Found" },
795 { 0x0189, "C Battery Backup Unit Charge Level Low" },
796 { 0x018A, "C Battery Backup Unit Charge Level OK" },
797 { 0x0193, "C Installation Aborted" },
798 { 0x0195, "C Battery Backup Unit Physically Removed" },
799 { 0x0196, "C Memory Error During Warm Boot" },
800 { 0x019E, "C Memory Soft ECC Error Corrected" },
801 { 0x019F, "C Memory Hard ECC Error Corrected" },
802 { 0x01A2, "C Battery Backup Unit Failed" },
803 { 0x01AB, "C Mirror Race Recovery Failed" },
804 { 0x01AC, "C Mirror Race on Critical Drive" },
805 /* Controller Internal Processor Events */
806 { 0x0380, "C Internal Controller Hung" },
807 { 0x0381, "C Internal Controller Firmware Breakpoint" },
808 { 0x0390, "C Internal Controller i960 Processor Specific Error" },
809 { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
810 { 0, "" }
811 };
812
813 static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
814 {
815 unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
816 int ev_idx = 0, ev_code;
817 unsigned char ev_type, *ev_msg;
818 struct Scsi_Host *shost = cs->host;
819 struct scsi_device *sdev;
820 struct scsi_sense_hdr sshdr = {0};
821 unsigned char sense_info[4];
822 unsigned char cmd_specific[4];
823
824 if (ev->ev_code == 0x1C) {
825 if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
826 memset(&sshdr, 0x0, sizeof(sshdr));
827 memset(sense_info, 0x0, sizeof(sense_info));
828 memset(cmd_specific, 0x0, sizeof(cmd_specific));
829 } else {
830 memcpy(sense_info, &ev->sense_data[3], 4);
831 memcpy(cmd_specific, &ev->sense_data[7], 4);
832 }
833 }
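/*
 * Vendor-specific sense data with ASC 0x80/0x81 carries a controller event
 * code in ASC/ASCQ; remap it so the table lookup below finds the message.
 */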
834 if (sshdr.sense_key == VENDOR_SPECIFIC &&
835 (sshdr.asc == 0x80 || sshdr.asc == 0x81))
836 ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
837 while (true) {
838 ev_code = myrs_ev_list[ev_idx].ev_code;
839 if (ev_code == ev->ev_code || ev_code == 0)
840 break;
841 ev_idx++;
842 }
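/*
 * The first character of each table entry encodes the event class
 * ('P'hysical, 'L'ogical, 'M' rebuild/consistency, 'S'ense, 'E'nclosure,
 * 'C'ontroller); the message text follows the two-character prefix.
 */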
843 ev_type = myrs_ev_list[ev_idx].ev_msg[0];
844 ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
845 if (ev_code == 0) {
846 shost_printk(KERN_WARNING, shost,
847 "Unknown Controller Event Code %04X\n",
848 ev->ev_code);
849 return;
850 }
851 switch (ev_type) {
852 case 'P':
853 sdev = scsi_device_lookup(shost, ev->channel,
854 ev->target, 0);
855 sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
856 ev->ev_seq, ev_msg);
857 if (sdev && sdev->hostdata &&
858 sdev->channel < cs->ctlr_info->physchan_present) {
859 struct myrs_pdev_info *pdev_info = sdev->hostdata;
860
861 switch (ev->ev_code) {
862 case 0x0001:
863 case 0x0007:
864 pdev_info->dev_state = MYRS_DEVICE_ONLINE;
865 break;
866 case 0x0002:
867 pdev_info->dev_state = MYRS_DEVICE_STANDBY;
868 break;
869 case 0x000C:
870 pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
871 break;
872 case 0x000E:
873 pdev_info->dev_state = MYRS_DEVICE_MISSING;
874 break;
875 case 0x000F:
876 pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
877 break;
878 }
879 }
880 break;
881 case 'L':
882 shost_printk(KERN_INFO, shost,
883 "event %d: Logical Drive %d %s\n",
884 ev->ev_seq, ev->lun, ev_msg);
885 cs->needs_update = true;
886 break;
887 case 'M':
888 shost_printk(KERN_INFO, shost,
889 "event %d: Logical Drive %d %s\n",
890 ev->ev_seq, ev->lun, ev_msg);
891 cs->needs_update = true;
892 break;
893 case 'S':
894 if (sshdr.sense_key == NO_SENSE ||
895 (sshdr.sense_key == NOT_READY &&
896 sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
897 sshdr.ascq == 0x02)))
898 break;
899 shost_printk(KERN_INFO, shost,
900 "event %d: Physical Device %d:%d %s\n",
901 ev->ev_seq, ev->channel, ev->target, ev_msg);
902 shost_printk(KERN_INFO, shost,
903 "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
904 ev->channel, ev->target,
905 sshdr.sense_key, sshdr.asc, sshdr.ascq);
906 shost_printk(KERN_INFO, shost,
907 "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
908 ev->channel, ev->target,
909 sense_info[0], sense_info[1],
910 sense_info[2], sense_info[3],
911 cmd_specific[0], cmd_specific[1],
912 cmd_specific[2], cmd_specific[3]);
913 break;
914 case 'E':
915 if (cs->disable_enc_msg)
916 break;
917 sprintf(msg_buf, ev_msg, ev->lun);
918 shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
919 ev->ev_seq, ev->target, msg_buf);
920 break;
921 case 'C':
922 shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
923 ev->ev_seq, ev_msg);
924 break;
925 default:
926 shost_printk(KERN_INFO, shost,
927 "event %d: Unknown Event Code %04X\n",
928 ev->ev_seq, ev->ev_code);
929 break;
930 }
931 }
932
933 /*
934 * SCSI sysfs interface functions
935 */
936 static ssize_t raid_state_show(struct device *dev,
937 struct device_attribute *attr, char *buf)
938 {
939 struct scsi_device *sdev = to_scsi_device(dev);
940 struct myrs_hba *cs = shost_priv(sdev->host);
941 int ret;
942
943 if (!sdev->hostdata)
944 return snprintf(buf, 16, "Unknown\n");
945
946 if (sdev->channel >= cs->ctlr_info->physchan_present) {
947 struct myrs_ldev_info *ldev_info = sdev->hostdata;
948 const char *name;
949
950 name = myrs_devstate_name(ldev_info->dev_state);
951 if (name)
952 ret = snprintf(buf, 32, "%s\n", name);
953 else
954 ret = snprintf(buf, 32, "Invalid (%02X)\n",
955 ldev_info->dev_state);
956 } else {
957 struct myrs_pdev_info *pdev_info;
958 const char *name;
959
960 pdev_info = sdev->hostdata;
961 name = myrs_devstate_name(pdev_info->dev_state);
962 if (name)
963 ret = snprintf(buf, 32, "%s\n", name);
964 else
965 ret = snprintf(buf, 32, "Invalid (%02X)\n",
966 pdev_info->dev_state);
967 }
968 return ret;
969 }
970
971 static ssize_t raid_state_store(struct device *dev,
972 struct device_attribute *attr, const char *buf, size_t count)
973 {
974 struct scsi_device *sdev = to_scsi_device(dev);
975 struct myrs_hba *cs = shost_priv(sdev->host);
976 struct myrs_cmdblk *cmd_blk;
977 union myrs_cmd_mbox *mbox;
978 enum myrs_devstate new_state;
979 unsigned short ldev_num;
980 unsigned char status;
981
982 if (!strncmp(buf, "offline", 7) ||
983 !strncmp(buf, "kill", 4))
984 new_state = MYRS_DEVICE_OFFLINE;
985 else if (!strncmp(buf, "online", 6))
986 new_state = MYRS_DEVICE_ONLINE;
987 else if (!strncmp(buf, "standby", 7))
988 new_state = MYRS_DEVICE_STANDBY;
989 else
990 return -EINVAL;
991
992 if (sdev->channel < cs->ctlr_info->physchan_present) {
993 struct myrs_pdev_info *pdev_info = sdev->hostdata;
994 struct myrs_devmap *pdev_devmap =
995 (struct myrs_devmap *)&pdev_info->rsvd13;
996
997 if (pdev_info->dev_state == new_state) {
998 sdev_printk(KERN_INFO, sdev,
999 "Device already in %s\n",
1000 myrs_devstate_name(new_state));
1001 return count;
1002 }
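/*
 * The SET DEVICE STATE ioctl addresses logical device numbers, so a
 * physical device must first be translated to its logical device.
 */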
1003 status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
1004 sdev->lun, pdev_devmap);
1005 if (status != MYRS_STATUS_SUCCESS)
1006 return -ENXIO;
1007 ldev_num = pdev_devmap->ldev_num;
1008 } else {
1009 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1010
1011 if (ldev_info->dev_state == new_state) {
1012 sdev_printk(KERN_INFO, sdev,
1013 "Device already in %s\n",
1014 myrs_devstate_name(new_state));
1015 return count;
1016 }
1017 ldev_num = ldev_info->ldev_num;
1018 }
1019 mutex_lock(&cs->dcmd_mutex);
1020 cmd_blk = &cs->dcmd_blk;
1021 myrs_reset_cmd(cmd_blk);
1022 mbox = &cmd_blk->mbox;
1023 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1024 mbox->common.id = MYRS_DCMD_TAG;
1025 mbox->common.control.dma_ctrl_to_host = true;
1026 mbox->common.control.no_autosense = true;
1027 mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
1028 mbox->set_devstate.state = new_state;
1029 mbox->set_devstate.ldev.ldev_num = ldev_num;
1030 myrs_exec_cmd(cs, cmd_blk);
1031 status = cmd_blk->status;
1032 mutex_unlock(&cs->dcmd_mutex);
1033 if (status == MYRS_STATUS_SUCCESS) {
1034 if (sdev->channel < cs->ctlr_info->physchan_present) {
1035 struct myrs_pdev_info *pdev_info = sdev->hostdata;
1036
1037 pdev_info->dev_state = new_state;
1038 } else {
1039 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1040
1041 ldev_info->dev_state = new_state;
1042 }
1043 sdev_printk(KERN_INFO, sdev,
1044 "Set device state to %s\n",
1045 myrs_devstate_name(new_state));
1046 return count;
1047 }
1048 sdev_printk(KERN_INFO, sdev,
1049 "Failed to set device state to %s, status 0x%02x\n",
1050 myrs_devstate_name(new_state), status);
1051 return -EINVAL;
1052 }
1053 static DEVICE_ATTR_RW(raid_state);
1054
1055 static ssize_t raid_level_show(struct device *dev,
1056 struct device_attribute *attr, char *buf)
1057 {
1058 struct scsi_device *sdev = to_scsi_device(dev);
1059 struct myrs_hba *cs = shost_priv(sdev->host);
1060 const char *name = NULL;
1061
1062 if (!sdev->hostdata)
1063 return snprintf(buf, 16, "Unknown\n");
1064
1065 if (sdev->channel >= cs->ctlr_info->physchan_present) {
1066 struct myrs_ldev_info *ldev_info;
1067
1068 ldev_info = sdev->hostdata;
1069 name = myrs_raid_level_name(ldev_info->raid_level);
1070 if (!name)
1071 return snprintf(buf, 32, "Invalid (%02X)\n",
1072 ldev_info->raid_level);
1073
1074 } else
1075 name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
1076
1077 return snprintf(buf, 32, "%s\n", name);
1078 }
1079 static DEVICE_ATTR_RO(raid_level);
1080
1081 static ssize_t rebuild_show(struct device *dev,
1082 struct device_attribute *attr, char *buf)
1083 {
1084 struct scsi_device *sdev = to_scsi_device(dev);
1085 struct myrs_hba *cs = shost_priv(sdev->host);
1086 struct myrs_ldev_info *ldev_info;
1087 unsigned short ldev_num;
1088 unsigned char status;
1089
1090 if (sdev->channel < cs->ctlr_info->physchan_present)
1091 return snprintf(buf, 32, "physical device - not rebuilding\n");
1092
1093 ldev_info = sdev->hostdata;
1094 ldev_num = ldev_info->ldev_num;
1095 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1096 if (status != MYRS_STATUS_SUCCESS) {
1097 sdev_printk(KERN_INFO, sdev,
1098 "Failed to get device information, status 0x%02x\n",
1099 status);
1100 return -EIO;
1101 }
1102 if (ldev_info->rbld_active) {
1103 return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
1104 (size_t)ldev_info->rbld_lba,
1105 (size_t)ldev_info->cfg_devsize);
1106 } else
1107 return snprintf(buf, 32, "not rebuilding\n");
1108 }
1109
1110 static ssize_t rebuild_store(struct device *dev,
1111 struct device_attribute *attr, const char *buf, size_t count)
1112 {
1113 struct scsi_device *sdev = to_scsi_device(dev);
1114 struct myrs_hba *cs = shost_priv(sdev->host);
1115 struct myrs_ldev_info *ldev_info;
1116 struct myrs_cmdblk *cmd_blk;
1117 union myrs_cmd_mbox *mbox;
1118 unsigned short ldev_num;
1119 unsigned char status;
1120 int rebuild, ret;
1121
1122 if (sdev->channel < cs->ctlr_info->physchan_present)
1123 return -EINVAL;
1124
1125 ldev_info = sdev->hostdata;
1126 if (!ldev_info)
1127 return -ENXIO;
1128 ldev_num = ldev_info->ldev_num;
1129
1130 ret = kstrtoint(buf, 0, &rebuild);
1131 if (ret)
1132 return ret;
1133
1134 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1135 if (status != MYRS_STATUS_SUCCESS) {
1136 sdev_printk(KERN_INFO, sdev,
1137 "Failed to get device information, status 0x%02x\n",
1138 status);
1139 return -EIO;
1140 }
1141
1142 if (rebuild && ldev_info->rbld_active) {
1143 sdev_printk(KERN_INFO, sdev,
1144 "Rebuild Not Initiated; already in progress\n");
1145 return -EALREADY;
1146 }
1147 if (!rebuild && !ldev_info->rbld_active) {
1148 sdev_printk(KERN_INFO, sdev,
1149 "Rebuild Not Cancelled; no rebuild in progress\n");
1150 return count;
1151 }
1152
1153 mutex_lock(&cs->dcmd_mutex);
1154 cmd_blk = &cs->dcmd_blk;
1155 myrs_reset_cmd(cmd_blk);
1156 mbox = &cmd_blk->mbox;
1157 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1158 mbox->common.id = MYRS_DCMD_TAG;
1159 mbox->common.control.dma_ctrl_to_host = true;
1160 mbox->common.control.no_autosense = true;
1161 if (rebuild) {
1162 mbox->ldev_info.ldev.ldev_num = ldev_num;
1163 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
1164 } else {
1165 mbox->ldev_info.ldev.ldev_num = ldev_num;
1166 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
1167 }
1168 myrs_exec_cmd(cs, cmd_blk);
1169 status = cmd_blk->status;
1170 mutex_unlock(&cs->dcmd_mutex);
1171 if (status) {
1172 sdev_printk(KERN_INFO, sdev,
1173 "Rebuild Not %s, status 0x%02x\n",
1174 rebuild ? "Initiated" : "Cancelled", status);
1175 ret = -EIO;
1176 } else {
1177 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
1178 rebuild ? "Initiated" : "Cancelled");
1179 ret = count;
1180 }
1181
1182 return ret;
1183 }
1184 static DEVICE_ATTR_RW(rebuild);
1185
1186 static ssize_t consistency_check_show(struct device *dev,
1187 struct device_attribute *attr, char *buf)
1188 {
1189 struct scsi_device *sdev = to_scsi_device(dev);
1190 struct myrs_hba *cs = shost_priv(sdev->host);
1191 struct myrs_ldev_info *ldev_info;
1192 unsigned short ldev_num;
1193
1194 if (sdev->channel < cs->ctlr_info->physchan_present)
1195 return snprintf(buf, 32, "physical device - not checking\n");
1196
1197 ldev_info = sdev->hostdata;
1198 if (!ldev_info)
1199 return -ENXIO;
1200 ldev_num = ldev_info->ldev_num;
1201 myrs_get_ldev_info(cs, ldev_num, ldev_info);
1202 if (ldev_info->cc_active)
1203 return snprintf(buf, 32, "checking block %zu of %zu\n",
1204 (size_t)ldev_info->cc_lba,
1205 (size_t)ldev_info->cfg_devsize);
1206 else
1207 return snprintf(buf, 32, "not checking\n");
1208 }
1209
1210 static ssize_t consistency_check_store(struct device *dev,
1211 struct device_attribute *attr, const char *buf, size_t count)
1212 {
1213 struct scsi_device *sdev = to_scsi_device(dev);
1214 struct myrs_hba *cs = shost_priv(sdev->host);
1215 struct myrs_ldev_info *ldev_info;
1216 struct myrs_cmdblk *cmd_blk;
1217 union myrs_cmd_mbox *mbox;
1218 unsigned short ldev_num;
1219 unsigned char status;
1220 int check, ret;
1221
1222 if (sdev->channel < cs->ctlr_info->physchan_present)
1223 return -EINVAL;
1224
1225 ldev_info = sdev->hostdata;
1226 if (!ldev_info)
1227 return -ENXIO;
1228 ldev_num = ldev_info->ldev_num;
1229
1230 ret = kstrtoint(buf, 0, &check);
1231 if (ret)
1232 return ret;
1233
1234 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1235 if (status != MYRS_STATUS_SUCCESS) {
1236 sdev_printk(KERN_INFO, sdev,
1237 "Failed to get device information, status 0x%02x\n",
1238 status);
1239 return -EIO;
1240 }
1241 if (check && ldev_info->cc_active) {
1242 sdev_printk(KERN_INFO, sdev,
1243 "Consistency Check Not Initiated; "
1244 "already in progress\n");
1245 return -EALREADY;
1246 }
1247 if (!check && !ldev_info->cc_active) {
1248 sdev_printk(KERN_INFO, sdev,
1249 "Consistency Check Not Cancelled; "
1250 "check not in progress\n");
1251 return count;
1252 }
1253
1254 mutex_lock(&cs->dcmd_mutex);
1255 cmd_blk = &cs->dcmd_blk;
1256 myrs_reset_cmd(cmd_blk);
1257 mbox = &cmd_blk->mbox;
1258 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1259 mbox->common.id = MYRS_DCMD_TAG;
1260 mbox->common.control.dma_ctrl_to_host = true;
1261 mbox->common.control.no_autosense = true;
1262 if (check) {
1263 mbox->cc.ldev.ldev_num = ldev_num;
1264 mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
1265 mbox->cc.restore_consistency = true;
1266 mbox->cc.initialized_area_only = false;
1267 } else {
1268 mbox->cc.ldev.ldev_num = ldev_num;
1269 mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
1270 }
1271 myrs_exec_cmd(cs, cmd_blk);
1272 status = cmd_blk->status;
1273 mutex_unlock(&cs->dcmd_mutex);
1274 if (status != MYRS_STATUS_SUCCESS) {
1275 sdev_printk(KERN_INFO, sdev,
1276 "Consistency Check Not %s, status 0x%02x\n",
1277 check ? "Initiated" : "Cancelled", status);
1278 ret = -EIO;
1279 } else {
1280 sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
1281 check ? "Initiated" : "Cancelled");
1282 ret = count;
1283 }
1284
1285 return ret;
1286 }
1287 static DEVICE_ATTR_RW(consistency_check);
1288
1289 static struct device_attribute *myrs_sdev_attrs[] = {
1290 &dev_attr_consistency_check,
1291 &dev_attr_rebuild,
1292 &dev_attr_raid_state,
1293 &dev_attr_raid_level,
1294 NULL,
1295 };
1296
1297 static ssize_t serial_show(struct device *dev,
1298 struct device_attribute *attr, char *buf)
1299 {
1300 struct Scsi_Host *shost = class_to_shost(dev);
1301 struct myrs_hba *cs = shost_priv(shost);
1302 char serial[17];
1303
1304 memcpy(serial, cs->ctlr_info->serial_number, 16);
1305 serial[16] = '\0';
1306 return snprintf(buf, 16, "%s\n", serial);
1307 }
1308 static DEVICE_ATTR_RO(serial);
1309
1310 static ssize_t ctlr_num_show(struct device *dev,
1311 struct device_attribute *attr, char *buf)
1312 {
1313 struct Scsi_Host *shost = class_to_shost(dev);
1314 struct myrs_hba *cs = shost_priv(shost);
1315
1316 return snprintf(buf, 20, "%d\n", cs->host->host_no);
1317 }
1318 static DEVICE_ATTR_RO(ctlr_num);
1319
1320 static struct myrs_cpu_type_tbl {
1321 enum myrs_cpu_type type;
1322 char *name;
1323 } myrs_cpu_type_names[] = {
1324 { MYRS_CPUTYPE_i960CA, "i960CA" },
1325 { MYRS_CPUTYPE_i960RD, "i960RD" },
1326 { MYRS_CPUTYPE_i960RN, "i960RN" },
1327 { MYRS_CPUTYPE_i960RP, "i960RP" },
1328 { MYRS_CPUTYPE_NorthBay, "NorthBay" },
1329 { MYRS_CPUTYPE_StrongArm, "StrongARM" },
1330 { MYRS_CPUTYPE_i960RM, "i960RM" },
1331 };
1332
1333 static ssize_t processor_show(struct device *dev,
1334 struct device_attribute *attr, char *buf)
1335 {
1336 struct Scsi_Host *shost = class_to_shost(dev);
1337 struct myrs_hba *cs = shost_priv(shost);
1338 struct myrs_cpu_type_tbl *tbl;
1339 const char *first_processor = NULL;
1340 const char *second_processor = NULL;
1341 struct myrs_ctlr_info *info = cs->ctlr_info;
1342 ssize_t ret;
1343 int i;
1344
1345 if (info->cpu[0].cpu_count) {
1346 tbl = myrs_cpu_type_names;
1347 for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
1348 if (tbl[i].type == info->cpu[0].cpu_type) {
1349 first_processor = tbl[i].name;
1350 break;
1351 }
1352 }
1353 }
1354 if (info->cpu[1].cpu_count) {
1355 tbl = myrs_cpu_type_names;
1356 for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
1357 if (tbl[i].type == info->cpu[1].cpu_type) {
1358 second_processor = tbl[i].name;
1359 break;
1360 }
1361 }
1362 }
1363 if (first_processor && second_processor)
1364 ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
1365 "2: %s (%s, %d cpus)\n",
1366 info->cpu[0].cpu_name,
1367 first_processor, info->cpu[0].cpu_count,
1368 info->cpu[1].cpu_name,
1369 second_processor, info->cpu[1].cpu_count);
1370 else if (first_processor && !second_processor)
1371 ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
1372 info->cpu[0].cpu_name,
1373 first_processor, info->cpu[0].cpu_count);
1374 else if (!first_processor && second_processor)
1375 ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
1376 info->cpu[1].cpu_name,
1377 second_processor, info->cpu[1].cpu_count);
1378 else
1379 ret = snprintf(buf, 64, "1: absent\n2: absent\n");
1380
1381 return ret;
1382 }
1383 static DEVICE_ATTR_RO(processor);
1384
1385 static ssize_t model_show(struct device *dev,
1386 struct device_attribute *attr, char *buf)
1387 {
1388 struct Scsi_Host *shost = class_to_shost(dev);
1389 struct myrs_hba *cs = shost_priv(shost);
1390
1391 return snprintf(buf, 28, "%s\n", cs->model_name);
1392 }
1393 static DEVICE_ATTR_RO(model);
1394
1395 static ssize_t ctlr_type_show(struct device *dev,
1396 struct device_attribute *attr, char *buf)
1397 {
1398 struct Scsi_Host *shost = class_to_shost(dev);
1399 struct myrs_hba *cs = shost_priv(shost);
1400
1401 return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
1402 }
1403 static DEVICE_ATTR_RO(ctlr_type);
1404
1405 static ssize_t cache_size_show(struct device *dev,
1406 struct device_attribute *attr, char *buf)
1407 {
1408 struct Scsi_Host *shost = class_to_shost(dev);
1409 struct myrs_hba *cs = shost_priv(shost);
1410
1411 return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
1412 }
1413 static DEVICE_ATTR_RO(cache_size);
1414
1415 static ssize_t firmware_show(struct device *dev,
1416 struct device_attribute *attr, char *buf)
1417 {
1418 struct Scsi_Host *shost = class_to_shost(dev);
1419 struct myrs_hba *cs = shost_priv(shost);
1420
1421 return snprintf(buf, 16, "%d.%02d-%02d\n",
1422 cs->ctlr_info->fw_major_version,
1423 cs->ctlr_info->fw_minor_version,
1424 cs->ctlr_info->fw_turn_number);
1425 }
1426 static DEVICE_ATTR_RO(firmware);
1427
1428 static ssize_t discovery_store(struct device *dev,
1429 struct device_attribute *attr, const char *buf, size_t count)
1430 {
1431 struct Scsi_Host *shost = class_to_shost(dev);
1432 struct myrs_hba *cs = shost_priv(shost);
1433 struct myrs_cmdblk *cmd_blk;
1434 union myrs_cmd_mbox *mbox;
1435 unsigned char status;
1436
1437 mutex_lock(&cs->dcmd_mutex);
1438 cmd_blk = &cs->dcmd_blk;
1439 myrs_reset_cmd(cmd_blk);
1440 mbox = &cmd_blk->mbox;
1441 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1442 mbox->common.id = MYRS_DCMD_TAG;
1443 mbox->common.control.dma_ctrl_to_host = true;
1444 mbox->common.control.no_autosense = true;
1445 mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
1446 myrs_exec_cmd(cs, cmd_blk);
1447 status = cmd_blk->status;
1448 mutex_unlock(&cs->dcmd_mutex);
1449 if (status != MYRS_STATUS_SUCCESS) {
1450 shost_printk(KERN_INFO, shost,
1451 "Discovery Not Initiated, status %02X\n",
1452 status);
1453 return -EINVAL;
1454 }
1455 shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
1456 cs->next_evseq = 0;
1457 cs->needs_update = true;
1458 queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
1459 flush_delayed_work(&cs->monitor_work);
1460 shost_printk(KERN_INFO, shost, "Discovery Completed\n");
1461
1462 return count;
1463 }
1464 static DEVICE_ATTR_WO(discovery);
1465
1466 static ssize_t flush_cache_store(struct device *dev,
1467 struct device_attribute *attr, const char *buf, size_t count)
1468 {
1469 struct Scsi_Host *shost = class_to_shost(dev);
1470 struct myrs_hba *cs = shost_priv(shost);
1471 unsigned char status;
1472
1473 status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
1474 MYRS_RAID_CONTROLLER);
1475 if (status == MYRS_STATUS_SUCCESS) {
1476 shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
1477 return count;
1478 }
1479 shost_printk(KERN_INFO, shost,
1480 "Cache Flush failed, status 0x%02x\n", status);
1481 return -EIO;
1482 }
1483 static DEVICE_ATTR_WO(flush_cache);
1484
1485 static ssize_t disable_enclosure_messages_show(struct device *dev,
1486 struct device_attribute *attr, char *buf)
1487 {
1488 struct Scsi_Host *shost = class_to_shost(dev);
1489 struct myrs_hba *cs = shost_priv(shost);
1490
1491 return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
1492 }
1493
1494 static ssize_t disable_enclosure_messages_store(struct device *dev,
1495 struct device_attribute *attr, const char *buf, size_t count)
1496 {
1497 struct scsi_device *sdev = to_scsi_device(dev);
1498 struct myrs_hba *cs = shost_priv(sdev->host);
1499 int value, ret;
1500
1501 ret = kstrtoint(buf, 0, &value);
1502 if (ret)
1503 return ret;
1504
1505 if (value > 2)
1506 return -EINVAL;
1507
1508 cs->disable_enc_msg = value;
1509 return count;
1510 }
1511 static DEVICE_ATTR_RW(disable_enclosure_messages);
1512
1513 static struct device_attribute *myrs_shost_attrs[] = {
1514 &dev_attr_serial,
1515 &dev_attr_ctlr_num,
1516 &dev_attr_processor,
1517 &dev_attr_model,
1518 &dev_attr_ctlr_type,
1519 &dev_attr_cache_size,
1520 &dev_attr_firmware,
1521 &dev_attr_discovery,
1522 &dev_attr_flush_cache,
1523 &dev_attr_disable_enclosure_messages,
1524 NULL,
1525 };
1526
1527 /*
1528 * SCSI midlayer interface
1529 */
1530 static int myrs_host_reset(struct scsi_cmnd *scmd)
1531 {
1532 struct Scsi_Host *shost = scmd->device->host;
1533 struct myrs_hba *cs = shost_priv(shost);
1534
1535 cs->reset(cs->io_base);
1536 return SUCCESS;
1537 }
1538
1539 static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
1540 struct myrs_ldev_info *ldev_info)
1541 {
1542 unsigned char modes[32], *mode_pg;
1543 bool dbd;
1544 size_t mode_len;
1545
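/*
 * Synthesize a MODE SENSE reply for the caching mode page (0x08) instead of
 * forwarding the command to the firmware: an optional 8-byte block
 * descriptor (omitted when DBD is set) followed by the page, with the
 * write-protect, WCE and RCD bits reflecting the logical device settings.
 */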
1546 dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1547 if (dbd) {
1548 mode_len = 24;
1549 mode_pg = &modes[4];
1550 } else {
1551 mode_len = 32;
1552 mode_pg = &modes[12];
1553 }
1554 memset(modes, 0, sizeof(modes));
1555 modes[0] = mode_len - 1;
1556 modes[2] = 0x10; /* Enable FUA */
1557 if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
1558 modes[2] |= 0x80;
1559 if (!dbd) {
1560 unsigned char *block_desc = &modes[4];
1561
1562 modes[3] = 8;
1563 put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
1564 put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
1565 }
1566 mode_pg[0] = 0x08;
1567 mode_pg[1] = 0x12;
1568 if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
1569 mode_pg[2] |= 0x01;
1570 if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
1571 ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
1572 mode_pg[2] |= 0x04;
1573 if (ldev_info->cacheline_size) {
1574 mode_pg[2] |= 0x08;
1575 put_unaligned_be16(1 << ldev_info->cacheline_size,
1576 &mode_pg[14]);
1577 }
1578
1579 scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1580 }
1581
1582 static int myrs_queuecommand(struct Scsi_Host *shost,
1583 struct scsi_cmnd *scmd)
1584 {
1585 struct myrs_hba *cs = shost_priv(shost);
1586 struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1587 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
1588 struct scsi_device *sdev = scmd->device;
1589 union myrs_sgl *hw_sge;
1590 dma_addr_t sense_addr;
1591 struct scatterlist *sgl;
1592 unsigned long flags, timeout;
1593 int nsge;
1594
1595 if (!scmd->device->hostdata) {
1596 scmd->result = (DID_NO_CONNECT << 16);
1597 scmd->scsi_done(scmd);
1598 return 0;
1599 }
1600
1601 switch (scmd->cmnd[0]) {
1602 case REPORT_LUNS:
1603 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
1604 0x20, 0x0);
1605 scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1606 scmd->scsi_done(scmd);
1607 return 0;
1608 case MODE_SENSE:
1609 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1610 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1611
1612 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1613 (scmd->cmnd[2] & 0x3F) != 0x08) {
1614 /* Illegal request, invalid field in CDB */
1615 scsi_build_sense_buffer(0, scmd->sense_buffer,
1616 ILLEGAL_REQUEST, 0x24, 0);
1617 scmd->result = (DRIVER_SENSE << 24) |
1618 SAM_STAT_CHECK_CONDITION;
1619 } else {
1620 myrs_mode_sense(cs, scmd, ldev_info);
1621 scmd->result = (DID_OK << 16);
1622 }
1623 scmd->scsi_done(scmd);
1624 return 0;
1625 }
1626 break;
1627 }
1628
1629 myrs_reset_cmd(cmd_blk);
1630 cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
1631 &sense_addr);
1632 if (!cmd_blk->sense)
1633 return SCSI_MLQUEUE_HOST_BUSY;
1634 cmd_blk->sense_addr = sense_addr;
1635
1636 timeout = scmd->request->timeout;
1637 if (scmd->cmd_len <= 10) {
1638 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1639 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1640
1641 mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
1642 mbox->SCSI_10.pdev.lun = ldev_info->lun;
1643 mbox->SCSI_10.pdev.target = ldev_info->target;
1644 mbox->SCSI_10.pdev.channel = ldev_info->channel;
1645 mbox->SCSI_10.pdev.ctlr = 0;
1646 } else {
1647 mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
1648 mbox->SCSI_10.pdev.lun = sdev->lun;
1649 mbox->SCSI_10.pdev.target = sdev->id;
1650 mbox->SCSI_10.pdev.channel = sdev->channel;
1651 }
1652 mbox->SCSI_10.id = scmd->request->tag + 3;
1653 mbox->SCSI_10.control.dma_ctrl_to_host =
1654 (scmd->sc_data_direction == DMA_FROM_DEVICE);
1655 if (scmd->request->cmd_flags & REQ_FUA)
1656 mbox->SCSI_10.control.fua = true;
1657 mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
1658 mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
1659 mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
1660 mbox->SCSI_10.cdb_len = scmd->cmd_len;
1661 if (timeout > 60) {
1662 mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
1663 mbox->SCSI_10.tmo.tmo_val = timeout / 60;
1664 } else {
1665 mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
1666 mbox->SCSI_10.tmo.tmo_val = timeout;
1667 }
1668 memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
1669 hw_sge = &mbox->SCSI_10.dma_addr;
1670 cmd_blk->dcdb = NULL;
1671 } else {
1672 dma_addr_t dcdb_dma;
1673
1674 cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
1675 &dcdb_dma);
1676 if (!cmd_blk->dcdb) {
1677 dma_pool_free(cs->sense_pool, cmd_blk->sense,
1678 cmd_blk->sense_addr);
1679 cmd_blk->sense = NULL;
1680 cmd_blk->sense_addr = 0;
1681 return SCSI_MLQUEUE_HOST_BUSY;
1682 }
1683 cmd_blk->dcdb_dma = dcdb_dma;
1684 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1685 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1686
1687 mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
1688 mbox->SCSI_255.pdev.lun = ldev_info->lun;
1689 mbox->SCSI_255.pdev.target = ldev_info->target;
1690 mbox->SCSI_255.pdev.channel = ldev_info->channel;
1691 mbox->SCSI_255.pdev.ctlr = 0;
1692 } else {
1693 mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
1694 mbox->SCSI_255.pdev.lun = sdev->lun;
1695 mbox->SCSI_255.pdev.target = sdev->id;
1696 mbox->SCSI_255.pdev.channel = sdev->channel;
1697 }
1698 mbox->SCSI_255.id = scmd->request->tag + 3;
1699 mbox->SCSI_255.control.dma_ctrl_to_host =
1700 (scmd->sc_data_direction == DMA_FROM_DEVICE);
1701 if (scmd->request->cmd_flags & REQ_FUA)
1702 mbox->SCSI_255.control.fua = true;
1703 mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
1704 mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
1705 mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
1706 mbox->SCSI_255.cdb_len = scmd->cmd_len;
1707 mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
1708 if (timeout > 60) {
1709 mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
1710 mbox->SCSI_255.tmo.tmo_val = timeout / 60;
1711 } else {
1712 mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
1713 mbox->SCSI_255.tmo.tmo_val = timeout;
1714 }
1715 memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
1716 hw_sge = &mbox->SCSI_255.dma_addr;
1717 }
1718 if (scmd->sc_data_direction == DMA_NONE)
1719 goto submit;
1720 nsge = scsi_dma_map(scmd);
1721 if (nsge == 1) {
1722 sgl = scsi_sglist(scmd);
1723 hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
1724 hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
1725 } else {
1726 struct myrs_sge *hw_sgl;
1727 dma_addr_t hw_sgl_addr;
1728 int i;
1729
1730 if (nsge > 2) {
1731 hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
1732 &hw_sgl_addr);
1733 if (WARN_ON(!hw_sgl)) {
1734 if (cmd_blk->dcdb) {
1735 dma_pool_free(cs->dcdb_pool,
1736 cmd_blk->dcdb,
1737 cmd_blk->dcdb_dma);
1738 cmd_blk->dcdb = NULL;
1739 cmd_blk->dcdb_dma = 0;
1740 }
1741 dma_pool_free(cs->sense_pool,
1742 cmd_blk->sense,
1743 cmd_blk->sense_addr);
1744 cmd_blk->sense = NULL;
1745 cmd_blk->sense_addr = 0;
1746 return SCSI_MLQUEUE_HOST_BUSY;
1747 }
1748 cmd_blk->sgl = hw_sgl;
1749 cmd_blk->sgl_addr = hw_sgl_addr;
1750 if (scmd->cmd_len <= 10)
1751 mbox->SCSI_10.control.add_sge_mem = true;
1752 else
1753 mbox->SCSI_255.control.add_sge_mem = true;
1754 hw_sge->ext.sge0_len = nsge;
1755 hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
1756 } else
1757 hw_sgl = hw_sge->sge;
1758
1759 scsi_for_each_sg(scmd, sgl, nsge, i) {
1760 if (WARN_ON(!hw_sgl)) {
1761 scsi_dma_unmap(scmd);
1762 scmd->result = (DID_ERROR << 16);
1763 scmd->scsi_done(scmd);
1764 return 0;
1765 }
1766 hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
1767 hw_sgl->sge_count = (u64)sg_dma_len(sgl);
1768 hw_sgl++;
1769 }
1770 }
1771 submit:
1772 spin_lock_irqsave(&cs->queue_lock, flags);
1773 myrs_qcmd(cs, cmd_blk);
1774 spin_unlock_irqrestore(&cs->queue_lock, flags);
1775
1776 return 0;
1777 }
1778
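/*
 * myrs_translate_ldev - map a logical-channel scsi_device to its logical
 * device number, derived from the target id and the channel offset above
 * the physical channels.
 */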
1779 static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
1780 struct scsi_device *sdev)
1781 {
1782 unsigned short ldev_num;
1783 unsigned int chan_offset =
1784 sdev->channel - cs->ctlr_info->physchan_present;
1785
1786 ldev_num = sdev->id + chan_offset * sdev->host->max_id;
1787
1788 return ldev_num;
1789 }
1790
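/*
 * myrs_slave_alloc - allocate per-device driver data
 *
 * Logical drives (channels above the physical ones) get a myrs_ldev_info
 * read from the controller and have their RAID level reported through the
 * raid_class template; physical devices get a myrs_pdev_info.  Either
 * structure is stored in sdev->hostdata.
 */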
1791 static int myrs_slave_alloc(struct scsi_device *sdev)
1792 {
1793 struct myrs_hba *cs = shost_priv(sdev->host);
1794 unsigned char status;
1795
1796 if (sdev->channel > sdev->host->max_channel)
1797 return 0;
1798
1799 if (sdev->channel >= cs->ctlr_info->physchan_present) {
1800 struct myrs_ldev_info *ldev_info;
1801 unsigned short ldev_num;
1802
1803 if (sdev->lun > 0)
1804 return -ENXIO;
1805
1806 ldev_num = myrs_translate_ldev(cs, sdev);
1807
1808 ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
1809 if (!ldev_info)
1810 return -ENOMEM;
1811
1812 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1813 if (status != MYRS_STATUS_SUCCESS) {
1814 sdev->hostdata = NULL;
1815 kfree(ldev_info);
1816 } else {
1817 enum raid_level level;
1818
1819 dev_dbg(&sdev->sdev_gendev,
1820 "Logical device mapping %d:%d:%d -> %d\n",
1821 ldev_info->channel, ldev_info->target,
1822 ldev_info->lun, ldev_info->ldev_num);
1823
1824 sdev->hostdata = ldev_info;
1825 switch (ldev_info->raid_level) {
1826 case MYRS_RAID_LEVEL0:
1827 level = RAID_LEVEL_LINEAR;
1828 break;
1829 case MYRS_RAID_LEVEL1:
1830 level = RAID_LEVEL_1;
1831 break;
1832 case MYRS_RAID_LEVEL3:
1833 case MYRS_RAID_LEVEL3F:
1834 case MYRS_RAID_LEVEL3L:
1835 level = RAID_LEVEL_3;
1836 break;
1837 case MYRS_RAID_LEVEL5:
1838 case MYRS_RAID_LEVEL5L:
1839 level = RAID_LEVEL_5;
1840 break;
1841 case MYRS_RAID_LEVEL6:
1842 level = RAID_LEVEL_6;
1843 break;
1844 case MYRS_RAID_LEVELE:
1845 case MYRS_RAID_NEWSPAN:
1846 case MYRS_RAID_SPAN:
1847 level = RAID_LEVEL_LINEAR;
1848 break;
1849 case MYRS_RAID_JBOD:
1850 level = RAID_LEVEL_JBOD;
1851 break;
1852 default:
1853 level = RAID_LEVEL_UNKNOWN;
1854 break;
1855 }
1856 raid_set_level(myrs_raid_template,
1857 &sdev->sdev_gendev, level);
1858 if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
1859 const char *name;
1860
1861 name = myrs_devstate_name(ldev_info->dev_state);
1862 sdev_printk(KERN_DEBUG, sdev,
1863 "logical device in state %s\n",
1864 name ? name : "Invalid");
1865 }
1866 }
1867 } else {
1868 struct myrs_pdev_info *pdev_info;
1869
1870 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1871 if (!pdev_info)
1872 return -ENOMEM;
1873
1874 status = myrs_get_pdev_info(cs, sdev->channel,
1875 sdev->id, sdev->lun,
1876 pdev_info);
1877 if (status != MYRS_STATUS_SUCCESS) {
1878 sdev->hostdata = NULL;
1879 kfree(pdev_info);
1880 return -ENXIO;
1881 }
1882 sdev->hostdata = pdev_info;
1883 }
1884 return 0;
1885 }
1886
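/*
 * myrs_slave_configure - finish device setup
 *
 * Physical devices are hidden from upper-layer drivers (the controller
 * itself, reported as TYPE_RAID, is skipped entirely); logical drives
 * only exist at LUN 0 and take their write-back cache default from the
 * logical drive's write cache setting.
 */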
1887 static int myrs_slave_configure(struct scsi_device *sdev)
1888 {
1889 struct myrs_hba *cs = shost_priv(sdev->host);
1890 struct myrs_ldev_info *ldev_info;
1891
1892 if (sdev->channel > sdev->host->max_channel)
1893 return -ENXIO;
1894
1895 if (sdev->channel < cs->ctlr_info->physchan_present) {
1896 /* Skip HBA device */
1897 if (sdev->type == TYPE_RAID)
1898 return -ENXIO;
1899 sdev->no_uld_attach = 1;
1900 return 0;
1901 }
1902 if (sdev->lun != 0)
1903 return -ENXIO;
1904
1905 ldev_info = sdev->hostdata;
1906 if (!ldev_info)
1907 return -ENXIO;
1908 if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
1909 ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
1910 sdev->wce_default_on = 1;
1911 sdev->tagged_supported = 1;
1912 return 0;
1913 }
1914
1915 static void myrs_slave_destroy(struct scsi_device *sdev)
1916 {
1917 kfree(sdev->hostdata);
1918 }
1919
1920 static struct scsi_host_template myrs_template = {
1921 .module = THIS_MODULE,
1922 .name = "DAC960",
1923 .proc_name = "myrs",
1924 .queuecommand = myrs_queuecommand,
1925 .eh_host_reset_handler = myrs_host_reset,
1926 .slave_alloc = myrs_slave_alloc,
1927 .slave_configure = myrs_slave_configure,
1928 .slave_destroy = myrs_slave_destroy,
1929 .cmd_size = sizeof(struct myrs_cmdblk),
1930 .shost_attrs = myrs_shost_attrs,
1931 .sdev_attrs = myrs_sdev_attrs,
1932 .this_id = -1,
1933 };
1934
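/*
 * myrs_alloc_host - allocate the Scsi_Host with an embedded myrs_hba,
 * set the maximum CDB length and LUN count and initialise the
 * controller mutexes.
 */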
1935 static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
1936 const struct pci_device_id *entry)
1937 {
1938 struct Scsi_Host *shost;
1939 struct myrs_hba *cs;
1940
1941 shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
1942 if (!shost)
1943 return NULL;
1944
1945 shost->max_cmd_len = 16;
1946 shost->max_lun = 256;
1947 cs = shost_priv(shost);
1948 mutex_init(&cs->dcmd_mutex);
1949 mutex_init(&cs->cinfo_mutex);
1950 cs->host = shost;
1951
1952 return cs;
1953 }
1954
1955 /*
1956 * RAID template functions
1957 */
1958
1959 /**
1960 * myrs_is_raid - return boolean indicating device is raid volume
1961 * @dev: the device struct object
1962 */
1963 static int
1964 myrs_is_raid(struct device *dev)
1965 {
1966 struct scsi_device *sdev = to_scsi_device(dev);
1967 struct myrs_hba *cs = shost_priv(sdev->host);
1968
1969 return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
1970 }
1971
1972 /**
1973 * myrs_get_resync - get raid volume resync percent complete
1974 * @dev: the device struct object
1975 */
1976 static void
1977 myrs_get_resync(struct device *dev)
1978 {
1979 struct scsi_device *sdev = to_scsi_device(dev);
1980 struct myrs_hba *cs = shost_priv(sdev->host);
1981 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1982 u64 percent_complete = 0;
1983
1984 if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
1985 return;
1986 if (ldev_info->rbld_active) {
1987 unsigned short ldev_num = ldev_info->ldev_num;
1988
1989 myrs_get_ldev_info(cs, ldev_num, ldev_info);
1990 percent_complete = ldev_info->rbld_lba * 100;
1991 do_div(percent_complete, ldev_info->cfg_devsize);
1992 }
1993 raid_set_resync(myrs_raid_template, dev, percent_complete);
1994 }
1995
1996 /**
1997 * myrs_get_state - get raid volume status
1998 * @dev: the device struct object
1999 */
2000 static void
2001 myrs_get_state(struct device *dev)
2002 {
2003 struct scsi_device *sdev = to_scsi_device(dev);
2004 struct myrs_hba *cs = shost_priv(sdev->host);
2005 struct myrs_ldev_info *ldev_info = sdev->hostdata;
2006 enum raid_state state = RAID_STATE_UNKNOWN;
2007
2008 if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
2009 state = RAID_STATE_UNKNOWN;
2010 else {
2011 switch (ldev_info->dev_state) {
2012 case MYRS_DEVICE_ONLINE:
2013 state = RAID_STATE_ACTIVE;
2014 break;
2015 case MYRS_DEVICE_SUSPECTED_CRITICAL:
2016 case MYRS_DEVICE_CRITICAL:
2017 state = RAID_STATE_DEGRADED;
2018 break;
2019 case MYRS_DEVICE_REBUILD:
2020 state = RAID_STATE_RESYNCING;
2021 break;
2022 case MYRS_DEVICE_UNCONFIGURED:
2023 case MYRS_DEVICE_INVALID_STATE:
2024 state = RAID_STATE_UNKNOWN;
2025 break;
2026 default:
2027 state = RAID_STATE_OFFLINE;
2028 }
2029 }
2030 raid_set_state(myrs_raid_template, dev, state);
2031 }
2032
2033 static struct raid_function_template myrs_raid_functions = {
2034 .cookie = &myrs_template,
2035 .is_raid = myrs_is_raid,
2036 .get_resync = myrs_get_resync,
2037 .get_state = myrs_get_state,
2038 };
2039
2040 /*
2041 * PCI interface functions
2042 */
2043 static void myrs_flush_cache(struct myrs_hba *cs)
2044 {
2045 myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
2046 }
2047
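/*
 * myrs_handle_scsi - complete a SCSI command
 *
 * Unmaps the data buffer, copies back sense data on failure, returns the
 * per-command sense/DCDB/SG pool allocations, sets the residual and the
 * SCSI result from the controller status, and completes the command via
 * ->scsi_done().
 */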
2048 static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
2049 struct scsi_cmnd *scmd)
2050 {
2051 unsigned char status;
2052
2053 if (!cmd_blk)
2054 return;
2055
2056 scsi_dma_unmap(scmd);
2057 status = cmd_blk->status;
2058 if (cmd_blk->sense) {
2059 if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
2060 unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
2061
2062 if (sense_len > cmd_blk->sense_len)
2063 sense_len = cmd_blk->sense_len;
2064 memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
2065 }
2066 dma_pool_free(cs->sense_pool, cmd_blk->sense,
2067 cmd_blk->sense_addr);
2068 cmd_blk->sense = NULL;
2069 cmd_blk->sense_addr = 0;
2070 }
2071 if (cmd_blk->dcdb) {
2072 dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
2073 cmd_blk->dcdb_dma);
2074 cmd_blk->dcdb = NULL;
2075 cmd_blk->dcdb_dma = 0;
2076 }
2077 if (cmd_blk->sgl) {
2078 dma_pool_free(cs->sg_pool, cmd_blk->sgl,
2079 cmd_blk->sgl_addr);
2080 cmd_blk->sgl = NULL;
2081 cmd_blk->sgl_addr = 0;
2082 }
2083 if (cmd_blk->residual)
2084 scsi_set_resid(scmd, cmd_blk->residual);
2085 if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
2086 status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
2087 scmd->result = (DID_BAD_TARGET << 16);
2088 else
2089 scmd->result = (DID_OK << 16) | status;
2090 scmd->scsi_done(scmd);
2091 }
2092
2093 static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
2094 {
2095 if (!cmd_blk)
2096 return;
2097
2098 if (cmd_blk->complete) {
2099 complete(cmd_blk->complete);
2100 cmd_blk->complete = NULL;
2101 }
2102 }
2103
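/*
 * myrs_monitor - periodic controller monitoring
 *
 * Polls the firmware status, refreshes the controller information when
 * flagged, fetches and logs new event log entries, updates the logical
 * device information while background initialisation, rebuild,
 * consistency check or expansion is active, and re-queues itself,
 * backing off to the secondary interval when nothing has changed.
 */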
2104 static void myrs_monitor(struct work_struct *work)
2105 {
2106 struct myrs_hba *cs = container_of(work, struct myrs_hba,
2107 monitor_work.work);
2108 struct Scsi_Host *shost = cs->host;
2109 struct myrs_ctlr_info *info = cs->ctlr_info;
2110 unsigned int epoch = cs->fwstat_buf->epoch;
2111 unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
2112 unsigned char status;
2113
2114 dev_dbg(&shost->shost_gendev, "monitor tick\n");
2115
2116 status = myrs_get_fwstatus(cs);
2117
2118 if (cs->needs_update) {
2119 cs->needs_update = false;
2120 mutex_lock(&cs->cinfo_mutex);
2121 status = myrs_get_ctlr_info(cs);
2122 mutex_unlock(&cs->cinfo_mutex);
2123 }
2124 if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
2125 status = myrs_get_event(cs, cs->next_evseq,
2126 cs->event_buf);
2127 if (status == MYRS_STATUS_SUCCESS) {
2128 myrs_log_event(cs, cs->event_buf);
2129 cs->next_evseq++;
2130 interval = 1;
2131 }
2132 }
2133
2134 if (time_after(jiffies, cs->secondary_monitor_time
2135 + MYRS_SECONDARY_MONITOR_INTERVAL))
2136 cs->secondary_monitor_time = jiffies;
2137
2138 if (info->bg_init_active +
2139 info->ldev_init_active +
2140 info->pdev_init_active +
2141 info->cc_active +
2142 info->rbld_active +
2143 info->exp_active != 0) {
2144 struct scsi_device *sdev;
2145
2146 shost_for_each_device(sdev, shost) {
2147 struct myrs_ldev_info *ldev_info;
2148 int ldev_num;
2149
2150 if (sdev->channel < info->physchan_present)
2151 continue;
2152 ldev_info = sdev->hostdata;
2153 if (!ldev_info)
2154 continue;
2155 ldev_num = ldev_info->ldev_num;
2156 myrs_get_ldev_info(cs, ldev_num, ldev_info);
2157 }
2158 cs->needs_update = true;
2159 }
2160 if (epoch == cs->epoch &&
2161 cs->fwstat_buf->next_evseq == cs->next_evseq &&
2162 (cs->needs_update == false ||
2163 time_before(jiffies, cs->primary_monitor_time
2164 + MYRS_PRIMARY_MONITOR_INTERVAL))) {
2165 interval = MYRS_SECONDARY_MONITOR_INTERVAL;
2166 }
2167
2168 if (interval > 1)
2169 cs->primary_monitor_time = jiffies;
2170 queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
2171 }
2172
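/*
 * myrs_create_mempools - allocate the per-command DMA pools
 * (scatter/gather lists, sense buffers, DCDBs) and the controller
 * workqueue, then start the monitoring work.
 */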
2173 static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
2174 {
2175 struct Scsi_Host *shost = cs->host;
2176 size_t elem_size, elem_align;
2177
2178 elem_align = sizeof(struct myrs_sge);
2179 elem_size = shost->sg_tablesize * elem_align;
2180 cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
2181 elem_size, elem_align, 0);
2182 if (cs->sg_pool == NULL) {
2183 shost_printk(KERN_ERR, shost,
2184 "Failed to allocate SG pool\n");
2185 return false;
2186 }
2187
2188 cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
2189 MYRS_SENSE_SIZE, sizeof(int), 0);
2190 if (cs->sense_pool == NULL) {
2191 dma_pool_destroy(cs->sg_pool);
2192 cs->sg_pool = NULL;
2193 shost_printk(KERN_ERR, shost,
2194 "Failed to allocate sense data pool\n");
2195 return false;
2196 }
2197
2198 cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
2199 MYRS_DCDB_SIZE,
2200 sizeof(unsigned char), 0);
2201 if (!cs->dcdb_pool) {
2202 dma_pool_destroy(cs->sg_pool);
2203 cs->sg_pool = NULL;
2204 dma_pool_destroy(cs->sense_pool);
2205 cs->sense_pool = NULL;
2206 shost_printk(KERN_ERR, shost,
2207 "Failed to allocate DCDB pool\n");
2208 return false;
2209 }
2210
2211 snprintf(cs->work_q_name, sizeof(cs->work_q_name),
2212 "myrs_wq_%d", shost->host_no);
2213 cs->work_q = create_singlethread_workqueue(cs->work_q_name);
2214 if (!cs->work_q) {
2215 dma_pool_destroy(cs->dcdb_pool);
2216 cs->dcdb_pool = NULL;
2217 dma_pool_destroy(cs->sg_pool);
2218 cs->sg_pool = NULL;
2219 dma_pool_destroy(cs->sense_pool);
2220 cs->sense_pool = NULL;
2221 shost_printk(KERN_ERR, shost,
2222 "Failed to create workqueue\n");
2223 return false;
2224 }
2225
2226 	/* Initialize the monitoring work and schedule its first run. */
2227 INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
2228 queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
2229
2230 return true;
2231 }
2232
2233 static void myrs_destroy_mempools(struct myrs_hba *cs)
2234 {
2235 cancel_delayed_work_sync(&cs->monitor_work);
2236 destroy_workqueue(cs->work_q);
2237
2238 dma_pool_destroy(cs->sg_pool);
2239 dma_pool_destroy(cs->dcdb_pool);
2240 dma_pool_destroy(cs->sense_pool);
2241 }
2242
2243 static void myrs_unmap(struct myrs_hba *cs)
2244 {
2245 kfree(cs->event_buf);
2246 kfree(cs->ctlr_info);
2247 if (cs->fwstat_buf) {
2248 dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
2249 cs->fwstat_buf, cs->fwstat_addr);
2250 cs->fwstat_buf = NULL;
2251 }
2252 if (cs->first_stat_mbox) {
2253 dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
2254 cs->first_stat_mbox, cs->stat_mbox_addr);
2255 cs->first_stat_mbox = NULL;
2256 }
2257 if (cs->first_cmd_mbox) {
2258 dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
2259 cs->first_cmd_mbox, cs->cmd_mbox_addr);
2260 cs->first_cmd_mbox = NULL;
2261 }
2262 }
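/*
 * myrs_cleanup - undo myrs_detect(): free the mailbox and status buffers,
 * disable interrupts and unmap the register window, release the IRQ and
 * I/O region, disable the PCI device and drop the Scsi_Host reference.
 */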
2263
2264 static void myrs_cleanup(struct myrs_hba *cs)
2265 {
2266 struct pci_dev *pdev = cs->pdev;
2267
2268 /* Free the memory mailbox, status, and related structures */
2269 myrs_unmap(cs);
2270
2271 if (cs->mmio_base) {
2272 cs->disable_intr(cs);
2273 iounmap(cs->mmio_base);
2274 cs->mmio_base = NULL;
2275 }
2276 if (cs->irq)
2277 free_irq(cs->irq, cs);
2278 if (cs->io_addr)
2279 release_region(cs->io_addr, 0x80);
2280 pci_set_drvdata(pdev, NULL);
2281 pci_disable_device(pdev);
2282 scsi_host_put(cs->host);
2283 }
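/*
 * myrs_detect - allocate and initialise a controller instance
 *
 * Enables the PCI device, maps the controller register window, runs the
 * board-specific hw_init routine from the private data, and requests the
 * shared IRQ.  On any failure the controller is torn down again via
 * myrs_cleanup().
 */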
2284
2285 static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
2286 const struct pci_device_id *entry)
2287 {
2288 struct myrs_privdata *privdata =
2289 (struct myrs_privdata *)entry->driver_data;
2290 irq_handler_t irq_handler = privdata->irq_handler;
2291 unsigned int mmio_size = privdata->mmio_size;
2292 struct myrs_hba *cs = NULL;
2293
2294 cs = myrs_alloc_host(pdev, entry);
2295 if (!cs) {
2296 dev_err(&pdev->dev, "Unable to allocate Controller\n");
2297 return NULL;
2298 }
2299 cs->pdev = pdev;
2300
2301 if (pci_enable_device(pdev))
2302 goto Failure;
2303
2304 cs->pci_addr = pci_resource_start(pdev, 0);
2305
2306 pci_set_drvdata(pdev, cs);
2307 spin_lock_init(&cs->queue_lock);
2308 /* Map the Controller Register Window. */
2309 if (mmio_size < PAGE_SIZE)
2310 mmio_size = PAGE_SIZE;
2311 cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
2312 if (cs->mmio_base == NULL) {
2313 dev_err(&pdev->dev,
2314 "Unable to map Controller Register Window\n");
2315 goto Failure;
2316 }
2317
2318 cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
2319 if (privdata->hw_init(pdev, cs, cs->io_base))
2320 goto Failure;
2321
2322 /* Acquire shared access to the IRQ Channel. */
2323 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
2324 dev_err(&pdev->dev,
2325 "Unable to acquire IRQ Channel %d\n", pdev->irq);
2326 goto Failure;
2327 }
2328 cs->irq = pdev->irq;
2329 return cs;
2330
2331 Failure:
2332 dev_err(&pdev->dev,
2333 "Failed to initialize Controller\n");
2334 myrs_cleanup(cs);
2335 return NULL;
2336 }
2337
2338 /*
2339 * myrs_err_status reports Controller BIOS Messages passed through
2340 * the Error Status Register when the driver performs the BIOS handshaking.
2341 * It returns true for fatal errors and false otherwise.
2342 */
2343
2344 static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
2345 unsigned char parm0, unsigned char parm1)
2346 {
2347 struct pci_dev *pdev = cs->pdev;
2348
2349 switch (status) {
2350 case 0x00:
2351 dev_info(&pdev->dev,
2352 "Physical Device %d:%d Not Responding\n",
2353 parm1, parm0);
2354 break;
2355 case 0x08:
2356 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2357 break;
2358 case 0x30:
2359 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2360 break;
2361 case 0x60:
2362 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2363 break;
2364 case 0x70:
2365 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2366 break;
2367 case 0x90:
2368 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2369 parm1, parm0);
2370 break;
2371 case 0xA0:
2372 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2373 break;
2374 case 0xB0:
2375 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2376 break;
2377 case 0xD0:
2378 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2379 break;
2380 case 0xF0:
2381 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2382 return true;
2383 default:
2384 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2385 status);
2386 return true;
2387 }
2388 return false;
2389 }
2390
2391 /*
2392 * Hardware-specific functions
2393 */
2394
2395 /*
2396 * DAC960 GEM Series Controllers.
2397 */
2398
2399 static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
2400 {
2401 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
2402
2403 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2404 }
2405
2406 static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
2407 {
2408 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
2409
2410 writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
2411 }
2412
2413 static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
2414 {
2415 __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
2416
2417 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2418 }
2419
2420 static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
2421 {
2422 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
2423
2424 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2425 }
2426
2427 static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
2428 {
2429 __le32 val;
2430
2431 val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
2432 return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
2433 }
2434
2435 static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
2436 {
2437 __le32 val;
2438
2439 val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
2440 return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
2441 }
2442
2443 static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
2444 {
2445 __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
2446
2447 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2448 }
2449
2450 static inline void DAC960_GEM_ack_intr(void __iomem *base)
2451 {
2452 __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
2453 DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
2454
2455 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2456 }
2457
2458 static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
2459 {
2460 __le32 val;
2461
2462 val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
2463 return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
2464 }
2465
2466 static inline void DAC960_GEM_enable_intr(void __iomem *base)
2467 {
2468 __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
2469 DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
2470 writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
2471 }
2472
2473 static inline void DAC960_GEM_disable_intr(void __iomem *base)
2474 {
2475 __le32 val = 0;
2476
2477 writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
2478 }
2479
2480 static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2481 union myrs_cmd_mbox *mbox)
2482 {
2483 memcpy(&mem_mbox->words[1], &mbox->words[1],
2484 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2485 /* Barrier to avoid reordering */
2486 wmb();
2487 mem_mbox->words[0] = mbox->words[0];
2488 /* Barrier to force PCI access */
2489 mb();
2490 }
2491
2492 static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
2493 dma_addr_t cmd_mbox_addr)
2494 {
2495 dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
2496 }
2497
2498 static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
2499 {
2500 return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
2501 }
2502
2503 static inline bool
2504 DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
2505 unsigned char *param0, unsigned char *param1)
2506 {
2507 __le32 val;
2508
2509 val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
2510 if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
2511 return false;
2512 *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
2513 *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
2514 *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
2515 writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
2516 return true;
2517 }
2518
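/*
 * DAC960_GEM_mbox_init - single hardware mailbox handshake, used only
 * while setting up the memory mailbox interface: wait for the hardware
 * mailbox to drain, write the mailbox address, signal a new command,
 * wait for and read the status, then acknowledge it.
 */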
2519 static inline unsigned char
2520 DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2521 {
2522 unsigned char status;
2523
2524 while (DAC960_GEM_hw_mbox_is_full(base))
2525 udelay(1);
2526 DAC960_GEM_write_hw_mbox(base, mbox_addr);
2527 DAC960_GEM_hw_mbox_new_cmd(base);
2528 while (!DAC960_GEM_hw_mbox_status_available(base))
2529 udelay(1);
2530 status = DAC960_GEM_read_cmd_status(base);
2531 DAC960_GEM_ack_hw_mbox_intr(base);
2532 DAC960_GEM_ack_hw_mbox_status(base);
2533
2534 return status;
2535 }
2536
2537 static int DAC960_GEM_hw_init(struct pci_dev *pdev,
2538 struct myrs_hba *cs, void __iomem *base)
2539 {
2540 int timeout = 0;
2541 unsigned char status, parm0, parm1;
2542
2543 DAC960_GEM_disable_intr(base);
2544 DAC960_GEM_ack_hw_mbox_status(base);
2545 udelay(1000);
2546 while (DAC960_GEM_init_in_progress(base) &&
2547 timeout < MYRS_MAILBOX_TIMEOUT) {
2548 if (DAC960_GEM_read_error_status(base, &status,
2549 &parm0, &parm1) &&
2550 myrs_err_status(cs, status, parm0, parm1))
2551 return -EIO;
2552 udelay(10);
2553 timeout++;
2554 }
2555 if (timeout == MYRS_MAILBOX_TIMEOUT) {
2556 dev_err(&pdev->dev,
2557 "Timeout waiting for Controller Initialisation\n");
2558 return -ETIMEDOUT;
2559 }
2560 if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
2561 dev_err(&pdev->dev,
2562 "Unable to Enable Memory Mailbox Interface\n");
2563 DAC960_GEM_reset_ctrl(base);
2564 return -EAGAIN;
2565 }
2566 DAC960_GEM_enable_intr(base);
2567 cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
2568 cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
2569 cs->disable_intr = DAC960_GEM_disable_intr;
2570 cs->reset = DAC960_GEM_reset_ctrl;
2571 return 0;
2572 }
2573
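/*
 * DAC960_GEM_intr_handler - interrupt handler for GEM series controllers
 *
 * Acknowledges the interrupt and drains the status mailbox ring under the
 * queue lock.  Completions are matched to the driver's internal command
 * blocks (MYRS_DCMD_TAG/MYRS_MCMD_TAG) or, for ids of 3 and up, to SCSI
 * commands via scsi_host_find_tag().  The BA and LP handlers below follow
 * the same pattern.
 */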
2574 static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
2575 {
2576 struct myrs_hba *cs = arg;
2577 void __iomem *base = cs->io_base;
2578 struct myrs_stat_mbox *next_stat_mbox;
2579 unsigned long flags;
2580
2581 spin_lock_irqsave(&cs->queue_lock, flags);
2582 DAC960_GEM_ack_intr(base);
2583 next_stat_mbox = cs->next_stat_mbox;
2584 while (next_stat_mbox->id > 0) {
2585 unsigned short id = next_stat_mbox->id;
2586 struct scsi_cmnd *scmd = NULL;
2587 struct myrs_cmdblk *cmd_blk = NULL;
2588
2589 if (id == MYRS_DCMD_TAG)
2590 cmd_blk = &cs->dcmd_blk;
2591 else if (id == MYRS_MCMD_TAG)
2592 cmd_blk = &cs->mcmd_blk;
2593 else {
2594 scmd = scsi_host_find_tag(cs->host, id - 3);
2595 if (scmd)
2596 cmd_blk = scsi_cmd_priv(scmd);
2597 }
2598 if (cmd_blk) {
2599 cmd_blk->status = next_stat_mbox->status;
2600 cmd_blk->sense_len = next_stat_mbox->sense_len;
2601 cmd_blk->residual = next_stat_mbox->residual;
2602 } else
2603 dev_err(&cs->pdev->dev,
2604 "Unhandled command completion %d\n", id);
2605
2606 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
2607 if (++next_stat_mbox > cs->last_stat_mbox)
2608 next_stat_mbox = cs->first_stat_mbox;
2609
2610 if (cmd_blk) {
2611 if (id < 3)
2612 myrs_handle_cmdblk(cs, cmd_blk);
2613 else
2614 myrs_handle_scsi(cs, cmd_blk, scmd);
2615 }
2616 }
2617 cs->next_stat_mbox = next_stat_mbox;
2618 spin_unlock_irqrestore(&cs->queue_lock, flags);
2619 return IRQ_HANDLED;
2620 }
2621
2622 static struct myrs_privdata DAC960_GEM_privdata = {
2623 .hw_init = DAC960_GEM_hw_init,
2624 .irq_handler = DAC960_GEM_intr_handler,
2625 .mmio_size = DAC960_GEM_mmio_size,
2626 };
2627
2628 /*
2629 * DAC960 BA Series Controllers.
2630 */
2631
2632 static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
2633 {
2634 writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
2635 }
2636
2637 static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
2638 {
2639 writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
2640 }
2641
2642 static inline void DAC960_BA_reset_ctrl(void __iomem *base)
2643 {
2644 writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
2645 }
2646
2647 static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
2648 {
2649 writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
2650 }
2651
2652 static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
2653 {
2654 u8 val;
2655
2656 val = readb(base + DAC960_BA_IDB_OFFSET);
2657 return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
2658 }
2659
2660 static inline bool DAC960_BA_init_in_progress(void __iomem *base)
2661 {
2662 u8 val;
2663
2664 val = readb(base + DAC960_BA_IDB_OFFSET);
2665 return !(val & DAC960_BA_IDB_INIT_DONE);
2666 }
2667
2668 static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
2669 {
2670 writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
2671 }
2672
2673 static inline void DAC960_BA_ack_intr(void __iomem *base)
2674 {
2675 writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
2676 base + DAC960_BA_ODB_OFFSET);
2677 }
2678
2679 static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
2680 {
2681 u8 val;
2682
2683 val = readb(base + DAC960_BA_ODB_OFFSET);
2684 return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
2685 }
2686
2687 static inline void DAC960_BA_enable_intr(void __iomem *base)
2688 {
2689 writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
2690 }
2691
2692 static inline void DAC960_BA_disable_intr(void __iomem *base)
2693 {
2694 writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
2695 }
2696
2697 static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2698 union myrs_cmd_mbox *mbox)
2699 {
2700 memcpy(&mem_mbox->words[1], &mbox->words[1],
2701 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2702 /* Barrier to avoid reordering */
2703 wmb();
2704 mem_mbox->words[0] = mbox->words[0];
2705 /* Barrier to force PCI access */
2706 mb();
2707 }
2708
2709
2710 static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
2711 dma_addr_t cmd_mbox_addr)
2712 {
2713 dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
2714 }
2715
2716 static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
2717 {
2718 return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
2719 }
2720
2721 static inline bool
2722 DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
2723 unsigned char *param0, unsigned char *param1)
2724 {
2725 u8 val;
2726
2727 val = readb(base + DAC960_BA_ERRSTS_OFFSET);
2728 if (!(val & DAC960_BA_ERRSTS_PENDING))
2729 return false;
2730 val &= ~DAC960_BA_ERRSTS_PENDING;
2731 *error = val;
2732 *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
2733 *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
2734 writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
2735 return true;
2736 }
2737
2738 static inline unsigned char
2739 DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2740 {
2741 unsigned char status;
2742
2743 while (DAC960_BA_hw_mbox_is_full(base))
2744 udelay(1);
2745 DAC960_BA_write_hw_mbox(base, mbox_addr);
2746 DAC960_BA_hw_mbox_new_cmd(base);
2747 while (!DAC960_BA_hw_mbox_status_available(base))
2748 udelay(1);
2749 status = DAC960_BA_read_cmd_status(base);
2750 DAC960_BA_ack_hw_mbox_intr(base);
2751 DAC960_BA_ack_hw_mbox_status(base);
2752
2753 return status;
2754 }
2755
2756 static int DAC960_BA_hw_init(struct pci_dev *pdev,
2757 struct myrs_hba *cs, void __iomem *base)
2758 {
2759 int timeout = 0;
2760 unsigned char status, parm0, parm1;
2761
2762 DAC960_BA_disable_intr(base);
2763 DAC960_BA_ack_hw_mbox_status(base);
2764 udelay(1000);
2765 while (DAC960_BA_init_in_progress(base) &&
2766 timeout < MYRS_MAILBOX_TIMEOUT) {
2767 if (DAC960_BA_read_error_status(base, &status,
2768 &parm0, &parm1) &&
2769 myrs_err_status(cs, status, parm0, parm1))
2770 return -EIO;
2771 udelay(10);
2772 timeout++;
2773 }
2774 if (timeout == MYRS_MAILBOX_TIMEOUT) {
2775 dev_err(&pdev->dev,
2776 "Timeout waiting for Controller Initialisation\n");
2777 return -ETIMEDOUT;
2778 }
2779 if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
2780 dev_err(&pdev->dev,
2781 "Unable to Enable Memory Mailbox Interface\n");
2782 DAC960_BA_reset_ctrl(base);
2783 return -EAGAIN;
2784 }
2785 DAC960_BA_enable_intr(base);
2786 cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
2787 cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
2788 cs->disable_intr = DAC960_BA_disable_intr;
2789 cs->reset = DAC960_BA_reset_ctrl;
2790 return 0;
2791 }
2792
2793 static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
2794 {
2795 struct myrs_hba *cs = arg;
2796 void __iomem *base = cs->io_base;
2797 struct myrs_stat_mbox *next_stat_mbox;
2798 unsigned long flags;
2799
2800 spin_lock_irqsave(&cs->queue_lock, flags);
2801 DAC960_BA_ack_intr(base);
2802 next_stat_mbox = cs->next_stat_mbox;
2803 while (next_stat_mbox->id > 0) {
2804 unsigned short id = next_stat_mbox->id;
2805 struct scsi_cmnd *scmd = NULL;
2806 struct myrs_cmdblk *cmd_blk = NULL;
2807
2808 if (id == MYRS_DCMD_TAG)
2809 cmd_blk = &cs->dcmd_blk;
2810 else if (id == MYRS_MCMD_TAG)
2811 cmd_blk = &cs->mcmd_blk;
2812 else {
2813 scmd = scsi_host_find_tag(cs->host, id - 3);
2814 if (scmd)
2815 cmd_blk = scsi_cmd_priv(scmd);
2816 }
2817 if (cmd_blk) {
2818 cmd_blk->status = next_stat_mbox->status;
2819 cmd_blk->sense_len = next_stat_mbox->sense_len;
2820 cmd_blk->residual = next_stat_mbox->residual;
2821 } else
2822 dev_err(&cs->pdev->dev,
2823 "Unhandled command completion %d\n", id);
2824
2825 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
2826 if (++next_stat_mbox > cs->last_stat_mbox)
2827 next_stat_mbox = cs->first_stat_mbox;
2828
2829 if (cmd_blk) {
2830 if (id < 3)
2831 myrs_handle_cmdblk(cs, cmd_blk);
2832 else
2833 myrs_handle_scsi(cs, cmd_blk, scmd);
2834 }
2835 }
2836 cs->next_stat_mbox = next_stat_mbox;
2837 spin_unlock_irqrestore(&cs->queue_lock, flags);
2838 return IRQ_HANDLED;
2839 }
2840
2841 static struct myrs_privdata DAC960_BA_privdata = {
2842 .hw_init = DAC960_BA_hw_init,
2843 .irq_handler = DAC960_BA_intr_handler,
2844 .mmio_size = DAC960_BA_mmio_size,
2845 };
2846
2847 /*
2848 * DAC960 LP Series Controllers.
2849 */
2850
2851 static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
2852 {
2853 writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
2854 }
2855
2856 static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
2857 {
2858 writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
2859 }
2860
2861 static inline void DAC960_LP_reset_ctrl(void __iomem *base)
2862 {
2863 writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
2864 }
2865
2866 static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
2867 {
2868 writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
2869 }
2870
2871 static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
2872 {
2873 u8 val;
2874
2875 val = readb(base + DAC960_LP_IDB_OFFSET);
2876 return val & DAC960_LP_IDB_HWMBOX_FULL;
2877 }
2878
2879 static inline bool DAC960_LP_init_in_progress(void __iomem *base)
2880 {
2881 u8 val;
2882
2883 val = readb(base + DAC960_LP_IDB_OFFSET);
2884 return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
2885 }
2886
2887 static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
2888 {
2889 writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
2890 }
2891
2892 static inline void DAC960_LP_ack_intr(void __iomem *base)
2893 {
2894 writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
2895 base + DAC960_LP_ODB_OFFSET);
2896 }
2897
2898 static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
2899 {
2900 u8 val;
2901
2902 val = readb(base + DAC960_LP_ODB_OFFSET);
2903 return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
2904 }
2905
2906 static inline void DAC960_LP_enable_intr(void __iomem *base)
2907 {
2908 writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
2909 }
2910
2911 static inline void DAC960_LP_disable_intr(void __iomem *base)
2912 {
2913 writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
2914 }
2915
2916 static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2917 union myrs_cmd_mbox *mbox)
2918 {
2919 memcpy(&mem_mbox->words[1], &mbox->words[1],
2920 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2921 /* Barrier to avoid reordering */
2922 wmb();
2923 mem_mbox->words[0] = mbox->words[0];
2924 /* Barrier to force PCI access */
2925 mb();
2926 }
2927
2928 static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
2929 dma_addr_t cmd_mbox_addr)
2930 {
2931 dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
2932 }
2933
2934 static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
2935 {
2936 return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
2937 }
2938
2939 static inline bool
2940 DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
2941 unsigned char *param0, unsigned char *param1)
2942 {
2943 u8 val;
2944
2945 val = readb(base + DAC960_LP_ERRSTS_OFFSET);
2946 if (!(val & DAC960_LP_ERRSTS_PENDING))
2947 return false;
2948 val &= ~DAC960_LP_ERRSTS_PENDING;
2949 *error = val;
2950 *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
2951 *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
2952 writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
2953 return true;
2954 }
2955
2956 static inline unsigned char
2957 DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2958 {
2959 unsigned char status;
2960
2961 while (DAC960_LP_hw_mbox_is_full(base))
2962 udelay(1);
2963 DAC960_LP_write_hw_mbox(base, mbox_addr);
2964 DAC960_LP_hw_mbox_new_cmd(base);
2965 while (!DAC960_LP_hw_mbox_status_available(base))
2966 udelay(1);
2967 status = DAC960_LP_read_cmd_status(base);
2968 DAC960_LP_ack_hw_mbox_intr(base);
2969 DAC960_LP_ack_hw_mbox_status(base);
2970
2971 return status;
2972 }
2973
2974 static int DAC960_LP_hw_init(struct pci_dev *pdev,
2975 struct myrs_hba *cs, void __iomem *base)
2976 {
2977 int timeout = 0;
2978 unsigned char status, parm0, parm1;
2979
2980 DAC960_LP_disable_intr(base);
2981 DAC960_LP_ack_hw_mbox_status(base);
2982 udelay(1000);
2983 while (DAC960_LP_init_in_progress(base) &&
2984 timeout < MYRS_MAILBOX_TIMEOUT) {
2985 if (DAC960_LP_read_error_status(base, &status,
2986 &parm0, &parm1) &&
2987 myrs_err_status(cs, status, parm0, parm1))
2988 return -EIO;
2989 udelay(10);
2990 timeout++;
2991 }
2992 if (timeout == MYRS_MAILBOX_TIMEOUT) {
2993 dev_err(&pdev->dev,
2994 "Timeout waiting for Controller Initialisation\n");
2995 return -ETIMEDOUT;
2996 }
2997 if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
2998 dev_err(&pdev->dev,
2999 "Unable to Enable Memory Mailbox Interface\n");
3000 DAC960_LP_reset_ctrl(base);
3001 return -ENODEV;
3002 }
3003 DAC960_LP_enable_intr(base);
3004 cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
3005 cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
3006 cs->disable_intr = DAC960_LP_disable_intr;
3007 cs->reset = DAC960_LP_reset_ctrl;
3008
3009 return 0;
3010 }
3011
3012 static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
3013 {
3014 struct myrs_hba *cs = arg;
3015 void __iomem *base = cs->io_base;
3016 struct myrs_stat_mbox *next_stat_mbox;
3017 unsigned long flags;
3018
3019 spin_lock_irqsave(&cs->queue_lock, flags);
3020 DAC960_LP_ack_intr(base);
3021 next_stat_mbox = cs->next_stat_mbox;
3022 while (next_stat_mbox->id > 0) {
3023 unsigned short id = next_stat_mbox->id;
3024 struct scsi_cmnd *scmd = NULL;
3025 struct myrs_cmdblk *cmd_blk = NULL;
3026
3027 if (id == MYRS_DCMD_TAG)
3028 cmd_blk = &cs->dcmd_blk;
3029 else if (id == MYRS_MCMD_TAG)
3030 cmd_blk = &cs->mcmd_blk;
3031 else {
3032 scmd = scsi_host_find_tag(cs->host, id - 3);
3033 if (scmd)
3034 cmd_blk = scsi_cmd_priv(scmd);
3035 }
3036 if (cmd_blk) {
3037 cmd_blk->status = next_stat_mbox->status;
3038 cmd_blk->sense_len = next_stat_mbox->sense_len;
3039 cmd_blk->residual = next_stat_mbox->residual;
3040 } else
3041 dev_err(&cs->pdev->dev,
3042 "Unhandled command completion %d\n", id);
3043
3044 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
3045 if (++next_stat_mbox > cs->last_stat_mbox)
3046 next_stat_mbox = cs->first_stat_mbox;
3047
3048 if (cmd_blk) {
3049 if (id < 3)
3050 myrs_handle_cmdblk(cs, cmd_blk);
3051 else
3052 myrs_handle_scsi(cs, cmd_blk, scmd);
3053 }
3054 }
3055 cs->next_stat_mbox = next_stat_mbox;
3056 spin_unlock_irqrestore(&cs->queue_lock, flags);
3057 return IRQ_HANDLED;
3058 }
3059
3060 static struct myrs_privdata DAC960_LP_privdata = {
3061 .hw_init = DAC960_LP_hw_init,
3062 .irq_handler = DAC960_LP_intr_handler,
3063 .mmio_size = DAC960_LP_mmio_size,
3064 };
3065
3066 /*
3067 * Module functions
3068 */
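/*
 * myrs_probe - detect and initialise one controller: read its
 * configuration, create the DMA pools and workqueue, register the
 * Scsi_Host and scan it for devices.
 */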
3069 static int
3070 myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3071 {
3072 struct myrs_hba *cs;
3073 int ret;
3074
3075 cs = myrs_detect(dev, entry);
3076 if (!cs)
3077 return -ENODEV;
3078
3079 ret = myrs_get_config(cs);
3080 if (ret < 0) {
3081 myrs_cleanup(cs);
3082 return ret;
3083 }
3084
3085 if (!myrs_create_mempools(dev, cs)) {
3086 ret = -ENOMEM;
3087 goto failed;
3088 }
3089
3090 ret = scsi_add_host(cs->host, &dev->dev);
3091 if (ret) {
3092 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3093 myrs_destroy_mempools(cs);
3094 goto failed;
3095 }
3096 scsi_scan_host(cs->host);
3097 return 0;
3098 failed:
3099 myrs_cleanup(cs);
3100 return ret;
3101 }
3102
3103
3104 static void myrs_remove(struct pci_dev *pdev)
3105 {
3106 struct myrs_hba *cs = pci_get_drvdata(pdev);
3107
3108 if (cs == NULL)
3109 return;
3110
3111 shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
3112 myrs_flush_cache(cs);
3113 myrs_destroy_mempools(cs);
3114 myrs_cleanup(cs);
3115 }
3116
3117
3118 static const struct pci_device_id myrs_id_table[] = {
3119 {
3120 PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
3121 PCI_DEVICE_ID_MYLEX_DAC960_GEM,
3122 PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
3123 .driver_data = (unsigned long) &DAC960_GEM_privdata,
3124 },
3125 {
3126 PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
3127 },
3128 {
3129 PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
3130 },
3131 {0, },
3132 };
3133
3134 MODULE_DEVICE_TABLE(pci, myrs_id_table);
3135
3136 static struct pci_driver myrs_pci_driver = {
3137 .name = "myrs",
3138 .id_table = myrs_id_table,
3139 .probe = myrs_probe,
3140 .remove = myrs_remove,
3141 };
3142
3143 static int __init myrs_init_module(void)
3144 {
3145 int ret;
3146
3147 myrs_raid_template = raid_class_attach(&myrs_raid_functions);
3148 if (!myrs_raid_template)
3149 return -ENODEV;
3150
3151 ret = pci_register_driver(&myrs_pci_driver);
3152 if (ret)
3153 raid_class_release(myrs_raid_template);
3154
3155 return ret;
3156 }
3157
3158 static void __exit myrs_cleanup_module(void)
3159 {
3160 pci_unregister_driver(&myrs_pci_driver);
3161 raid_class_release(myrs_raid_template);
3162 }
3163
3164 module_init(myrs_init_module);
3165 module_exit(myrs_cleanup_module);
3166
3167 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
3168 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3169 MODULE_LICENSE("GPL");
3170