1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4  *
5  * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6  *
7  * Based on the original DAC960 driver,
8  * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9  * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10  *
11  */
12 
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/raid_class.h>
19 #include <asm/unaligned.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_host.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_tcq.h>
25 #include "myrb.h"
26 
27 static struct raid_template *myrb_raid_template;
28 
29 static void myrb_monitor(struct work_struct *work);
30 static inline void myrb_translate_devstate(void *DeviceState);
31 
myrb_logical_channel(struct Scsi_Host * shost)32 static inline int myrb_logical_channel(struct Scsi_Host *shost)
33 {
34 	return shost->max_channel - 1;
35 }
36 
37 static struct myrb_devstate_name_entry {
38 	enum myrb_devstate state;
39 	const char *name;
40 } myrb_devstate_name_list[] = {
41 	{ MYRB_DEVICE_DEAD, "Dead" },
42 	{ MYRB_DEVICE_WO, "WriteOnly" },
43 	{ MYRB_DEVICE_ONLINE, "Online" },
44 	{ MYRB_DEVICE_CRITICAL, "Critical" },
45 	{ MYRB_DEVICE_STANDBY, "Standby" },
46 	{ MYRB_DEVICE_OFFLINE, "Offline" },
47 };
48 
myrb_devstate_name(enum myrb_devstate state)49 static const char *myrb_devstate_name(enum myrb_devstate state)
50 {
51 	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
52 	int i;
53 
54 	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 		if (entry[i].state == state)
56 			return entry[i].name;
57 	}
58 	return "Unknown";
59 }
60 
61 static struct myrb_raidlevel_name_entry {
62 	enum myrb_raidlevel level;
63 	const char *name;
64 } myrb_raidlevel_name_list[] = {
65 	{ MYRB_RAID_LEVEL0, "RAID0" },
66 	{ MYRB_RAID_LEVEL1, "RAID1" },
67 	{ MYRB_RAID_LEVEL3, "RAID3" },
68 	{ MYRB_RAID_LEVEL5, "RAID5" },
69 	{ MYRB_RAID_LEVEL6, "RAID6" },
70 	{ MYRB_RAID_JBOD, "JBOD" },
71 };
72 
myrb_raidlevel_name(enum myrb_raidlevel level)73 static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
74 {
75 	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
76 	int i;
77 
78 	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 		if (entry[i].level == level)
80 			return entry[i].name;
81 	}
82 	return NULL;
83 }
84 
85 /*
86  * myrb_create_mempools - allocates auxiliary data structures
87  *
88  * Return: true on success, false otherwise.
89  */
myrb_create_mempools(struct pci_dev * pdev,struct myrb_hba * cb)90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
91 {
92 	size_t elem_size, elem_align;
93 
94 	elem_align = sizeof(struct myrb_sge);
95 	elem_size = cb->host->sg_tablesize * elem_align;
96 	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 				      elem_size, elem_align, 0);
98 	if (cb->sg_pool == NULL) {
99 		shost_printk(KERN_ERR, cb->host,
100 			     "Failed to allocate SG pool\n");
101 		return false;
102 	}
103 
104 	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 				       sizeof(struct myrb_dcdb),
106 				       sizeof(unsigned int), 0);
107 	if (!cb->dcdb_pool) {
108 		dma_pool_destroy(cb->sg_pool);
109 		cb->sg_pool = NULL;
110 		shost_printk(KERN_ERR, cb->host,
111 			     "Failed to allocate DCDB pool\n");
112 		return false;
113 	}
114 
115 	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 		 "myrb_wq_%d", cb->host->host_no);
117 	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118 	if (!cb->work_q) {
119 		dma_pool_destroy(cb->dcdb_pool);
120 		cb->dcdb_pool = NULL;
121 		dma_pool_destroy(cb->sg_pool);
122 		cb->sg_pool = NULL;
123 		shost_printk(KERN_ERR, cb->host,
124 			     "Failed to create workqueue\n");
125 		return false;
126 	}
127 
128 	/*
129 	 * Initialize the Monitoring Timer.
130 	 */
131 	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
133 
134 	return true;
135 }
136 
137 /*
138  * myrb_destroy_mempools - tears down the memory pools for the controller
139  */
myrb_destroy_mempools(struct myrb_hba * cb)140 static void myrb_destroy_mempools(struct myrb_hba *cb)
141 {
142 	cancel_delayed_work_sync(&cb->monitor_work);
143 	destroy_workqueue(cb->work_q);
144 
145 	dma_pool_destroy(cb->sg_pool);
146 	dma_pool_destroy(cb->dcdb_pool);
147 }
148 
149 /*
150  * myrb_reset_cmd - reset command block
151  */
myrb_reset_cmd(struct myrb_cmdblk * cmd_blk)152 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
153 {
154 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
155 
156 	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
157 	cmd_blk->status = 0;
158 }
159 
160 /*
161  * myrb_qcmd - queues command block for execution
162  */
myrb_qcmd(struct myrb_hba * cb,struct myrb_cmdblk * cmd_blk)163 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
164 {
165 	void __iomem *base = cb->io_base;
166 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
167 	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
168 
169 	cb->write_cmd_mbox(next_mbox, mbox);
170 	if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 	    cb->prev_cmd_mbox2->words[0] == 0)
172 		cb->get_cmd_mbox(base);
173 	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 	cb->prev_cmd_mbox1 = next_mbox;
175 	if (++next_mbox > cb->last_cmd_mbox)
176 		next_mbox = cb->first_cmd_mbox;
177 	cb->next_cmd_mbox = next_mbox;
178 }
179 
180 /*
181  * myrb_exec_cmd - executes command block and waits for completion.
182  *
183  * Return: command status
184  */
myrb_exec_cmd(struct myrb_hba * cb,struct myrb_cmdblk * cmd_blk)185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 		struct myrb_cmdblk *cmd_blk)
187 {
188 	DECLARE_COMPLETION_ONSTACK(cmpl);
189 	unsigned long flags;
190 
191 	cmd_blk->completion = &cmpl;
192 
193 	spin_lock_irqsave(&cb->queue_lock, flags);
194 	cb->qcmd(cb, cmd_blk);
195 	spin_unlock_irqrestore(&cb->queue_lock, flags);
196 
197 	wait_for_completion(&cmpl);
198 	return cmd_blk->status;
199 }
200 
201 /*
202  * myrb_exec_type3 - executes a type 3 command and waits for completion.
203  *
204  * Return: command status
205  */
myrb_exec_type3(struct myrb_hba * cb,enum myrb_cmd_opcode op,dma_addr_t addr)206 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
207 		enum myrb_cmd_opcode op, dma_addr_t addr)
208 {
209 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
210 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
211 	unsigned short status;
212 
213 	mutex_lock(&cb->dcmd_mutex);
214 	myrb_reset_cmd(cmd_blk);
215 	mbox->type3.id = MYRB_DCMD_TAG;
216 	mbox->type3.opcode = op;
217 	mbox->type3.addr = addr;
218 	status = myrb_exec_cmd(cb, cmd_blk);
219 	mutex_unlock(&cb->dcmd_mutex);
220 	return status;
221 }
222 
223 /*
224  * myrb_exec_type3D - executes a type 3D command and waits for completion.
225  *
226  * Return: command status
227  */
myrb_exec_type3D(struct myrb_hba * cb,enum myrb_cmd_opcode op,struct scsi_device * sdev,struct myrb_pdev_state * pdev_info)228 static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
229 		enum myrb_cmd_opcode op, struct scsi_device *sdev,
230 		struct myrb_pdev_state *pdev_info)
231 {
232 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
233 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
234 	unsigned short status;
235 	dma_addr_t pdev_info_addr;
236 
237 	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
238 					sizeof(struct myrb_pdev_state),
239 					DMA_FROM_DEVICE);
240 	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
241 		return MYRB_STATUS_SUBSYS_FAILED;
242 
243 	mutex_lock(&cb->dcmd_mutex);
244 	myrb_reset_cmd(cmd_blk);
245 	mbox->type3D.id = MYRB_DCMD_TAG;
246 	mbox->type3D.opcode = op;
247 	mbox->type3D.channel = sdev->channel;
248 	mbox->type3D.target = sdev->id;
249 	mbox->type3D.addr = pdev_info_addr;
250 	status = myrb_exec_cmd(cb, cmd_blk);
251 	mutex_unlock(&cb->dcmd_mutex);
252 	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
253 			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
254 	if (status == MYRB_STATUS_SUCCESS &&
255 	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
256 		myrb_translate_devstate(pdev_info);
257 
258 	return status;
259 }
260 
261 static char *myrb_event_msg[] = {
262 	"killed because write recovery failed",
263 	"killed because of SCSI bus reset failure",
264 	"killed because of double check condition",
265 	"killed because it was removed",
266 	"killed because of gross error on SCSI chip",
267 	"killed because of bad tag returned from drive",
268 	"killed because of timeout on SCSI command",
269 	"killed because of reset SCSI command issued from system",
270 	"killed because busy or parity error count exceeded limit",
271 	"killed because of 'kill drive' command from system",
272 	"killed because of selection timeout",
273 	"killed due to SCSI phase sequence error",
274 	"killed due to unknown status",
275 };
276 
277 /**
278  * myrb_get_event - get event log from HBA
279  * @cb: pointer to the hba structure
280  * @event: number of the event
281  *
282  * Execute a type 3E command and logs the event message
283  */
myrb_get_event(struct myrb_hba * cb,unsigned int event)284 static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
285 {
286 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
287 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
288 	struct myrb_log_entry *ev_buf;
289 	dma_addr_t ev_addr;
290 	unsigned short status;
291 
292 	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
293 				    sizeof(struct myrb_log_entry),
294 				    &ev_addr, GFP_KERNEL);
295 	if (!ev_buf)
296 		return;
297 
298 	myrb_reset_cmd(cmd_blk);
299 	mbox->type3E.id = MYRB_MCMD_TAG;
300 	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
301 	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
302 	mbox->type3E.opqual = 1;
303 	mbox->type3E.ev_seq = event;
304 	mbox->type3E.addr = ev_addr;
305 	status = myrb_exec_cmd(cb, cmd_blk);
306 	if (status != MYRB_STATUS_SUCCESS)
307 		shost_printk(KERN_INFO, cb->host,
308 			     "Failed to get event log %d, status %04x\n",
309 			     event, status);
310 
311 	else if (ev_buf->seq_num == event) {
312 		struct scsi_sense_hdr sshdr;
313 
314 		memset(&sshdr, 0, sizeof(sshdr));
315 		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
316 
317 		if (sshdr.sense_key == VENDOR_SPECIFIC &&
318 		    sshdr.asc == 0x80 &&
319 		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
320 			shost_printk(KERN_CRIT, cb->host,
321 				     "Physical drive %d:%d: %s\n",
322 				     ev_buf->channel, ev_buf->target,
323 				     myrb_event_msg[sshdr.ascq]);
324 		else
325 			shost_printk(KERN_CRIT, cb->host,
326 				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
327 				     ev_buf->channel, ev_buf->target,
328 				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
329 	}
330 
331 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
332 			  ev_buf, ev_addr);
333 }
334 
335 /*
336  * myrb_get_errtable - retrieves the error table from the controller
337  *
338  * Executes a type 3 command and logs the error table from the controller.
339  */
myrb_get_errtable(struct myrb_hba * cb)340 static void myrb_get_errtable(struct myrb_hba *cb)
341 {
342 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
343 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
344 	unsigned short status;
345 	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
346 
347 	memcpy(&old_table, cb->err_table, sizeof(old_table));
348 
349 	myrb_reset_cmd(cmd_blk);
350 	mbox->type3.id = MYRB_MCMD_TAG;
351 	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
352 	mbox->type3.addr = cb->err_table_addr;
353 	status = myrb_exec_cmd(cb, cmd_blk);
354 	if (status == MYRB_STATUS_SUCCESS) {
355 		struct myrb_error_entry *table = cb->err_table;
356 		struct myrb_error_entry *new, *old;
357 		size_t err_table_offset;
358 		struct scsi_device *sdev;
359 
360 		shost_for_each_device(sdev, cb->host) {
361 			if (sdev->channel >= myrb_logical_channel(cb->host))
362 				continue;
363 			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
364 				+ sdev->id;
365 			new = table + err_table_offset;
366 			old = &old_table[err_table_offset];
367 			if (new->parity_err == old->parity_err &&
368 			    new->soft_err == old->soft_err &&
369 			    new->hard_err == old->hard_err &&
370 			    new->misc_err == old->misc_err)
371 				continue;
372 			sdev_printk(KERN_CRIT, sdev,
373 				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
374 				    new->parity_err, new->soft_err,
375 				    new->hard_err, new->misc_err);
376 		}
377 	}
378 }
379 
380 /*
381  * myrb_get_ldev_info - retrieves the logical device table from the controller
382  *
383  * Executes a type 3 command and updates the logical device table.
384  *
385  * Return: command status
386  */
myrb_get_ldev_info(struct myrb_hba * cb)387 static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
388 {
389 	unsigned short status;
390 	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
391 	struct Scsi_Host *shost = cb->host;
392 
393 	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
394 				 cb->ldev_info_addr);
395 	if (status != MYRB_STATUS_SUCCESS)
396 		return status;
397 
398 	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
399 		struct myrb_ldev_info *old = NULL;
400 		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
401 		struct scsi_device *sdev;
402 
403 		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
404 					  ldev_num, 0);
405 		if (!sdev) {
406 			if (new->state == MYRB_DEVICE_OFFLINE)
407 				continue;
408 			shost_printk(KERN_INFO, shost,
409 				     "Adding Logical Drive %d in state %s\n",
410 				     ldev_num, myrb_devstate_name(new->state));
411 			scsi_add_device(shost, myrb_logical_channel(shost),
412 					ldev_num, 0);
413 			continue;
414 		}
415 		old = sdev->hostdata;
416 		if (new->state != old->state)
417 			shost_printk(KERN_INFO, shost,
418 				     "Logical Drive %d is now %s\n",
419 				     ldev_num, myrb_devstate_name(new->state));
420 		if (new->wb_enabled != old->wb_enabled)
421 			sdev_printk(KERN_INFO, sdev,
422 				    "Logical Drive is now WRITE %s\n",
423 				    (new->wb_enabled ? "BACK" : "THRU"));
424 		memcpy(old, new, sizeof(*new));
425 		scsi_device_put(sdev);
426 	}
427 	return status;
428 }
429 
430 /*
431  * myrb_get_rbld_progress - get rebuild progress information
432  *
433  * Executes a type 3 command and returns the rebuild progress
434  * information.
435  *
436  * Return: command status
437  */
myrb_get_rbld_progress(struct myrb_hba * cb,struct myrb_rbld_progress * rbld)438 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
439 		struct myrb_rbld_progress *rbld)
440 {
441 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
442 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
443 	struct myrb_rbld_progress *rbld_buf;
444 	dma_addr_t rbld_addr;
445 	unsigned short status;
446 
447 	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
448 				      sizeof(struct myrb_rbld_progress),
449 				      &rbld_addr, GFP_KERNEL);
450 	if (!rbld_buf)
451 		return MYRB_STATUS_RBLD_NOT_CHECKED;
452 
453 	myrb_reset_cmd(cmd_blk);
454 	mbox->type3.id = MYRB_MCMD_TAG;
455 	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
456 	mbox->type3.addr = rbld_addr;
457 	status = myrb_exec_cmd(cb, cmd_blk);
458 	if (rbld)
459 		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
460 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
461 			  rbld_buf, rbld_addr);
462 	return status;
463 }
464 
465 /*
466  * myrb_update_rbld_progress - updates the rebuild status
467  *
468  * Updates the rebuild status for the attached logical devices.
469  */
myrb_update_rbld_progress(struct myrb_hba * cb)470 static void myrb_update_rbld_progress(struct myrb_hba *cb)
471 {
472 	struct myrb_rbld_progress rbld_buf;
473 	unsigned short status;
474 
475 	status = myrb_get_rbld_progress(cb, &rbld_buf);
476 	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
477 	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
478 		status = MYRB_STATUS_RBLD_SUCCESS;
479 	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
480 		unsigned int blocks_done =
481 			rbld_buf.ldev_size - rbld_buf.blocks_left;
482 		struct scsi_device *sdev;
483 
484 		sdev = scsi_device_lookup(cb->host,
485 					  myrb_logical_channel(cb->host),
486 					  rbld_buf.ldev_num, 0);
487 		if (!sdev)
488 			return;
489 
490 		switch (status) {
491 		case MYRB_STATUS_SUCCESS:
492 			sdev_printk(KERN_INFO, sdev,
493 				    "Rebuild in Progress, %d%% completed\n",
494 				    (100 * (blocks_done >> 7))
495 				    / (rbld_buf.ldev_size >> 7));
496 			break;
497 		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
498 			sdev_printk(KERN_INFO, sdev,
499 				    "Rebuild Failed due to Logical Drive Failure\n");
500 			break;
501 		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
502 			sdev_printk(KERN_INFO, sdev,
503 				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
504 			break;
505 		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
506 			sdev_printk(KERN_INFO, sdev,
507 				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
508 			break;
509 		case MYRB_STATUS_RBLD_SUCCESS:
510 			sdev_printk(KERN_INFO, sdev,
511 				    "Rebuild Completed Successfully\n");
512 			break;
513 		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
514 			sdev_printk(KERN_INFO, sdev,
515 				     "Rebuild Successfully Terminated\n");
516 			break;
517 		default:
518 			break;
519 		}
520 		scsi_device_put(sdev);
521 	}
522 	cb->last_rbld_status = status;
523 }
524 
525 /*
526  * myrb_get_cc_progress - retrieve the rebuild status
527  *
528  * Execute a type 3 Command and fetch the rebuild / consistency check
529  * status.
530  */
myrb_get_cc_progress(struct myrb_hba * cb)531 static void myrb_get_cc_progress(struct myrb_hba *cb)
532 {
533 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
534 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
535 	struct myrb_rbld_progress *rbld_buf;
536 	dma_addr_t rbld_addr;
537 	unsigned short status;
538 
539 	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
540 				      sizeof(struct myrb_rbld_progress),
541 				      &rbld_addr, GFP_KERNEL);
542 	if (!rbld_buf) {
543 		cb->need_cc_status = true;
544 		return;
545 	}
546 	myrb_reset_cmd(cmd_blk);
547 	mbox->type3.id = MYRB_MCMD_TAG;
548 	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
549 	mbox->type3.addr = rbld_addr;
550 	status = myrb_exec_cmd(cb, cmd_blk);
551 	if (status == MYRB_STATUS_SUCCESS) {
552 		unsigned int ldev_num = rbld_buf->ldev_num;
553 		unsigned int ldev_size = rbld_buf->ldev_size;
554 		unsigned int blocks_done =
555 			ldev_size - rbld_buf->blocks_left;
556 		struct scsi_device *sdev;
557 
558 		sdev = scsi_device_lookup(cb->host,
559 					  myrb_logical_channel(cb->host),
560 					  ldev_num, 0);
561 		if (sdev) {
562 			sdev_printk(KERN_INFO, sdev,
563 				    "Consistency Check in Progress: %d%% completed\n",
564 				    (100 * (blocks_done >> 7))
565 				    / (ldev_size >> 7));
566 			scsi_device_put(sdev);
567 		}
568 	}
569 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
570 			  rbld_buf, rbld_addr);
571 }
572 
573 /*
574  * myrb_bgi_control - updates background initialisation status
575  *
576  * Executes a type 3B command and updates the background initialisation status
577  */
myrb_bgi_control(struct myrb_hba * cb)578 static void myrb_bgi_control(struct myrb_hba *cb)
579 {
580 	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
581 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
582 	struct myrb_bgi_status *bgi, *last_bgi;
583 	dma_addr_t bgi_addr;
584 	struct scsi_device *sdev = NULL;
585 	unsigned short status;
586 
587 	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
588 				 &bgi_addr, GFP_KERNEL);
589 	if (!bgi) {
590 		shost_printk(KERN_ERR, cb->host,
591 			     "Failed to allocate bgi memory\n");
592 		return;
593 	}
594 	myrb_reset_cmd(cmd_blk);
595 	mbox->type3B.id = MYRB_DCMD_TAG;
596 	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
597 	mbox->type3B.optype = 0x20;
598 	mbox->type3B.addr = bgi_addr;
599 	status = myrb_exec_cmd(cb, cmd_blk);
600 	last_bgi = &cb->bgi_status;
601 	sdev = scsi_device_lookup(cb->host,
602 				  myrb_logical_channel(cb->host),
603 				  bgi->ldev_num, 0);
604 	switch (status) {
605 	case MYRB_STATUS_SUCCESS:
606 		switch (bgi->status) {
607 		case MYRB_BGI_INVALID:
608 			break;
609 		case MYRB_BGI_STARTED:
610 			if (!sdev)
611 				break;
612 			sdev_printk(KERN_INFO, sdev,
613 				    "Background Initialization Started\n");
614 			break;
615 		case MYRB_BGI_INPROGRESS:
616 			if (!sdev)
617 				break;
618 			if (bgi->blocks_done == last_bgi->blocks_done &&
619 			    bgi->ldev_num == last_bgi->ldev_num)
620 				break;
621 			sdev_printk(KERN_INFO, sdev,
622 				 "Background Initialization in Progress: %d%% completed\n",
623 				 (100 * (bgi->blocks_done >> 7))
624 				 / (bgi->ldev_size >> 7));
625 			break;
626 		case MYRB_BGI_SUSPENDED:
627 			if (!sdev)
628 				break;
629 			sdev_printk(KERN_INFO, sdev,
630 				    "Background Initialization Suspended\n");
631 			break;
632 		case MYRB_BGI_CANCELLED:
633 			if (!sdev)
634 				break;
635 			sdev_printk(KERN_INFO, sdev,
636 				    "Background Initialization Cancelled\n");
637 			break;
638 		}
639 		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
640 		break;
641 	case MYRB_STATUS_BGI_SUCCESS:
642 		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
643 			sdev_printk(KERN_INFO, sdev,
644 				    "Background Initialization Completed Successfully\n");
645 		cb->bgi_status.status = MYRB_BGI_INVALID;
646 		break;
647 	case MYRB_STATUS_BGI_ABORTED:
648 		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
649 			sdev_printk(KERN_INFO, sdev,
650 				    "Background Initialization Aborted\n");
651 		fallthrough;
652 	case MYRB_STATUS_NO_BGI_INPROGRESS:
653 		cb->bgi_status.status = MYRB_BGI_INVALID;
654 		break;
655 	}
656 	if (sdev)
657 		scsi_device_put(sdev);
658 	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
659 			  bgi, bgi_addr);
660 }
661 
662 /*
663  * myrb_hba_enquiry - updates the controller status
664  *
665  * Executes a DAC_V1_Enquiry command and updates the controller status.
666  *
667  * Return: command status
668  */
myrb_hba_enquiry(struct myrb_hba * cb)669 static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
670 {
671 	struct myrb_enquiry old, *new;
672 	unsigned short status;
673 
674 	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
675 
676 	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
677 	if (status != MYRB_STATUS_SUCCESS)
678 		return status;
679 
680 	new = cb->enquiry;
681 	if (new->ldev_count > old.ldev_count) {
682 		int ldev_num = old.ldev_count - 1;
683 
684 		while (++ldev_num < new->ldev_count)
685 			shost_printk(KERN_CRIT, cb->host,
686 				     "Logical Drive %d Now Exists\n",
687 				     ldev_num);
688 	}
689 	if (new->ldev_count < old.ldev_count) {
690 		int ldev_num = new->ldev_count - 1;
691 
692 		while (++ldev_num < old.ldev_count)
693 			shost_printk(KERN_CRIT, cb->host,
694 				     "Logical Drive %d No Longer Exists\n",
695 				     ldev_num);
696 	}
697 	if (new->status.deferred != old.status.deferred)
698 		shost_printk(KERN_CRIT, cb->host,
699 			     "Deferred Write Error Flag is now %s\n",
700 			     (new->status.deferred ? "TRUE" : "FALSE"));
701 	if (new->ev_seq != old.ev_seq) {
702 		cb->new_ev_seq = new->ev_seq;
703 		cb->need_err_info = true;
704 		shost_printk(KERN_INFO, cb->host,
705 			     "Event log %d/%d (%d/%d) available\n",
706 			     cb->old_ev_seq, cb->new_ev_seq,
707 			     old.ev_seq, new->ev_seq);
708 	}
709 	if ((new->ldev_critical > 0 &&
710 	     new->ldev_critical != old.ldev_critical) ||
711 	    (new->ldev_offline > 0 &&
712 	     new->ldev_offline != old.ldev_offline) ||
713 	    (new->ldev_count != old.ldev_count)) {
714 		shost_printk(KERN_INFO, cb->host,
715 			     "Logical drive count changed (%d/%d/%d)\n",
716 			     new->ldev_critical,
717 			     new->ldev_offline,
718 			     new->ldev_count);
719 		cb->need_ldev_info = true;
720 	}
721 	if (new->pdev_dead > 0 ||
722 	    new->pdev_dead != old.pdev_dead ||
723 	    time_after_eq(jiffies, cb->secondary_monitor_time
724 			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
725 		cb->need_bgi_status = cb->bgi_status_supported;
726 		cb->secondary_monitor_time = jiffies;
727 	}
728 	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
729 	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
730 	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
731 	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
732 		cb->need_rbld = true;
733 		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
734 	}
735 	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
736 		switch (new->rbld) {
737 		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
738 			shost_printk(KERN_INFO, cb->host,
739 				     "Consistency Check Completed Successfully\n");
740 			break;
741 		case MYRB_STDBY_RBLD_IN_PROGRESS:
742 		case MYRB_BG_RBLD_IN_PROGRESS:
743 			break;
744 		case MYRB_BG_CHECK_IN_PROGRESS:
745 			cb->need_cc_status = true;
746 			break;
747 		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
748 			shost_printk(KERN_INFO, cb->host,
749 				     "Consistency Check Completed with Error\n");
750 			break;
751 		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
752 			shost_printk(KERN_INFO, cb->host,
753 				     "Consistency Check Failed - Physical Device Failed\n");
754 			break;
755 		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
756 			shost_printk(KERN_INFO, cb->host,
757 				     "Consistency Check Failed - Logical Drive Failed\n");
758 			break;
759 		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
760 			shost_printk(KERN_INFO, cb->host,
761 				     "Consistency Check Failed - Other Causes\n");
762 			break;
763 		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
764 			shost_printk(KERN_INFO, cb->host,
765 				     "Consistency Check Successfully Terminated\n");
766 			break;
767 		}
768 	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
769 		cb->need_cc_status = true;
770 
771 	return MYRB_STATUS_SUCCESS;
772 }
773 
774 /*
775  * myrb_set_pdev_state - sets the device state for a physical device
776  *
777  * Return: command status
778  */
myrb_set_pdev_state(struct myrb_hba * cb,struct scsi_device * sdev,enum myrb_devstate state)779 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
780 		struct scsi_device *sdev, enum myrb_devstate state)
781 {
782 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
783 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
784 	unsigned short status;
785 
786 	mutex_lock(&cb->dcmd_mutex);
787 	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
788 	mbox->type3D.id = MYRB_DCMD_TAG;
789 	mbox->type3D.channel = sdev->channel;
790 	mbox->type3D.target = sdev->id;
791 	mbox->type3D.state = state & 0x1F;
792 	status = myrb_exec_cmd(cb, cmd_blk);
793 	mutex_unlock(&cb->dcmd_mutex);
794 
795 	return status;
796 }
797 
798 /*
799  * myrb_enable_mmio - enables the Memory Mailbox Interface
800  *
801  * PD and P controller types have no memory mailbox, but still need the
802  * other dma mapped memory.
803  *
804  * Return: true on success, false otherwise.
805  */
myrb_enable_mmio(struct myrb_hba * cb,mbox_mmio_init_t mmio_init_fn)806 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
807 {
808 	void __iomem *base = cb->io_base;
809 	struct pci_dev *pdev = cb->pdev;
810 	size_t err_table_size;
811 	size_t ldev_info_size;
812 	union myrb_cmd_mbox *cmd_mbox_mem;
813 	struct myrb_stat_mbox *stat_mbox_mem;
814 	union myrb_cmd_mbox mbox;
815 	unsigned short status;
816 
817 	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
818 
819 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
820 		dev_err(&pdev->dev, "DMA mask out of range\n");
821 		return false;
822 	}
823 
824 	cb->enquiry = dma_alloc_coherent(&pdev->dev,
825 					 sizeof(struct myrb_enquiry),
826 					 &cb->enquiry_addr, GFP_KERNEL);
827 	if (!cb->enquiry)
828 		return false;
829 
830 	err_table_size = sizeof(struct myrb_error_entry) *
831 		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
832 	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
833 					   &cb->err_table_addr, GFP_KERNEL);
834 	if (!cb->err_table)
835 		return false;
836 
837 	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
838 	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
839 					       &cb->ldev_info_addr, GFP_KERNEL);
840 	if (!cb->ldev_info_buf)
841 		return false;
842 
843 	/*
844 	 * Skip mailbox initialisation for PD and P Controllers
845 	 */
846 	if (!mmio_init_fn)
847 		return true;
848 
849 	/* These are the base addresses for the command memory mailbox array */
850 	cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
851 	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
852 						cb->cmd_mbox_size,
853 						&cb->cmd_mbox_addr,
854 						GFP_KERNEL);
855 	if (!cb->first_cmd_mbox)
856 		return false;
857 
858 	cmd_mbox_mem = cb->first_cmd_mbox;
859 	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
860 	cb->last_cmd_mbox = cmd_mbox_mem;
861 	cb->next_cmd_mbox = cb->first_cmd_mbox;
862 	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
863 	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
864 
865 	/* These are the base addresses for the status memory mailbox array */
866 	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
867 	    sizeof(struct myrb_stat_mbox);
868 	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
869 						 cb->stat_mbox_size,
870 						 &cb->stat_mbox_addr,
871 						 GFP_KERNEL);
872 	if (!cb->first_stat_mbox)
873 		return false;
874 
875 	stat_mbox_mem = cb->first_stat_mbox;
876 	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
877 	cb->last_stat_mbox = stat_mbox_mem;
878 	cb->next_stat_mbox = cb->first_stat_mbox;
879 
880 	/* Enable the Memory Mailbox Interface. */
881 	cb->dual_mode_interface = true;
882 	mbox.typeX.opcode = 0x2B;
883 	mbox.typeX.id = 0;
884 	mbox.typeX.opcode2 = 0x14;
885 	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
886 	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
887 
888 	status = mmio_init_fn(pdev, base, &mbox);
889 	if (status != MYRB_STATUS_SUCCESS) {
890 		cb->dual_mode_interface = false;
891 		mbox.typeX.opcode2 = 0x10;
892 		status = mmio_init_fn(pdev, base, &mbox);
893 		if (status != MYRB_STATUS_SUCCESS) {
894 			dev_err(&pdev->dev,
895 				"Failed to enable mailbox, statux %02X\n",
896 				status);
897 			return false;
898 		}
899 	}
900 	return true;
901 }
902 
903 /*
904  * myrb_get_hba_config - reads the configuration information
905  *
906  * Reads the configuration information from the controller and
907  * initializes the controller structure.
908  *
909  * Return: 0 on success, errno otherwise
910  */
myrb_get_hba_config(struct myrb_hba * cb)911 static int myrb_get_hba_config(struct myrb_hba *cb)
912 {
913 	struct myrb_enquiry2 *enquiry2;
914 	dma_addr_t enquiry2_addr;
915 	struct myrb_config2 *config2;
916 	dma_addr_t config2_addr;
917 	struct Scsi_Host *shost = cb->host;
918 	struct pci_dev *pdev = cb->pdev;
919 	int pchan_max = 0, pchan_cur = 0;
920 	unsigned short status;
921 	int ret = -ENODEV, memsize = 0;
922 
923 	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
924 				      &enquiry2_addr, GFP_KERNEL);
925 	if (!enquiry2) {
926 		shost_printk(KERN_ERR, cb->host,
927 			     "Failed to allocate V1 enquiry2 memory\n");
928 		return -ENOMEM;
929 	}
930 	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
931 				     &config2_addr, GFP_KERNEL);
932 	if (!config2) {
933 		shost_printk(KERN_ERR, cb->host,
934 			     "Failed to allocate V1 config2 memory\n");
935 		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
936 				  enquiry2, enquiry2_addr);
937 		return -ENOMEM;
938 	}
939 	mutex_lock(&cb->dma_mutex);
940 	status = myrb_hba_enquiry(cb);
941 	mutex_unlock(&cb->dma_mutex);
942 	if (status != MYRB_STATUS_SUCCESS) {
943 		shost_printk(KERN_WARNING, cb->host,
944 			     "Failed it issue V1 Enquiry\n");
945 		goto out_free;
946 	}
947 
948 	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
949 	if (status != MYRB_STATUS_SUCCESS) {
950 		shost_printk(KERN_WARNING, cb->host,
951 			     "Failed to issue V1 Enquiry2\n");
952 		goto out_free;
953 	}
954 
955 	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
956 	if (status != MYRB_STATUS_SUCCESS) {
957 		shost_printk(KERN_WARNING, cb->host,
958 			     "Failed to issue ReadConfig2\n");
959 		goto out_free;
960 	}
961 
962 	status = myrb_get_ldev_info(cb);
963 	if (status != MYRB_STATUS_SUCCESS) {
964 		shost_printk(KERN_WARNING, cb->host,
965 			     "Failed to get logical drive information\n");
966 		goto out_free;
967 	}
968 
969 	/*
970 	 * Initialize the Controller Model Name and Full Model Name fields.
971 	 */
972 	switch (enquiry2->hw.sub_model) {
973 	case DAC960_V1_P_PD_PU:
974 		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
975 			strcpy(cb->model_name, "DAC960PU");
976 		else
977 			strcpy(cb->model_name, "DAC960PD");
978 		break;
979 	case DAC960_V1_PL:
980 		strcpy(cb->model_name, "DAC960PL");
981 		break;
982 	case DAC960_V1_PG:
983 		strcpy(cb->model_name, "DAC960PG");
984 		break;
985 	case DAC960_V1_PJ:
986 		strcpy(cb->model_name, "DAC960PJ");
987 		break;
988 	case DAC960_V1_PR:
989 		strcpy(cb->model_name, "DAC960PR");
990 		break;
991 	case DAC960_V1_PT:
992 		strcpy(cb->model_name, "DAC960PT");
993 		break;
994 	case DAC960_V1_PTL0:
995 		strcpy(cb->model_name, "DAC960PTL0");
996 		break;
997 	case DAC960_V1_PRL:
998 		strcpy(cb->model_name, "DAC960PRL");
999 		break;
1000 	case DAC960_V1_PTL1:
1001 		strcpy(cb->model_name, "DAC960PTL1");
1002 		break;
1003 	case DAC960_V1_1164P:
1004 		strcpy(cb->model_name, "eXtremeRAID 1100");
1005 		break;
1006 	default:
1007 		shost_printk(KERN_WARNING, cb->host,
1008 			     "Unknown Model %X\n",
1009 			     enquiry2->hw.sub_model);
1010 		goto out;
1011 	}
1012 	/*
1013 	 * Initialize the Controller Firmware Version field and verify that it
1014 	 * is a supported firmware version.
1015 	 * The supported firmware versions are:
1016 	 *
1017 	 * DAC1164P		    5.06 and above
1018 	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
1019 	 * DAC960PU/PD/PL	    3.51 and above
1020 	 * DAC960PU/PD/PL/P	    2.73 and above
1021 	 */
1022 #if defined(CONFIG_ALPHA)
1023 	/*
1024 	 * DEC Alpha machines were often equipped with DAC960 cards that were
1025 	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1026 	 * the last custom FW revision to be released by DEC for these older
1027 	 * controllers, appears to work quite well with this driver.
1028 	 *
1029 	 * Cards tested successfully were several versions each of the PD and
1030 	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1031 	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1032 	 * back of the board, of:
1033 	 *
1034 	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
1035 	 *         or D040349 (3-channel)
1036 	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
1037 	 *         or D040397 (3-channel)
1038 	 */
1039 # define FIRMWARE_27X	"2.70"
1040 #else
1041 # define FIRMWARE_27X	"2.73"
1042 #endif
1043 
1044 	if (enquiry2->fw.major_version == 0) {
1045 		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1046 		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1047 		enquiry2->fw.firmware_type = '0';
1048 		enquiry2->fw.turn_id = 0;
1049 	}
1050 	snprintf(cb->fw_version, sizeof(cb->fw_version),
1051 		"%u.%02u-%c-%02u",
1052 		enquiry2->fw.major_version,
1053 		enquiry2->fw.minor_version,
1054 		enquiry2->fw.firmware_type,
1055 		enquiry2->fw.turn_id);
1056 	if (!((enquiry2->fw.major_version == 5 &&
1057 	       enquiry2->fw.minor_version >= 6) ||
1058 	      (enquiry2->fw.major_version == 4 &&
1059 	       enquiry2->fw.minor_version >= 6) ||
1060 	      (enquiry2->fw.major_version == 3 &&
1061 	       enquiry2->fw.minor_version >= 51) ||
1062 	      (enquiry2->fw.major_version == 2 &&
1063 	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1064 		shost_printk(KERN_WARNING, cb->host,
1065 			"Firmware Version '%s' unsupported\n",
1066 			cb->fw_version);
1067 		goto out;
1068 	}
1069 	/*
1070 	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1071 	 * Enclosure Management Enabled fields.
1072 	 */
1073 	switch (enquiry2->hw.model) {
1074 	case MYRB_5_CHANNEL_BOARD:
1075 		pchan_max = 5;
1076 		break;
1077 	case MYRB_3_CHANNEL_BOARD:
1078 	case MYRB_3_CHANNEL_ASIC_DAC:
1079 		pchan_max = 3;
1080 		break;
1081 	case MYRB_2_CHANNEL_BOARD:
1082 		pchan_max = 2;
1083 		break;
1084 	default:
1085 		pchan_max = enquiry2->cfg_chan;
1086 		break;
1087 	}
1088 	pchan_cur = enquiry2->cur_chan;
1089 	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1090 		cb->bus_width = 32;
1091 	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1092 		cb->bus_width = 16;
1093 	else
1094 		cb->bus_width = 8;
1095 	cb->ldev_block_size = enquiry2->ldev_block_size;
1096 	shost->max_channel = pchan_cur;
1097 	shost->max_id = enquiry2->max_targets;
1098 	memsize = enquiry2->mem_size >> 20;
1099 	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1100 	/*
1101 	 * Initialize the Controller Queue Depth, Driver Queue Depth,
1102 	 * Logical Drive Count, Maximum Blocks per Command, Controller
1103 	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1104 	 * The Driver Queue Depth must be at most one less than the
1105 	 * Controller Queue Depth to allow for an automatic drive
1106 	 * rebuild operation.
1107 	 */
1108 	shost->can_queue = cb->enquiry->max_tcq;
1109 	if (shost->can_queue < 3)
1110 		shost->can_queue = enquiry2->max_cmds;
1111 	if (shost->can_queue < 3)
1112 		/* Play safe and disable TCQ */
1113 		shost->can_queue = 1;
1114 
1115 	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1116 		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1117 	shost->max_sectors = enquiry2->max_sectors;
1118 	shost->sg_tablesize = enquiry2->max_sge;
1119 	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1120 		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1121 	/*
1122 	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1123 	 */
1124 	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1125 		>> (10 - MYRB_BLKSIZE_BITS);
1126 	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1127 		>> (10 - MYRB_BLKSIZE_BITS);
1128 	/* Assume 255/63 translation */
1129 	cb->ldev_geom_heads = 255;
1130 	cb->ldev_geom_sectors = 63;
1131 	if (config2->drive_geometry) {
1132 		cb->ldev_geom_heads = 128;
1133 		cb->ldev_geom_sectors = 32;
1134 	}
1135 
1136 	/*
1137 	 * Initialize the Background Initialization Status.
1138 	 */
1139 	if ((cb->fw_version[0] == '4' &&
1140 	     strcmp(cb->fw_version, "4.08") >= 0) ||
1141 	    (cb->fw_version[0] == '5' &&
1142 	     strcmp(cb->fw_version, "5.08") >= 0)) {
1143 		cb->bgi_status_supported = true;
1144 		myrb_bgi_control(cb);
1145 	}
1146 	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1147 	ret = 0;
1148 
1149 out:
1150 	shost_printk(KERN_INFO, cb->host,
1151 		"Configuring %s PCI RAID Controller\n", cb->model_name);
1152 	shost_printk(KERN_INFO, cb->host,
1153 		"  Firmware Version: %s, Memory Size: %dMB\n",
1154 		cb->fw_version, memsize);
1155 	if (cb->io_addr == 0)
1156 		shost_printk(KERN_INFO, cb->host,
1157 			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1158 			(unsigned long)cb->pci_addr, cb->irq);
1159 	else
1160 		shost_printk(KERN_INFO, cb->host,
1161 			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1162 			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1163 			cb->irq);
1164 	shost_printk(KERN_INFO, cb->host,
1165 		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1166 		cb->host->can_queue, cb->host->max_sectors);
1167 	shost_printk(KERN_INFO, cb->host,
1168 		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1169 		     cb->host->can_queue, cb->host->sg_tablesize,
1170 		     MYRB_SCATTER_GATHER_LIMIT);
1171 	shost_printk(KERN_INFO, cb->host,
1172 		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1173 		     cb->stripe_size, cb->segment_size,
1174 		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
1175 		     cb->safte_enabled ?
1176 		     "  SAF-TE Enclosure Management Enabled" : "");
1177 	shost_printk(KERN_INFO, cb->host,
1178 		     "  Physical: %d/%d channels %d/%d/%d devices\n",
1179 		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1180 		     cb->host->max_id);
1181 
1182 	shost_printk(KERN_INFO, cb->host,
1183 		     "  Logical: 1/1 channels, %d/%d disks\n",
1184 		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1185 
1186 out_free:
1187 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1188 			  enquiry2, enquiry2_addr);
1189 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1190 			  config2, config2_addr);
1191 
1192 	return ret;
1193 }
1194 
1195 /*
1196  * myrb_unmap - unmaps controller structures
1197  */
myrb_unmap(struct myrb_hba * cb)1198 static void myrb_unmap(struct myrb_hba *cb)
1199 {
1200 	if (cb->ldev_info_buf) {
1201 		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1202 			MYRB_MAX_LDEVS;
1203 		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1204 				  cb->ldev_info_buf, cb->ldev_info_addr);
1205 		cb->ldev_info_buf = NULL;
1206 	}
1207 	if (cb->err_table) {
1208 		size_t err_table_size = sizeof(struct myrb_error_entry) *
1209 			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1210 		dma_free_coherent(&cb->pdev->dev, err_table_size,
1211 				  cb->err_table, cb->err_table_addr);
1212 		cb->err_table = NULL;
1213 	}
1214 	if (cb->enquiry) {
1215 		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1216 				  cb->enquiry, cb->enquiry_addr);
1217 		cb->enquiry = NULL;
1218 	}
1219 	if (cb->first_stat_mbox) {
1220 		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1221 				  cb->first_stat_mbox, cb->stat_mbox_addr);
1222 		cb->first_stat_mbox = NULL;
1223 	}
1224 	if (cb->first_cmd_mbox) {
1225 		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1226 				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
1227 		cb->first_cmd_mbox = NULL;
1228 	}
1229 }
1230 
1231 /*
1232  * myrb_cleanup - cleanup controller structures
1233  */
myrb_cleanup(struct myrb_hba * cb)1234 static void myrb_cleanup(struct myrb_hba *cb)
1235 {
1236 	struct pci_dev *pdev = cb->pdev;
1237 
1238 	/* Free the memory mailbox, status, and related structures */
1239 	myrb_unmap(cb);
1240 
1241 	if (cb->mmio_base) {
1242 		cb->disable_intr(cb->io_base);
1243 		iounmap(cb->mmio_base);
1244 	}
1245 	if (cb->irq)
1246 		free_irq(cb->irq, cb);
1247 	if (cb->io_addr)
1248 		release_region(cb->io_addr, 0x80);
1249 	pci_set_drvdata(pdev, NULL);
1250 	pci_disable_device(pdev);
1251 	scsi_host_put(cb->host);
1252 }
1253 
myrb_host_reset(struct scsi_cmnd * scmd)1254 static int myrb_host_reset(struct scsi_cmnd *scmd)
1255 {
1256 	struct Scsi_Host *shost = scmd->device->host;
1257 	struct myrb_hba *cb = shost_priv(shost);
1258 
1259 	cb->reset(cb->io_base);
1260 	return SUCCESS;
1261 }
1262 
myrb_pthru_queuecommand(struct Scsi_Host * shost,struct scsi_cmnd * scmd)1263 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1264 		struct scsi_cmnd *scmd)
1265 {
1266 	struct myrb_hba *cb = shost_priv(shost);
1267 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1268 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1269 	struct myrb_dcdb *dcdb;
1270 	dma_addr_t dcdb_addr;
1271 	struct scsi_device *sdev = scmd->device;
1272 	struct scatterlist *sgl;
1273 	unsigned long flags;
1274 	int nsge;
1275 
1276 	myrb_reset_cmd(cmd_blk);
1277 	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1278 	if (!dcdb)
1279 		return SCSI_MLQUEUE_HOST_BUSY;
1280 	nsge = scsi_dma_map(scmd);
1281 	if (nsge > 1) {
1282 		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1283 		scmd->result = (DID_ERROR << 16);
1284 		scmd->scsi_done(scmd);
1285 		return 0;
1286 	}
1287 
1288 	mbox->type3.opcode = MYRB_CMD_DCDB;
1289 	mbox->type3.id = scmd->request->tag + 3;
1290 	mbox->type3.addr = dcdb_addr;
1291 	dcdb->channel = sdev->channel;
1292 	dcdb->target = sdev->id;
1293 	switch (scmd->sc_data_direction) {
1294 	case DMA_NONE:
1295 		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1296 		break;
1297 	case DMA_TO_DEVICE:
1298 		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1299 		break;
1300 	case DMA_FROM_DEVICE:
1301 		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1302 		break;
1303 	default:
1304 		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1305 		break;
1306 	}
1307 	dcdb->early_status = false;
1308 	if (scmd->request->timeout <= 10)
1309 		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1310 	else if (scmd->request->timeout <= 60)
1311 		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1312 	else if (scmd->request->timeout <= 600)
1313 		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1314 	else
1315 		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1316 	dcdb->no_autosense = false;
1317 	dcdb->allow_disconnect = true;
1318 	sgl = scsi_sglist(scmd);
1319 	dcdb->dma_addr = sg_dma_address(sgl);
1320 	if (sg_dma_len(sgl) > USHRT_MAX) {
1321 		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1322 		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1323 	} else {
1324 		dcdb->xfer_len_lo = sg_dma_len(sgl);
1325 		dcdb->xfer_len_hi4 = 0;
1326 	}
1327 	dcdb->cdb_len = scmd->cmd_len;
1328 	dcdb->sense_len = sizeof(dcdb->sense);
1329 	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1330 
1331 	spin_lock_irqsave(&cb->queue_lock, flags);
1332 	cb->qcmd(cb, cmd_blk);
1333 	spin_unlock_irqrestore(&cb->queue_lock, flags);
1334 	return 0;
1335 }
1336 
myrb_inquiry(struct myrb_hba * cb,struct scsi_cmnd * scmd)1337 static void myrb_inquiry(struct myrb_hba *cb,
1338 		struct scsi_cmnd *scmd)
1339 {
1340 	unsigned char inq[36] = {
1341 		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1342 		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1343 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1344 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1345 		0x20, 0x20, 0x20, 0x20,
1346 	};
1347 
1348 	if (cb->bus_width > 16)
1349 		inq[7] |= 1 << 6;
1350 	if (cb->bus_width > 8)
1351 		inq[7] |= 1 << 5;
1352 	memcpy(&inq[16], cb->model_name, 16);
1353 	memcpy(&inq[32], cb->fw_version, 1);
1354 	memcpy(&inq[33], &cb->fw_version[2], 2);
1355 	memcpy(&inq[35], &cb->fw_version[7], 1);
1356 
1357 	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1358 }
1359 
1360 static void
myrb_mode_sense(struct myrb_hba * cb,struct scsi_cmnd * scmd,struct myrb_ldev_info * ldev_info)1361 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1362 		struct myrb_ldev_info *ldev_info)
1363 {
1364 	unsigned char modes[32], *mode_pg;
1365 	bool dbd;
1366 	size_t mode_len;
1367 
1368 	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1369 	if (dbd) {
1370 		mode_len = 24;
1371 		mode_pg = &modes[4];
1372 	} else {
1373 		mode_len = 32;
1374 		mode_pg = &modes[12];
1375 	}
1376 	memset(modes, 0, sizeof(modes));
1377 	modes[0] = mode_len - 1;
1378 	if (!dbd) {
1379 		unsigned char *block_desc = &modes[4];
1380 
1381 		modes[3] = 8;
1382 		put_unaligned_be32(ldev_info->size, &block_desc[0]);
1383 		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1384 	}
1385 	mode_pg[0] = 0x08;
1386 	mode_pg[1] = 0x12;
1387 	if (ldev_info->wb_enabled)
1388 		mode_pg[2] |= 0x04;
1389 	if (cb->segment_size) {
1390 		mode_pg[2] |= 0x08;
1391 		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1392 	}
1393 
1394 	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1395 }
1396 
myrb_request_sense(struct myrb_hba * cb,struct scsi_cmnd * scmd)1397 static void myrb_request_sense(struct myrb_hba *cb,
1398 		struct scsi_cmnd *scmd)
1399 {
1400 	scsi_build_sense_buffer(0, scmd->sense_buffer,
1401 				NO_SENSE, 0, 0);
1402 	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1403 				 SCSI_SENSE_BUFFERSIZE);
1404 }
1405 
myrb_read_capacity(struct myrb_hba * cb,struct scsi_cmnd * scmd,struct myrb_ldev_info * ldev_info)1406 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1407 		struct myrb_ldev_info *ldev_info)
1408 {
1409 	unsigned char data[8];
1410 
1411 	dev_dbg(&scmd->device->sdev_gendev,
1412 		"Capacity %u, blocksize %u\n",
1413 		ldev_info->size, cb->ldev_block_size);
1414 	put_unaligned_be32(ldev_info->size - 1, &data[0]);
1415 	put_unaligned_be32(cb->ldev_block_size, &data[4]);
1416 	scsi_sg_copy_from_buffer(scmd, data, 8);
1417 }
1418 
myrb_ldev_queuecommand(struct Scsi_Host * shost,struct scsi_cmnd * scmd)1419 static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1420 		struct scsi_cmnd *scmd)
1421 {
1422 	struct myrb_hba *cb = shost_priv(shost);
1423 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1424 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1425 	struct myrb_ldev_info *ldev_info;
1426 	struct scsi_device *sdev = scmd->device;
1427 	struct scatterlist *sgl;
1428 	unsigned long flags;
1429 	u64 lba;
1430 	u32 block_cnt;
1431 	int nsge;
1432 
1433 	ldev_info = sdev->hostdata;
1434 	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
1435 	    ldev_info->state != MYRB_DEVICE_WO) {
1436 		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1437 			sdev->id, ldev_info ? ldev_info->state : 0xff);
1438 		scmd->result = (DID_BAD_TARGET << 16);
1439 		scmd->scsi_done(scmd);
1440 		return 0;
1441 	}
1442 	switch (scmd->cmnd[0]) {
1443 	case TEST_UNIT_READY:
1444 		scmd->result = (DID_OK << 16);
1445 		scmd->scsi_done(scmd);
1446 		return 0;
1447 	case INQUIRY:
1448 		if (scmd->cmnd[1] & 1) {
1449 			/* Illegal request, invalid field in CDB */
1450 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1451 						ILLEGAL_REQUEST, 0x24, 0);
1452 			scmd->result = (DRIVER_SENSE << 24) |
1453 				SAM_STAT_CHECK_CONDITION;
1454 		} else {
1455 			myrb_inquiry(cb, scmd);
1456 			scmd->result = (DID_OK << 16);
1457 		}
1458 		scmd->scsi_done(scmd);
1459 		return 0;
1460 	case SYNCHRONIZE_CACHE:
1461 		scmd->result = (DID_OK << 16);
1462 		scmd->scsi_done(scmd);
1463 		return 0;
1464 	case MODE_SENSE:
1465 		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1466 		    (scmd->cmnd[2] & 0x3F) != 0x08) {
1467 			/* Illegal request, invalid field in CDB */
1468 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1469 						ILLEGAL_REQUEST, 0x24, 0);
1470 			scmd->result = (DRIVER_SENSE << 24) |
1471 				SAM_STAT_CHECK_CONDITION;
1472 		} else {
1473 			myrb_mode_sense(cb, scmd, ldev_info);
1474 			scmd->result = (DID_OK << 16);
1475 		}
1476 		scmd->scsi_done(scmd);
1477 		return 0;
1478 	case READ_CAPACITY:
1479 		if ((scmd->cmnd[1] & 1) ||
1480 		    (scmd->cmnd[8] & 1)) {
1481 			/* Illegal request, invalid field in CDB */
1482 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1483 						ILLEGAL_REQUEST, 0x24, 0);
1484 			scmd->result = (DRIVER_SENSE << 24) |
1485 				SAM_STAT_CHECK_CONDITION;
1486 			scmd->scsi_done(scmd);
1487 			return 0;
1488 		}
1489 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1490 		if (lba) {
1491 			/* Illegal request, invalid field in CDB */
1492 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1493 						ILLEGAL_REQUEST, 0x24, 0);
1494 			scmd->result = (DRIVER_SENSE << 24) |
1495 				SAM_STAT_CHECK_CONDITION;
1496 			scmd->scsi_done(scmd);
1497 			return 0;
1498 		}
1499 		myrb_read_capacity(cb, scmd, ldev_info);
1500 		scmd->scsi_done(scmd);
1501 		return 0;
1502 	case REQUEST_SENSE:
1503 		myrb_request_sense(cb, scmd);
1504 		scmd->result = (DID_OK << 16);
1505 		return 0;
1506 	case SEND_DIAGNOSTIC:
1507 		if (scmd->cmnd[1] != 0x04) {
1508 			/* Illegal request, invalid field in CDB */
1509 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1510 						ILLEGAL_REQUEST, 0x24, 0);
1511 			scmd->result = (DRIVER_SENSE << 24) |
1512 				SAM_STAT_CHECK_CONDITION;
1513 		} else {
1514 			/* Assume good status */
1515 			scmd->result = (DID_OK << 16);
1516 		}
1517 		scmd->scsi_done(scmd);
1518 		return 0;
1519 	case READ_6:
1520 		if (ldev_info->state == MYRB_DEVICE_WO) {
1521 			/* Data protect, attempt to read invalid data */
1522 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1523 						DATA_PROTECT, 0x21, 0x06);
1524 			scmd->result = (DRIVER_SENSE << 24) |
1525 				SAM_STAT_CHECK_CONDITION;
1526 			scmd->scsi_done(scmd);
1527 			return 0;
1528 		}
1529 		fallthrough;
1530 	case WRITE_6:
1531 		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1532 		       (scmd->cmnd[2] << 8) |
1533 		       scmd->cmnd[3]);
1534 		block_cnt = scmd->cmnd[4];
1535 		break;
1536 	case READ_10:
1537 		if (ldev_info->state == MYRB_DEVICE_WO) {
1538 			/* Data protect, attempt to read invalid data */
1539 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1540 						DATA_PROTECT, 0x21, 0x06);
1541 			scmd->result = (DRIVER_SENSE << 24) |
1542 				SAM_STAT_CHECK_CONDITION;
1543 			scmd->scsi_done(scmd);
1544 			return 0;
1545 		}
1546 		fallthrough;
1547 	case WRITE_10:
1548 	case VERIFY:		/* 0x2F */
1549 	case WRITE_VERIFY:	/* 0x2E */
1550 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1551 		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1552 		break;
1553 	case READ_12:
1554 		if (ldev_info->state == MYRB_DEVICE_WO) {
1555 			/* Data protect, attempt to read invalid data */
1556 			scsi_build_sense_buffer(0, scmd->sense_buffer,
1557 						DATA_PROTECT, 0x21, 0x06);
1558 			scmd->result = (DRIVER_SENSE << 24) |
1559 				SAM_STAT_CHECK_CONDITION;
1560 			scmd->scsi_done(scmd);
1561 			return 0;
1562 		}
1563 		fallthrough;
1564 	case WRITE_12:
1565 	case VERIFY_12: /* 0xAF */
1566 	case WRITE_VERIFY_12:	/* 0xAE */
1567 		lba = get_unaligned_be32(&scmd->cmnd[2]);
1568 		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1569 		break;
1570 	default:
1571 		/* Illegal request, invalid opcode */
1572 		scsi_build_sense_buffer(0, scmd->sense_buffer,
1573 					ILLEGAL_REQUEST, 0x20, 0);
1574 		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1575 		scmd->scsi_done(scmd);
1576 		return 0;
1577 	}
1578 
1579 	myrb_reset_cmd(cmd_blk);
1580 	mbox->type5.id = scmd->request->tag + 3;
1581 	if (scmd->sc_data_direction == DMA_NONE)
1582 		goto submit;
1583 	nsge = scsi_dma_map(scmd);
1584 	if (nsge == 1) {
1585 		sgl = scsi_sglist(scmd);
1586 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1587 			mbox->type5.opcode = MYRB_CMD_READ;
1588 		else
1589 			mbox->type5.opcode = MYRB_CMD_WRITE;
1590 
1591 		mbox->type5.ld.xfer_len = block_cnt;
1592 		mbox->type5.ld.ldev_num = sdev->id;
1593 		mbox->type5.lba = lba;
1594 		mbox->type5.addr = (u32)sg_dma_address(sgl);
1595 	} else {
1596 		struct myrb_sge *hw_sgl;
1597 		dma_addr_t hw_sgl_addr;
1598 		int i;
1599 
1600 		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1601 		if (!hw_sgl)
1602 			return SCSI_MLQUEUE_HOST_BUSY;
1603 
1604 		cmd_blk->sgl = hw_sgl;
1605 		cmd_blk->sgl_addr = hw_sgl_addr;
1606 
1607 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1608 			mbox->type5.opcode = MYRB_CMD_READ_SG;
1609 		else
1610 			mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1611 
1612 		mbox->type5.ld.xfer_len = block_cnt;
1613 		mbox->type5.ld.ldev_num = sdev->id;
1614 		mbox->type5.lba = lba;
1615 		mbox->type5.addr = hw_sgl_addr;
1616 		mbox->type5.sg_count = nsge;
1617 
1618 		scsi_for_each_sg(scmd, sgl, nsge, i) {
1619 			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1620 			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1621 			hw_sgl++;
1622 		}
1623 	}
1624 submit:
1625 	spin_lock_irqsave(&cb->queue_lock, flags);
1626 	cb->qcmd(cb, cmd_blk);
1627 	spin_unlock_irqrestore(&cb->queue_lock, flags);
1628 
1629 	return 0;
1630 }
1631 
myrb_queuecommand(struct Scsi_Host * shost,struct scsi_cmnd * scmd)1632 static int myrb_queuecommand(struct Scsi_Host *shost,
1633 		struct scsi_cmnd *scmd)
1634 {
1635 	struct scsi_device *sdev = scmd->device;
1636 
1637 	if (sdev->channel > myrb_logical_channel(shost)) {
1638 		scmd->result = (DID_BAD_TARGET << 16);
1639 		scmd->scsi_done(scmd);
1640 		return 0;
1641 	}
1642 	if (sdev->channel == myrb_logical_channel(shost))
1643 		return myrb_ldev_queuecommand(shost, scmd);
1644 
1645 	return myrb_pthru_queuecommand(shost, scmd);
1646 }
1647 
myrb_ldev_slave_alloc(struct scsi_device * sdev)1648 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1649 {
1650 	struct myrb_hba *cb = shost_priv(sdev->host);
1651 	struct myrb_ldev_info *ldev_info;
1652 	unsigned short ldev_num = sdev->id;
1653 	enum raid_level level;
1654 
1655 	ldev_info = cb->ldev_info_buf + ldev_num;
1656 	if (!ldev_info)
1657 		return -ENXIO;
1658 
1659 	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1660 	if (!sdev->hostdata)
1661 		return -ENOMEM;
1662 	dev_dbg(&sdev->sdev_gendev,
1663 		"slave alloc ldev %d state %x\n",
1664 		ldev_num, ldev_info->state);
1665 	memcpy(sdev->hostdata, ldev_info,
1666 	       sizeof(*ldev_info));
1667 	switch (ldev_info->raid_level) {
1668 	case MYRB_RAID_LEVEL0:
1669 		level = RAID_LEVEL_LINEAR;
1670 		break;
1671 	case MYRB_RAID_LEVEL1:
1672 		level = RAID_LEVEL_1;
1673 		break;
1674 	case MYRB_RAID_LEVEL3:
1675 		level = RAID_LEVEL_3;
1676 		break;
1677 	case MYRB_RAID_LEVEL5:
1678 		level = RAID_LEVEL_5;
1679 		break;
1680 	case MYRB_RAID_LEVEL6:
1681 		level = RAID_LEVEL_6;
1682 		break;
1683 	case MYRB_RAID_JBOD:
1684 		level = RAID_LEVEL_JBOD;
1685 		break;
1686 	default:
1687 		level = RAID_LEVEL_UNKNOWN;
1688 		break;
1689 	}
1690 	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1691 	return 0;
1692 }
1693 
myrb_pdev_slave_alloc(struct scsi_device * sdev)1694 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1695 {
1696 	struct myrb_hba *cb = shost_priv(sdev->host);
1697 	struct myrb_pdev_state *pdev_info;
1698 	unsigned short status;
1699 
1700 	if (sdev->id > MYRB_MAX_TARGETS)
1701 		return -ENXIO;
1702 
1703 	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1704 	if (!pdev_info)
1705 		return -ENOMEM;
1706 
1707 	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1708 				  sdev, pdev_info);
1709 	if (status != MYRB_STATUS_SUCCESS) {
1710 		dev_dbg(&sdev->sdev_gendev,
1711 			"Failed to get device state, status %x\n",
1712 			status);
1713 		kfree(pdev_info);
1714 		return -ENXIO;
1715 	}
1716 	if (!pdev_info->present) {
1717 		dev_dbg(&sdev->sdev_gendev,
1718 			"device not present, skip\n");
1719 		kfree(pdev_info);
1720 		return -ENXIO;
1721 	}
1722 	dev_dbg(&sdev->sdev_gendev,
1723 		"slave alloc pdev %d:%d state %x\n",
1724 		sdev->channel, sdev->id, pdev_info->state);
1725 	sdev->hostdata = pdev_info;
1726 
1727 	return 0;
1728 }
1729 
myrb_slave_alloc(struct scsi_device * sdev)1730 static int myrb_slave_alloc(struct scsi_device *sdev)
1731 {
1732 	if (sdev->channel > myrb_logical_channel(sdev->host))
1733 		return -ENXIO;
1734 
1735 	if (sdev->lun > 0)
1736 		return -ENXIO;
1737 
1738 	if (sdev->channel == myrb_logical_channel(sdev->host))
1739 		return myrb_ldev_slave_alloc(sdev);
1740 
1741 	return myrb_pdev_slave_alloc(sdev);
1742 }
1743 
myrb_slave_configure(struct scsi_device * sdev)1744 static int myrb_slave_configure(struct scsi_device *sdev)
1745 {
1746 	struct myrb_ldev_info *ldev_info;
1747 
1748 	if (sdev->channel > myrb_logical_channel(sdev->host))
1749 		return -ENXIO;
1750 
1751 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
1752 		sdev->no_uld_attach = 1;
1753 		return 0;
1754 	}
1755 	if (sdev->lun != 0)
1756 		return -ENXIO;
1757 
1758 	ldev_info = sdev->hostdata;
1759 	if (!ldev_info)
1760 		return -ENXIO;
1761 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1762 		sdev_printk(KERN_INFO, sdev,
1763 			    "Logical drive is %s\n",
1764 			    myrb_devstate_name(ldev_info->state));
1765 
1766 	sdev->tagged_supported = 1;
1767 	return 0;
1768 }
1769 
myrb_slave_destroy(struct scsi_device * sdev)1770 static void myrb_slave_destroy(struct scsi_device *sdev)
1771 {
1772 	kfree(sdev->hostdata);
1773 }
1774 
myrb_biosparam(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int geom[])1775 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1776 		sector_t capacity, int geom[])
1777 {
1778 	struct myrb_hba *cb = shost_priv(sdev->host);
1779 
1780 	geom[0] = cb->ldev_geom_heads;
1781 	geom[1] = cb->ldev_geom_sectors;
1782 	geom[2] = sector_div(capacity, geom[0] * geom[1]);
1783 
1784 	return 0;
1785 }
1786 
raid_state_show(struct device * dev,struct device_attribute * attr,char * buf)1787 static ssize_t raid_state_show(struct device *dev,
1788 		struct device_attribute *attr, char *buf)
1789 {
1790 	struct scsi_device *sdev = to_scsi_device(dev);
1791 	struct myrb_hba *cb = shost_priv(sdev->host);
1792 	int ret;
1793 
1794 	if (!sdev->hostdata)
1795 		return snprintf(buf, 16, "Unknown\n");
1796 
1797 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1798 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1799 		const char *name;
1800 
1801 		name = myrb_devstate_name(ldev_info->state);
1802 		if (name)
1803 			ret = snprintf(buf, 32, "%s\n", name);
1804 		else
1805 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1806 				       ldev_info->state);
1807 	} else {
1808 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
1809 		unsigned short status;
1810 		const char *name;
1811 
1812 		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1813 					  sdev, pdev_info);
1814 		if (status != MYRB_STATUS_SUCCESS)
1815 			sdev_printk(KERN_INFO, sdev,
1816 				    "Failed to get device state, status %x\n",
1817 				    status);
1818 
1819 		if (!pdev_info->present)
1820 			name = "Removed";
1821 		else
1822 			name = myrb_devstate_name(pdev_info->state);
1823 		if (name)
1824 			ret = snprintf(buf, 32, "%s\n", name);
1825 		else
1826 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1827 				       pdev_info->state);
1828 	}
1829 	return ret;
1830 }
1831 
raid_state_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1832 static ssize_t raid_state_store(struct device *dev,
1833 		struct device_attribute *attr, const char *buf, size_t count)
1834 {
1835 	struct scsi_device *sdev = to_scsi_device(dev);
1836 	struct myrb_hba *cb = shost_priv(sdev->host);
1837 	struct myrb_pdev_state *pdev_info;
1838 	enum myrb_devstate new_state;
1839 	unsigned short status;
1840 
1841 	if (!strncmp(buf, "kill", 4) ||
1842 	    !strncmp(buf, "offline", 7))
1843 		new_state = MYRB_DEVICE_DEAD;
1844 	else if (!strncmp(buf, "online", 6))
1845 		new_state = MYRB_DEVICE_ONLINE;
1846 	else if (!strncmp(buf, "standby", 7))
1847 		new_state = MYRB_DEVICE_STANDBY;
1848 	else
1849 		return -EINVAL;
1850 
1851 	pdev_info = sdev->hostdata;
1852 	if (!pdev_info) {
1853 		sdev_printk(KERN_INFO, sdev,
1854 			    "Failed - no physical device information\n");
1855 		return -ENXIO;
1856 	}
1857 	if (!pdev_info->present) {
1858 		sdev_printk(KERN_INFO, sdev,
1859 			    "Failed - device not present\n");
1860 		return -ENXIO;
1861 	}
1862 
1863 	if (pdev_info->state == new_state)
1864 		return count;
1865 
1866 	status = myrb_set_pdev_state(cb, sdev, new_state);
1867 	switch (status) {
1868 	case MYRB_STATUS_SUCCESS:
1869 		break;
1870 	case MYRB_STATUS_START_DEVICE_FAILED:
1871 		sdev_printk(KERN_INFO, sdev,
1872 			     "Failed - Unable to Start Device\n");
1873 		count = -EAGAIN;
1874 		break;
1875 	case MYRB_STATUS_NO_DEVICE:
1876 		sdev_printk(KERN_INFO, sdev,
1877 			    "Failed - No Device at Address\n");
1878 		count = -ENODEV;
1879 		break;
1880 	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1881 		sdev_printk(KERN_INFO, sdev,
1882 			 "Failed - Invalid Channel or Target or Modifier\n");
1883 		count = -EINVAL;
1884 		break;
1885 	case MYRB_STATUS_CHANNEL_BUSY:
1886 		sdev_printk(KERN_INFO, sdev,
1887 			 "Failed - Channel Busy\n");
1888 		count = -EBUSY;
1889 		break;
1890 	default:
1891 		sdev_printk(KERN_INFO, sdev,
1892 			 "Failed - Unexpected Status %04X\n", status);
1893 		count = -EIO;
1894 		break;
1895 	}
1896 	return count;
1897 }
1898 static DEVICE_ATTR_RW(raid_state);
1899 
raid_level_show(struct device * dev,struct device_attribute * attr,char * buf)1900 static ssize_t raid_level_show(struct device *dev,
1901 		struct device_attribute *attr, char *buf)
1902 {
1903 	struct scsi_device *sdev = to_scsi_device(dev);
1904 
1905 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1906 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1907 		const char *name;
1908 
1909 		if (!ldev_info)
1910 			return -ENXIO;
1911 
1912 		name = myrb_raidlevel_name(ldev_info->raid_level);
1913 		if (!name)
1914 			return snprintf(buf, 32, "Invalid (%02X)\n",
1915 					ldev_info->state);
1916 		return snprintf(buf, 32, "%s\n", name);
1917 	}
1918 	return snprintf(buf, 32, "Physical Drive\n");
1919 }
1920 static DEVICE_ATTR_RO(raid_level);
1921 
rebuild_show(struct device * dev,struct device_attribute * attr,char * buf)1922 static ssize_t rebuild_show(struct device *dev,
1923 		struct device_attribute *attr, char *buf)
1924 {
1925 	struct scsi_device *sdev = to_scsi_device(dev);
1926 	struct myrb_hba *cb = shost_priv(sdev->host);
1927 	struct myrb_rbld_progress rbld_buf;
1928 	unsigned char status;
1929 
1930 	if (sdev->channel < myrb_logical_channel(sdev->host))
1931 		return snprintf(buf, 32, "physical device - not rebuilding\n");
1932 
1933 	status = myrb_get_rbld_progress(cb, &rbld_buf);
1934 
1935 	if (rbld_buf.ldev_num != sdev->id ||
1936 	    status != MYRB_STATUS_SUCCESS)
1937 		return snprintf(buf, 32, "not rebuilding\n");
1938 
1939 	return snprintf(buf, 32, "rebuilding block %u of %u\n",
1940 			rbld_buf.ldev_size - rbld_buf.blocks_left,
1941 			rbld_buf.ldev_size);
1942 }
1943 
rebuild_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1944 static ssize_t rebuild_store(struct device *dev,
1945 		struct device_attribute *attr, const char *buf, size_t count)
1946 {
1947 	struct scsi_device *sdev = to_scsi_device(dev);
1948 	struct myrb_hba *cb = shost_priv(sdev->host);
1949 	struct myrb_cmdblk *cmd_blk;
1950 	union myrb_cmd_mbox *mbox;
1951 	unsigned short status;
1952 	int rc, start;
1953 	const char *msg;
1954 
1955 	rc = kstrtoint(buf, 0, &start);
1956 	if (rc)
1957 		return rc;
1958 
1959 	if (sdev->channel >= myrb_logical_channel(sdev->host))
1960 		return -ENXIO;
1961 
1962 	status = myrb_get_rbld_progress(cb, NULL);
1963 	if (start) {
1964 		if (status == MYRB_STATUS_SUCCESS) {
1965 			sdev_printk(KERN_INFO, sdev,
1966 				    "Rebuild Not Initiated; already in progress\n");
1967 			return -EALREADY;
1968 		}
1969 		mutex_lock(&cb->dcmd_mutex);
1970 		cmd_blk = &cb->dcmd_blk;
1971 		myrb_reset_cmd(cmd_blk);
1972 		mbox = &cmd_blk->mbox;
1973 		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1974 		mbox->type3D.id = MYRB_DCMD_TAG;
1975 		mbox->type3D.channel = sdev->channel;
1976 		mbox->type3D.target = sdev->id;
1977 		status = myrb_exec_cmd(cb, cmd_blk);
1978 		mutex_unlock(&cb->dcmd_mutex);
1979 	} else {
1980 		struct pci_dev *pdev = cb->pdev;
1981 		unsigned char *rate;
1982 		dma_addr_t rate_addr;
1983 
1984 		if (status != MYRB_STATUS_SUCCESS) {
1985 			sdev_printk(KERN_INFO, sdev,
1986 				    "Rebuild Not Cancelled; not in progress\n");
1987 			return 0;
1988 		}
1989 
1990 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1991 					  &rate_addr, GFP_KERNEL);
1992 		if (rate == NULL) {
1993 			sdev_printk(KERN_INFO, sdev,
1994 				    "Cancellation of Rebuild Failed - Out of Memory\n");
1995 			return -ENOMEM;
1996 		}
1997 		mutex_lock(&cb->dcmd_mutex);
1998 		cmd_blk = &cb->dcmd_blk;
1999 		myrb_reset_cmd(cmd_blk);
2000 		mbox = &cmd_blk->mbox;
2001 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2002 		mbox->type3R.id = MYRB_DCMD_TAG;
2003 		mbox->type3R.rbld_rate = 0xFF;
2004 		mbox->type3R.addr = rate_addr;
2005 		status = myrb_exec_cmd(cb, cmd_blk);
2006 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2007 		mutex_unlock(&cb->dcmd_mutex);
2008 	}
2009 	if (status == MYRB_STATUS_SUCCESS) {
2010 		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2011 			    start ? "Initiated" : "Cancelled");
2012 		return count;
2013 	}
2014 	if (!start) {
2015 		sdev_printk(KERN_INFO, sdev,
2016 			    "Rebuild Not Cancelled, status 0x%x\n",
2017 			    status);
2018 		return -EIO;
2019 	}
2020 
2021 	switch (status) {
2022 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2023 		msg = "Attempt to Rebuild Online or Unresponsive Drive";
2024 		break;
2025 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2026 		msg = "New Disk Failed During Rebuild";
2027 		break;
2028 	case MYRB_STATUS_INVALID_ADDRESS:
2029 		msg = "Invalid Device Address";
2030 		break;
2031 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2032 		msg = "Already in Progress";
2033 		break;
2034 	default:
2035 		msg = NULL;
2036 		break;
2037 	}
2038 	if (msg)
2039 		sdev_printk(KERN_INFO, sdev,
2040 			    "Rebuild Failed - %s\n", msg);
2041 	else
2042 		sdev_printk(KERN_INFO, sdev,
2043 			    "Rebuild Failed, status 0x%x\n", status);
2044 
2045 	return -EIO;
2046 }
2047 static DEVICE_ATTR_RW(rebuild);
2048 
consistency_check_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2049 static ssize_t consistency_check_store(struct device *dev,
2050 		struct device_attribute *attr, const char *buf, size_t count)
2051 {
2052 	struct scsi_device *sdev = to_scsi_device(dev);
2053 	struct myrb_hba *cb = shost_priv(sdev->host);
2054 	struct myrb_rbld_progress rbld_buf;
2055 	struct myrb_cmdblk *cmd_blk;
2056 	union myrb_cmd_mbox *mbox;
2057 	unsigned short ldev_num = 0xFFFF;
2058 	unsigned short status;
2059 	int rc, start;
2060 	const char *msg;
2061 
2062 	rc = kstrtoint(buf, 0, &start);
2063 	if (rc)
2064 		return rc;
2065 
2066 	if (sdev->channel < myrb_logical_channel(sdev->host))
2067 		return -ENXIO;
2068 
2069 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2070 	if (start) {
2071 		if (status == MYRB_STATUS_SUCCESS) {
2072 			sdev_printk(KERN_INFO, sdev,
2073 				    "Check Consistency Not Initiated; already in progress\n");
2074 			return -EALREADY;
2075 		}
2076 		mutex_lock(&cb->dcmd_mutex);
2077 		cmd_blk = &cb->dcmd_blk;
2078 		myrb_reset_cmd(cmd_blk);
2079 		mbox = &cmd_blk->mbox;
2080 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2081 		mbox->type3C.id = MYRB_DCMD_TAG;
2082 		mbox->type3C.ldev_num = sdev->id;
2083 		mbox->type3C.auto_restore = true;
2084 
2085 		status = myrb_exec_cmd(cb, cmd_blk);
2086 		mutex_unlock(&cb->dcmd_mutex);
2087 	} else {
2088 		struct pci_dev *pdev = cb->pdev;
2089 		unsigned char *rate;
2090 		dma_addr_t rate_addr;
2091 
2092 		if (ldev_num != sdev->id) {
2093 			sdev_printk(KERN_INFO, sdev,
2094 				    "Check Consistency Not Cancelled; not in progress\n");
2095 			return 0;
2096 		}
2097 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2098 					  &rate_addr, GFP_KERNEL);
2099 		if (rate == NULL) {
2100 			sdev_printk(KERN_INFO, sdev,
2101 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2102 			return -ENOMEM;
2103 		}
2104 		mutex_lock(&cb->dcmd_mutex);
2105 		cmd_blk = &cb->dcmd_blk;
2106 		myrb_reset_cmd(cmd_blk);
2107 		mbox = &cmd_blk->mbox;
2108 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2109 		mbox->type3R.id = MYRB_DCMD_TAG;
2110 		mbox->type3R.rbld_rate = 0xFF;
2111 		mbox->type3R.addr = rate_addr;
2112 		status = myrb_exec_cmd(cb, cmd_blk);
2113 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2114 		mutex_unlock(&cb->dcmd_mutex);
2115 	}
2116 	if (status == MYRB_STATUS_SUCCESS) {
2117 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2118 			    start ? "Initiated" : "Cancelled");
2119 		return count;
2120 	}
2121 	if (!start) {
2122 		sdev_printk(KERN_INFO, sdev,
2123 			    "Check Consistency Not Cancelled, status 0x%x\n",
2124 			    status);
2125 		return -EIO;
2126 	}
2127 
2128 	switch (status) {
2129 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2130 		msg = "Dependent Physical Device is DEAD";
2131 		break;
2132 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2133 		msg = "New Disk Failed During Rebuild";
2134 		break;
2135 	case MYRB_STATUS_INVALID_ADDRESS:
2136 		msg = "Invalid or Nonredundant Logical Drive";
2137 		break;
2138 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2139 		msg = "Already in Progress";
2140 		break;
2141 	default:
2142 		msg = NULL;
2143 		break;
2144 	}
2145 	if (msg)
2146 		sdev_printk(KERN_INFO, sdev,
2147 			    "Check Consistency Failed - %s\n", msg);
2148 	else
2149 		sdev_printk(KERN_INFO, sdev,
2150 			    "Check Consistency Failed, status 0x%x\n", status);
2151 
2152 	return -EIO;
2153 }
2154 
consistency_check_show(struct device * dev,struct device_attribute * attr,char * buf)2155 static ssize_t consistency_check_show(struct device *dev,
2156 		struct device_attribute *attr, char *buf)
2157 {
2158 	return rebuild_show(dev, attr, buf);
2159 }
2160 static DEVICE_ATTR_RW(consistency_check);
2161 
ctlr_num_show(struct device * dev,struct device_attribute * attr,char * buf)2162 static ssize_t ctlr_num_show(struct device *dev,
2163 		struct device_attribute *attr, char *buf)
2164 {
2165 	struct Scsi_Host *shost = class_to_shost(dev);
2166 	struct myrb_hba *cb = shost_priv(shost);
2167 
2168 	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2169 }
2170 static DEVICE_ATTR_RO(ctlr_num);
2171 
firmware_show(struct device * dev,struct device_attribute * attr,char * buf)2172 static ssize_t firmware_show(struct device *dev,
2173 		struct device_attribute *attr, char *buf)
2174 {
2175 	struct Scsi_Host *shost = class_to_shost(dev);
2176 	struct myrb_hba *cb = shost_priv(shost);
2177 
2178 	return snprintf(buf, 16, "%s\n", cb->fw_version);
2179 }
2180 static DEVICE_ATTR_RO(firmware);
2181 
model_show(struct device * dev,struct device_attribute * attr,char * buf)2182 static ssize_t model_show(struct device *dev,
2183 		struct device_attribute *attr, char *buf)
2184 {
2185 	struct Scsi_Host *shost = class_to_shost(dev);
2186 	struct myrb_hba *cb = shost_priv(shost);
2187 
2188 	return snprintf(buf, 16, "%s\n", cb->model_name);
2189 }
2190 static DEVICE_ATTR_RO(model);
2191 
flush_cache_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2192 static ssize_t flush_cache_store(struct device *dev,
2193 		struct device_attribute *attr, const char *buf, size_t count)
2194 {
2195 	struct Scsi_Host *shost = class_to_shost(dev);
2196 	struct myrb_hba *cb = shost_priv(shost);
2197 	unsigned short status;
2198 
2199 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2200 	if (status == MYRB_STATUS_SUCCESS) {
2201 		shost_printk(KERN_INFO, shost,
2202 			     "Cache Flush Completed\n");
2203 		return count;
2204 	}
2205 	shost_printk(KERN_INFO, shost,
2206 		     "Cache Flush Failed, status %x\n", status);
2207 	return -EIO;
2208 }
2209 static DEVICE_ATTR_WO(flush_cache);
2210 
2211 static struct device_attribute *myrb_sdev_attrs[] = {
2212 	&dev_attr_rebuild,
2213 	&dev_attr_consistency_check,
2214 	&dev_attr_raid_state,
2215 	&dev_attr_raid_level,
2216 	NULL,
2217 };
2218 
2219 static struct device_attribute *myrb_shost_attrs[] = {
2220 	&dev_attr_ctlr_num,
2221 	&dev_attr_model,
2222 	&dev_attr_firmware,
2223 	&dev_attr_flush_cache,
2224 	NULL,
2225 };
2226 
2227 static struct scsi_host_template myrb_template = {
2228 	.module			= THIS_MODULE,
2229 	.name			= "DAC960",
2230 	.proc_name		= "myrb",
2231 	.queuecommand		= myrb_queuecommand,
2232 	.eh_host_reset_handler	= myrb_host_reset,
2233 	.slave_alloc		= myrb_slave_alloc,
2234 	.slave_configure	= myrb_slave_configure,
2235 	.slave_destroy		= myrb_slave_destroy,
2236 	.bios_param		= myrb_biosparam,
2237 	.cmd_size		= sizeof(struct myrb_cmdblk),
2238 	.shost_attrs		= myrb_shost_attrs,
2239 	.sdev_attrs		= myrb_sdev_attrs,
2240 	.this_id		= -1,
2241 };
2242 
2243 /**
2244  * myrb_is_raid - return boolean indicating device is raid volume
2245  * @dev: the device struct object
2246  */
myrb_is_raid(struct device * dev)2247 static int myrb_is_raid(struct device *dev)
2248 {
2249 	struct scsi_device *sdev = to_scsi_device(dev);
2250 
2251 	return sdev->channel == myrb_logical_channel(sdev->host);
2252 }
2253 
2254 /**
2255  * myrb_get_resync - get raid volume resync percent complete
2256  * @dev: the device struct object
2257  */
myrb_get_resync(struct device * dev)2258 static void myrb_get_resync(struct device *dev)
2259 {
2260 	struct scsi_device *sdev = to_scsi_device(dev);
2261 	struct myrb_hba *cb = shost_priv(sdev->host);
2262 	struct myrb_rbld_progress rbld_buf;
2263 	unsigned int percent_complete = 0;
2264 	unsigned short status;
2265 	unsigned int ldev_size = 0, remaining = 0;
2266 
2267 	if (sdev->channel < myrb_logical_channel(sdev->host))
2268 		return;
2269 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2270 	if (status == MYRB_STATUS_SUCCESS) {
2271 		if (rbld_buf.ldev_num == sdev->id) {
2272 			ldev_size = rbld_buf.ldev_size;
2273 			remaining = rbld_buf.blocks_left;
2274 		}
2275 	}
2276 	if (remaining && ldev_size)
2277 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2278 	raid_set_resync(myrb_raid_template, dev, percent_complete);
2279 }
2280 
2281 /**
2282  * myrb_get_state - get raid volume status
2283  * @dev: the device struct object
2284  */
myrb_get_state(struct device * dev)2285 static void myrb_get_state(struct device *dev)
2286 {
2287 	struct scsi_device *sdev = to_scsi_device(dev);
2288 	struct myrb_hba *cb = shost_priv(sdev->host);
2289 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2290 	enum raid_state state = RAID_STATE_UNKNOWN;
2291 	unsigned short status;
2292 
2293 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2294 		state = RAID_STATE_UNKNOWN;
2295 	else {
2296 		status = myrb_get_rbld_progress(cb, NULL);
2297 		if (status == MYRB_STATUS_SUCCESS)
2298 			state = RAID_STATE_RESYNCING;
2299 		else {
2300 			switch (ldev_info->state) {
2301 			case MYRB_DEVICE_ONLINE:
2302 				state = RAID_STATE_ACTIVE;
2303 				break;
2304 			case MYRB_DEVICE_WO:
2305 			case MYRB_DEVICE_CRITICAL:
2306 				state = RAID_STATE_DEGRADED;
2307 				break;
2308 			default:
2309 				state = RAID_STATE_OFFLINE;
2310 			}
2311 		}
2312 	}
2313 	raid_set_state(myrb_raid_template, dev, state);
2314 }
2315 
2316 static struct raid_function_template myrb_raid_functions = {
2317 	.cookie		= &myrb_template,
2318 	.is_raid	= myrb_is_raid,
2319 	.get_resync	= myrb_get_resync,
2320 	.get_state	= myrb_get_state,
2321 };
2322 
myrb_handle_scsi(struct myrb_hba * cb,struct myrb_cmdblk * cmd_blk,struct scsi_cmnd * scmd)2323 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2324 		struct scsi_cmnd *scmd)
2325 {
2326 	unsigned short status;
2327 
2328 	if (!cmd_blk)
2329 		return;
2330 
2331 	scsi_dma_unmap(scmd);
2332 
2333 	if (cmd_blk->dcdb) {
2334 		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2335 		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2336 			      cmd_blk->dcdb_addr);
2337 		cmd_blk->dcdb = NULL;
2338 	}
2339 	if (cmd_blk->sgl) {
2340 		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2341 		cmd_blk->sgl = NULL;
2342 		cmd_blk->sgl_addr = 0;
2343 	}
2344 	status = cmd_blk->status;
2345 	switch (status) {
2346 	case MYRB_STATUS_SUCCESS:
2347 	case MYRB_STATUS_DEVICE_BUSY:
2348 		scmd->result = (DID_OK << 16) | status;
2349 		break;
2350 	case MYRB_STATUS_BAD_DATA:
2351 		dev_dbg(&scmd->device->sdev_gendev,
2352 			"Bad Data Encountered\n");
2353 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2354 			/* Unrecovered read error */
2355 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2356 						MEDIUM_ERROR, 0x11, 0);
2357 		else
2358 			/* Write error */
2359 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2360 						MEDIUM_ERROR, 0x0C, 0);
2361 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2362 		break;
2363 	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2364 		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2365 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2366 			/* Unrecovered read error, auto-reallocation failed */
2367 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2368 						MEDIUM_ERROR, 0x11, 0x04);
2369 		else
2370 			/* Write error, auto-reallocation failed */
2371 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2372 						MEDIUM_ERROR, 0x0C, 0x02);
2373 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2374 		break;
2375 	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2376 		dev_dbg(&scmd->device->sdev_gendev,
2377 			    "Logical Drive Nonexistent or Offline");
2378 		scmd->result = (DID_BAD_TARGET << 16);
2379 		break;
2380 	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2381 		dev_dbg(&scmd->device->sdev_gendev,
2382 			    "Attempt to Access Beyond End of Logical Drive");
2383 		/* Logical block address out of range */
2384 		scsi_build_sense_buffer(0, scmd->sense_buffer,
2385 					NOT_READY, 0x21, 0);
2386 		break;
2387 	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2388 		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2389 		scmd->result = (DID_BAD_TARGET << 16);
2390 		break;
2391 	default:
2392 		scmd_printk(KERN_ERR, scmd,
2393 			    "Unexpected Error Status %04X", status);
2394 		scmd->result = (DID_ERROR << 16);
2395 		break;
2396 	}
2397 	scmd->scsi_done(scmd);
2398 }
2399 
myrb_handle_cmdblk(struct myrb_hba * cb,struct myrb_cmdblk * cmd_blk)2400 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2401 {
2402 	if (!cmd_blk)
2403 		return;
2404 
2405 	if (cmd_blk->completion) {
2406 		complete(cmd_blk->completion);
2407 		cmd_blk->completion = NULL;
2408 	}
2409 }
2410 
myrb_monitor(struct work_struct * work)2411 static void myrb_monitor(struct work_struct *work)
2412 {
2413 	struct myrb_hba *cb = container_of(work,
2414 			struct myrb_hba, monitor_work.work);
2415 	struct Scsi_Host *shost = cb->host;
2416 	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2417 
2418 	dev_dbg(&shost->shost_gendev, "monitor tick\n");
2419 
2420 	if (cb->new_ev_seq > cb->old_ev_seq) {
2421 		int event = cb->old_ev_seq;
2422 
2423 		dev_dbg(&shost->shost_gendev,
2424 			"get event log no %d/%d\n",
2425 			cb->new_ev_seq, event);
2426 		myrb_get_event(cb, event);
2427 		cb->old_ev_seq = event + 1;
2428 		interval = 10;
2429 	} else if (cb->need_err_info) {
2430 		cb->need_err_info = false;
2431 		dev_dbg(&shost->shost_gendev, "get error table\n");
2432 		myrb_get_errtable(cb);
2433 		interval = 10;
2434 	} else if (cb->need_rbld && cb->rbld_first) {
2435 		cb->need_rbld = false;
2436 		dev_dbg(&shost->shost_gendev,
2437 			"get rebuild progress\n");
2438 		myrb_update_rbld_progress(cb);
2439 		interval = 10;
2440 	} else if (cb->need_ldev_info) {
2441 		cb->need_ldev_info = false;
2442 		dev_dbg(&shost->shost_gendev,
2443 			"get logical drive info\n");
2444 		myrb_get_ldev_info(cb);
2445 		interval = 10;
2446 	} else if (cb->need_rbld) {
2447 		cb->need_rbld = false;
2448 		dev_dbg(&shost->shost_gendev,
2449 			"get rebuild progress\n");
2450 		myrb_update_rbld_progress(cb);
2451 		interval = 10;
2452 	} else if (cb->need_cc_status) {
2453 		cb->need_cc_status = false;
2454 		dev_dbg(&shost->shost_gendev,
2455 			"get consistency check progress\n");
2456 		myrb_get_cc_progress(cb);
2457 		interval = 10;
2458 	} else if (cb->need_bgi_status) {
2459 		cb->need_bgi_status = false;
2460 		dev_dbg(&shost->shost_gendev, "get background init status\n");
2461 		myrb_bgi_control(cb);
2462 		interval = 10;
2463 	} else {
2464 		dev_dbg(&shost->shost_gendev, "new enquiry\n");
2465 		mutex_lock(&cb->dma_mutex);
2466 		myrb_hba_enquiry(cb);
2467 		mutex_unlock(&cb->dma_mutex);
2468 		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2469 		    cb->need_err_info || cb->need_rbld ||
2470 		    cb->need_ldev_info || cb->need_cc_status ||
2471 		    cb->need_bgi_status) {
2472 			dev_dbg(&shost->shost_gendev,
2473 				"reschedule monitor\n");
2474 			interval = 0;
2475 		}
2476 	}
2477 	if (interval > 1)
2478 		cb->primary_monitor_time = jiffies;
2479 	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2480 }
2481 
2482 /*
2483  * myrb_err_status - reports controller BIOS messages
2484  *
2485  * Controller BIOS messages are passed through the Error Status Register
2486  * when the driver performs the BIOS handshaking.
2487  *
2488  * Return: true for fatal errors and false otherwise.
2489  */
myrb_err_status(struct myrb_hba * cb,unsigned char error,unsigned char parm0,unsigned char parm1)2490 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2491 		unsigned char parm0, unsigned char parm1)
2492 {
2493 	struct pci_dev *pdev = cb->pdev;
2494 
2495 	switch (error) {
2496 	case 0x00:
2497 		dev_info(&pdev->dev,
2498 			 "Physical Device %d:%d Not Responding\n",
2499 			 parm1, parm0);
2500 		break;
2501 	case 0x08:
2502 		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2503 		break;
2504 	case 0x30:
2505 		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2506 		break;
2507 	case 0x60:
2508 		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2509 		break;
2510 	case 0x70:
2511 		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2512 		break;
2513 	case 0x90:
2514 		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2515 			   parm1, parm0);
2516 		break;
2517 	case 0xA0:
2518 		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2519 		break;
2520 	case 0xB0:
2521 		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2522 		break;
2523 	case 0xD0:
2524 		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2525 		break;
2526 	case 0xF0:
2527 		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2528 		return true;
2529 	default:
2530 		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2531 			error);
2532 		return true;
2533 	}
2534 	return false;
2535 }
2536 
2537 /*
2538  * Hardware-specific functions
2539  */
2540 
2541 /*
2542  * DAC960 LA Series Controllers
2543  */
2544 
DAC960_LA_hw_mbox_new_cmd(void __iomem * base)2545 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2546 {
2547 	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2548 }
2549 
DAC960_LA_ack_hw_mbox_status(void __iomem * base)2550 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2551 {
2552 	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2553 }
2554 
DAC960_LA_reset_ctrl(void __iomem * base)2555 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2556 {
2557 	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2558 }
2559 
DAC960_LA_mem_mbox_new_cmd(void __iomem * base)2560 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2561 {
2562 	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2563 }
2564 
DAC960_LA_hw_mbox_is_full(void __iomem * base)2565 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2566 {
2567 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2568 
2569 	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2570 }
2571 
DAC960_LA_init_in_progress(void __iomem * base)2572 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2573 {
2574 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2575 
2576 	return !(idb & DAC960_LA_IDB_INIT_DONE);
2577 }
2578 
DAC960_LA_ack_hw_mbox_intr(void __iomem * base)2579 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2580 {
2581 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2582 }
2583 
DAC960_LA_ack_intr(void __iomem * base)2584 static inline void DAC960_LA_ack_intr(void __iomem *base)
2585 {
2586 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2587 	       base + DAC960_LA_ODB_OFFSET);
2588 }
2589 
DAC960_LA_hw_mbox_status_available(void __iomem * base)2590 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2591 {
2592 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2593 
2594 	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2595 }
2596 
DAC960_LA_enable_intr(void __iomem * base)2597 static inline void DAC960_LA_enable_intr(void __iomem *base)
2598 {
2599 	unsigned char odb = 0xFF;
2600 
2601 	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2602 	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2603 }
2604 
DAC960_LA_disable_intr(void __iomem * base)2605 static inline void DAC960_LA_disable_intr(void __iomem *base)
2606 {
2607 	unsigned char odb = 0xFF;
2608 
2609 	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2610 	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2611 }
2612 
DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox * mem_mbox,union myrb_cmd_mbox * mbox)2613 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2614 		union myrb_cmd_mbox *mbox)
2615 {
2616 	mem_mbox->words[1] = mbox->words[1];
2617 	mem_mbox->words[2] = mbox->words[2];
2618 	mem_mbox->words[3] = mbox->words[3];
2619 	/* Memory barrier to prevent reordering */
2620 	wmb();
2621 	mem_mbox->words[0] = mbox->words[0];
2622 	/* Memory barrier to force PCI access */
2623 	mb();
2624 }
2625 
DAC960_LA_write_hw_mbox(void __iomem * base,union myrb_cmd_mbox * mbox)2626 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2627 		union myrb_cmd_mbox *mbox)
2628 {
2629 	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2630 	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2631 	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2632 	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2633 }
2634 
DAC960_LA_read_status(void __iomem * base)2635 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2636 {
2637 	return readw(base + DAC960_LA_STS_OFFSET);
2638 }
2639 
2640 static inline bool
DAC960_LA_read_error_status(void __iomem * base,unsigned char * error,unsigned char * param0,unsigned char * param1)2641 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2642 		unsigned char *param0, unsigned char *param1)
2643 {
2644 	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2645 
2646 	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2647 		return false;
2648 	errsts &= ~DAC960_LA_ERRSTS_PENDING;
2649 
2650 	*error = errsts;
2651 	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2652 	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2653 	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2654 	return true;
2655 }
2656 
2657 static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev * pdev,void __iomem * base,union myrb_cmd_mbox * mbox)2658 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2659 		union myrb_cmd_mbox *mbox)
2660 {
2661 	unsigned short status;
2662 	int timeout = 0;
2663 
2664 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2665 		if (!DAC960_LA_hw_mbox_is_full(base))
2666 			break;
2667 		udelay(10);
2668 		timeout++;
2669 	}
2670 	if (DAC960_LA_hw_mbox_is_full(base)) {
2671 		dev_err(&pdev->dev,
2672 			"Timeout waiting for empty mailbox\n");
2673 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2674 	}
2675 	DAC960_LA_write_hw_mbox(base, mbox);
2676 	DAC960_LA_hw_mbox_new_cmd(base);
2677 	timeout = 0;
2678 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2679 		if (DAC960_LA_hw_mbox_status_available(base))
2680 			break;
2681 		udelay(10);
2682 		timeout++;
2683 	}
2684 	if (!DAC960_LA_hw_mbox_status_available(base)) {
2685 		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2686 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2687 	}
2688 	status = DAC960_LA_read_status(base);
2689 	DAC960_LA_ack_hw_mbox_intr(base);
2690 	DAC960_LA_ack_hw_mbox_status(base);
2691 
2692 	return status;
2693 }
2694 
DAC960_LA_hw_init(struct pci_dev * pdev,struct myrb_hba * cb,void __iomem * base)2695 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2696 		struct myrb_hba *cb, void __iomem *base)
2697 {
2698 	int timeout = 0;
2699 	unsigned char error, parm0, parm1;
2700 
2701 	DAC960_LA_disable_intr(base);
2702 	DAC960_LA_ack_hw_mbox_status(base);
2703 	udelay(1000);
2704 	while (DAC960_LA_init_in_progress(base) &&
2705 	       timeout < MYRB_MAILBOX_TIMEOUT) {
2706 		if (DAC960_LA_read_error_status(base, &error,
2707 					      &parm0, &parm1) &&
2708 		    myrb_err_status(cb, error, parm0, parm1))
2709 			return -ENODEV;
2710 		udelay(10);
2711 		timeout++;
2712 	}
2713 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2714 		dev_err(&pdev->dev,
2715 			"Timeout waiting for Controller Initialisation\n");
2716 		return -ETIMEDOUT;
2717 	}
2718 	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2719 		dev_err(&pdev->dev,
2720 			"Unable to Enable Memory Mailbox Interface\n");
2721 		DAC960_LA_reset_ctrl(base);
2722 		return -ENODEV;
2723 	}
2724 	DAC960_LA_enable_intr(base);
2725 	cb->qcmd = myrb_qcmd;
2726 	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2727 	if (cb->dual_mode_interface)
2728 		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2729 	else
2730 		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2731 	cb->disable_intr = DAC960_LA_disable_intr;
2732 	cb->reset = DAC960_LA_reset_ctrl;
2733 
2734 	return 0;
2735 }
2736 
DAC960_LA_intr_handler(int irq,void * arg)2737 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2738 {
2739 	struct myrb_hba *cb = arg;
2740 	void __iomem *base = cb->io_base;
2741 	struct myrb_stat_mbox *next_stat_mbox;
2742 	unsigned long flags;
2743 
2744 	spin_lock_irqsave(&cb->queue_lock, flags);
2745 	DAC960_LA_ack_intr(base);
2746 	next_stat_mbox = cb->next_stat_mbox;
2747 	while (next_stat_mbox->valid) {
2748 		unsigned char id = next_stat_mbox->id;
2749 		struct scsi_cmnd *scmd = NULL;
2750 		struct myrb_cmdblk *cmd_blk = NULL;
2751 
2752 		if (id == MYRB_DCMD_TAG)
2753 			cmd_blk = &cb->dcmd_blk;
2754 		else if (id == MYRB_MCMD_TAG)
2755 			cmd_blk = &cb->mcmd_blk;
2756 		else {
2757 			scmd = scsi_host_find_tag(cb->host, id - 3);
2758 			if (scmd)
2759 				cmd_blk = scsi_cmd_priv(scmd);
2760 		}
2761 		if (cmd_blk)
2762 			cmd_blk->status = next_stat_mbox->status;
2763 		else
2764 			dev_err(&cb->pdev->dev,
2765 				"Unhandled command completion %d\n", id);
2766 
2767 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2768 		if (++next_stat_mbox > cb->last_stat_mbox)
2769 			next_stat_mbox = cb->first_stat_mbox;
2770 
2771 		if (cmd_blk) {
2772 			if (id < 3)
2773 				myrb_handle_cmdblk(cb, cmd_blk);
2774 			else
2775 				myrb_handle_scsi(cb, cmd_blk, scmd);
2776 		}
2777 	}
2778 	cb->next_stat_mbox = next_stat_mbox;
2779 	spin_unlock_irqrestore(&cb->queue_lock, flags);
2780 	return IRQ_HANDLED;
2781 }
2782 
2783 static struct myrb_privdata DAC960_LA_privdata = {
2784 	.hw_init =	DAC960_LA_hw_init,
2785 	.irq_handler =	DAC960_LA_intr_handler,
2786 	.mmio_size =	DAC960_LA_mmio_size,
2787 };
2788 
2789 /*
2790  * DAC960 PG Series Controllers
2791  */
DAC960_PG_hw_mbox_new_cmd(void __iomem * base)2792 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2793 {
2794 	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2795 }
2796 
DAC960_PG_ack_hw_mbox_status(void __iomem * base)2797 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2798 {
2799 	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2800 }
2801 
DAC960_PG_reset_ctrl(void __iomem * base)2802 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2803 {
2804 	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2805 }
2806 
DAC960_PG_mem_mbox_new_cmd(void __iomem * base)2807 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2808 {
2809 	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2810 }
2811 
DAC960_PG_hw_mbox_is_full(void __iomem * base)2812 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2813 {
2814 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2815 
2816 	return idb & DAC960_PG_IDB_HWMBOX_FULL;
2817 }
2818 
DAC960_PG_init_in_progress(void __iomem * base)2819 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2820 {
2821 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2822 
2823 	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2824 }
2825 
DAC960_PG_ack_hw_mbox_intr(void __iomem * base)2826 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2827 {
2828 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2829 }
2830 
DAC960_PG_ack_intr(void __iomem * base)2831 static inline void DAC960_PG_ack_intr(void __iomem *base)
2832 {
2833 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2834 	       base + DAC960_PG_ODB_OFFSET);
2835 }
2836 
DAC960_PG_hw_mbox_status_available(void __iomem * base)2837 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2838 {
2839 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2840 
2841 	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2842 }
2843 
DAC960_PG_enable_intr(void __iomem * base)2844 static inline void DAC960_PG_enable_intr(void __iomem *base)
2845 {
2846 	unsigned int imask = (unsigned int)-1;
2847 
2848 	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2849 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2850 }
2851 
DAC960_PG_disable_intr(void __iomem * base)2852 static inline void DAC960_PG_disable_intr(void __iomem *base)
2853 {
2854 	unsigned int imask = (unsigned int)-1;
2855 
2856 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2857 }
2858 
DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox * mem_mbox,union myrb_cmd_mbox * mbox)2859 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2860 		union myrb_cmd_mbox *mbox)
2861 {
2862 	mem_mbox->words[1] = mbox->words[1];
2863 	mem_mbox->words[2] = mbox->words[2];
2864 	mem_mbox->words[3] = mbox->words[3];
2865 	/* Memory barrier to prevent reordering */
2866 	wmb();
2867 	mem_mbox->words[0] = mbox->words[0];
2868 	/* Memory barrier to force PCI access */
2869 	mb();
2870 }
2871 
DAC960_PG_write_hw_mbox(void __iomem * base,union myrb_cmd_mbox * mbox)2872 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2873 		union myrb_cmd_mbox *mbox)
2874 {
2875 	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2876 	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2877 	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2878 	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2879 }
2880 
2881 static inline unsigned short
DAC960_PG_read_status(void __iomem * base)2882 DAC960_PG_read_status(void __iomem *base)
2883 {
2884 	return readw(base + DAC960_PG_STS_OFFSET);
2885 }
2886 
2887 static inline bool
DAC960_PG_read_error_status(void __iomem * base,unsigned char * error,unsigned char * param0,unsigned char * param1)2888 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2889 		unsigned char *param0, unsigned char *param1)
2890 {
2891 	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2892 
2893 	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2894 		return false;
2895 	errsts &= ~DAC960_PG_ERRSTS_PENDING;
2896 	*error = errsts;
2897 	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2898 	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2899 	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2900 	return true;
2901 }
2902 
2903 static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev * pdev,void __iomem * base,union myrb_cmd_mbox * mbox)2904 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2905 		union myrb_cmd_mbox *mbox)
2906 {
2907 	unsigned short status;
2908 	int timeout = 0;
2909 
2910 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2911 		if (!DAC960_PG_hw_mbox_is_full(base))
2912 			break;
2913 		udelay(10);
2914 		timeout++;
2915 	}
2916 	if (DAC960_PG_hw_mbox_is_full(base)) {
2917 		dev_err(&pdev->dev,
2918 			"Timeout waiting for empty mailbox\n");
2919 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2920 	}
2921 	DAC960_PG_write_hw_mbox(base, mbox);
2922 	DAC960_PG_hw_mbox_new_cmd(base);
2923 
2924 	timeout = 0;
2925 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2926 		if (DAC960_PG_hw_mbox_status_available(base))
2927 			break;
2928 		udelay(10);
2929 		timeout++;
2930 	}
2931 	if (!DAC960_PG_hw_mbox_status_available(base)) {
2932 		dev_err(&pdev->dev,
2933 			"Timeout waiting for mailbox status\n");
2934 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2935 	}
2936 	status = DAC960_PG_read_status(base);
2937 	DAC960_PG_ack_hw_mbox_intr(base);
2938 	DAC960_PG_ack_hw_mbox_status(base);
2939 
2940 	return status;
2941 }
2942 
DAC960_PG_hw_init(struct pci_dev * pdev,struct myrb_hba * cb,void __iomem * base)2943 static int DAC960_PG_hw_init(struct pci_dev *pdev,
2944 		struct myrb_hba *cb, void __iomem *base)
2945 {
2946 	int timeout = 0;
2947 	unsigned char error, parm0, parm1;
2948 
2949 	DAC960_PG_disable_intr(base);
2950 	DAC960_PG_ack_hw_mbox_status(base);
2951 	udelay(1000);
2952 	while (DAC960_PG_init_in_progress(base) &&
2953 	       timeout < MYRB_MAILBOX_TIMEOUT) {
2954 		if (DAC960_PG_read_error_status(base, &error,
2955 						&parm0, &parm1) &&
2956 		    myrb_err_status(cb, error, parm0, parm1))
2957 			return -EIO;
2958 		udelay(10);
2959 		timeout++;
2960 	}
2961 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2962 		dev_err(&pdev->dev,
2963 			"Timeout waiting for Controller Initialisation\n");
2964 		return -ETIMEDOUT;
2965 	}
2966 	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
2967 		dev_err(&pdev->dev,
2968 			"Unable to Enable Memory Mailbox Interface\n");
2969 		DAC960_PG_reset_ctrl(base);
2970 		return -ENODEV;
2971 	}
2972 	DAC960_PG_enable_intr(base);
2973 	cb->qcmd = myrb_qcmd;
2974 	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
2975 	if (cb->dual_mode_interface)
2976 		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
2977 	else
2978 		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
2979 	cb->disable_intr = DAC960_PG_disable_intr;
2980 	cb->reset = DAC960_PG_reset_ctrl;
2981 
2982 	return 0;
2983 }
2984 
DAC960_PG_intr_handler(int irq,void * arg)2985 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
2986 {
2987 	struct myrb_hba *cb = arg;
2988 	void __iomem *base = cb->io_base;
2989 	struct myrb_stat_mbox *next_stat_mbox;
2990 	unsigned long flags;
2991 
2992 	spin_lock_irqsave(&cb->queue_lock, flags);
2993 	DAC960_PG_ack_intr(base);
2994 	next_stat_mbox = cb->next_stat_mbox;
2995 	while (next_stat_mbox->valid) {
2996 		unsigned char id = next_stat_mbox->id;
2997 		struct scsi_cmnd *scmd = NULL;
2998 		struct myrb_cmdblk *cmd_blk = NULL;
2999 
3000 		if (id == MYRB_DCMD_TAG)
3001 			cmd_blk = &cb->dcmd_blk;
3002 		else if (id == MYRB_MCMD_TAG)
3003 			cmd_blk = &cb->mcmd_blk;
3004 		else {
3005 			scmd = scsi_host_find_tag(cb->host, id - 3);
3006 			if (scmd)
3007 				cmd_blk = scsi_cmd_priv(scmd);
3008 		}
3009 		if (cmd_blk)
3010 			cmd_blk->status = next_stat_mbox->status;
3011 		else
3012 			dev_err(&cb->pdev->dev,
3013 				"Unhandled command completion %d\n", id);
3014 
3015 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
3016 		if (++next_stat_mbox > cb->last_stat_mbox)
3017 			next_stat_mbox = cb->first_stat_mbox;
3018 
3019 		if (id < 3)
3020 			myrb_handle_cmdblk(cb, cmd_blk);
3021 		else
3022 			myrb_handle_scsi(cb, cmd_blk, scmd);
3023 	}
3024 	cb->next_stat_mbox = next_stat_mbox;
3025 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3026 	return IRQ_HANDLED;
3027 }
3028 
3029 static struct myrb_privdata DAC960_PG_privdata = {
3030 	.hw_init =	DAC960_PG_hw_init,
3031 	.irq_handler =	DAC960_PG_intr_handler,
3032 	.mmio_size =	DAC960_PG_mmio_size,
3033 };
3034 
3035 
3036 /*
3037  * DAC960 PD Series Controllers
3038  */
3039 
DAC960_PD_hw_mbox_new_cmd(void __iomem * base)3040 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3041 {
3042 	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3043 }
3044 
DAC960_PD_ack_hw_mbox_status(void __iomem * base)3045 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3046 {
3047 	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3048 }
3049 
DAC960_PD_reset_ctrl(void __iomem * base)3050 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3051 {
3052 	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3053 }
3054 
DAC960_PD_hw_mbox_is_full(void __iomem * base)3055 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3056 {
3057 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3058 
3059 	return idb & DAC960_PD_IDB_HWMBOX_FULL;
3060 }
3061 
DAC960_PD_init_in_progress(void __iomem * base)3062 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3063 {
3064 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3065 
3066 	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3067 }
3068 
DAC960_PD_ack_intr(void __iomem * base)3069 static inline void DAC960_PD_ack_intr(void __iomem *base)
3070 {
3071 	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3072 }
3073 
DAC960_PD_hw_mbox_status_available(void __iomem * base)3074 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3075 {
3076 	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3077 
3078 	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3079 }
3080 
DAC960_PD_enable_intr(void __iomem * base)3081 static inline void DAC960_PD_enable_intr(void __iomem *base)
3082 {
3083 	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3084 }
3085 
DAC960_PD_disable_intr(void __iomem * base)3086 static inline void DAC960_PD_disable_intr(void __iomem *base)
3087 {
3088 	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3089 }
3090 
DAC960_PD_write_cmd_mbox(void __iomem * base,union myrb_cmd_mbox * mbox)3091 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3092 		union myrb_cmd_mbox *mbox)
3093 {
3094 	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3095 	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3096 	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3097 	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3098 }
3099 
3100 static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem * base)3101 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3102 {
3103 	return readb(base + DAC960_PD_STSID_OFFSET);
3104 }
3105 
3106 static inline unsigned short
DAC960_PD_read_status(void __iomem * base)3107 DAC960_PD_read_status(void __iomem *base)
3108 {
3109 	return readw(base + DAC960_PD_STS_OFFSET);
3110 }
3111 
3112 static inline bool
DAC960_PD_read_error_status(void __iomem * base,unsigned char * error,unsigned char * param0,unsigned char * param1)3113 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3114 		unsigned char *param0, unsigned char *param1)
3115 {
3116 	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3117 
3118 	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3119 		return false;
3120 	errsts &= ~DAC960_PD_ERRSTS_PENDING;
3121 	*error = errsts;
3122 	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3123 	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3124 	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3125 	return true;
3126 }
3127 
DAC960_PD_qcmd(struct myrb_hba * cb,struct myrb_cmdblk * cmd_blk)3128 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3129 {
3130 	void __iomem *base = cb->io_base;
3131 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3132 
3133 	while (DAC960_PD_hw_mbox_is_full(base))
3134 		udelay(1);
3135 	DAC960_PD_write_cmd_mbox(base, mbox);
3136 	DAC960_PD_hw_mbox_new_cmd(base);
3137 }
3138 
DAC960_PD_hw_init(struct pci_dev * pdev,struct myrb_hba * cb,void __iomem * base)3139 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3140 		struct myrb_hba *cb, void __iomem *base)
3141 {
3142 	int timeout = 0;
3143 	unsigned char error, parm0, parm1;
3144 
3145 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3146 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3147 			(unsigned long)cb->io_addr);
3148 		return -EBUSY;
3149 	}
3150 	DAC960_PD_disable_intr(base);
3151 	DAC960_PD_ack_hw_mbox_status(base);
3152 	udelay(1000);
3153 	while (DAC960_PD_init_in_progress(base) &&
3154 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3155 		if (DAC960_PD_read_error_status(base, &error,
3156 					      &parm0, &parm1) &&
3157 		    myrb_err_status(cb, error, parm0, parm1))
3158 			return -EIO;
3159 		udelay(10);
3160 		timeout++;
3161 	}
3162 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3163 		dev_err(&pdev->dev,
3164 			"Timeout waiting for Controller Initialisation\n");
3165 		return -ETIMEDOUT;
3166 	}
3167 	if (!myrb_enable_mmio(cb, NULL)) {
3168 		dev_err(&pdev->dev,
3169 			"Unable to Enable Memory Mailbox Interface\n");
3170 		DAC960_PD_reset_ctrl(base);
3171 		return -ENODEV;
3172 	}
3173 	DAC960_PD_enable_intr(base);
3174 	cb->qcmd = DAC960_PD_qcmd;
3175 	cb->disable_intr = DAC960_PD_disable_intr;
3176 	cb->reset = DAC960_PD_reset_ctrl;
3177 
3178 	return 0;
3179 }
3180 
DAC960_PD_intr_handler(int irq,void * arg)3181 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3182 {
3183 	struct myrb_hba *cb = arg;
3184 	void __iomem *base = cb->io_base;
3185 	unsigned long flags;
3186 
3187 	spin_lock_irqsave(&cb->queue_lock, flags);
3188 	while (DAC960_PD_hw_mbox_status_available(base)) {
3189 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3190 		struct scsi_cmnd *scmd = NULL;
3191 		struct myrb_cmdblk *cmd_blk = NULL;
3192 
3193 		if (id == MYRB_DCMD_TAG)
3194 			cmd_blk = &cb->dcmd_blk;
3195 		else if (id == MYRB_MCMD_TAG)
3196 			cmd_blk = &cb->mcmd_blk;
3197 		else {
3198 			scmd = scsi_host_find_tag(cb->host, id - 3);
3199 			if (scmd)
3200 				cmd_blk = scsi_cmd_priv(scmd);
3201 		}
3202 		if (cmd_blk)
3203 			cmd_blk->status = DAC960_PD_read_status(base);
3204 		else
3205 			dev_err(&cb->pdev->dev,
3206 				"Unhandled command completion %d\n", id);
3207 
3208 		DAC960_PD_ack_intr(base);
3209 		DAC960_PD_ack_hw_mbox_status(base);
3210 
3211 		if (id < 3)
3212 			myrb_handle_cmdblk(cb, cmd_blk);
3213 		else
3214 			myrb_handle_scsi(cb, cmd_blk, scmd);
3215 	}
3216 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3217 	return IRQ_HANDLED;
3218 }
3219 
3220 static struct myrb_privdata DAC960_PD_privdata = {
3221 	.hw_init =	DAC960_PD_hw_init,
3222 	.irq_handler =	DAC960_PD_intr_handler,
3223 	.mmio_size =	DAC960_PD_mmio_size,
3224 };
3225 
3226 
3227 /*
3228  * DAC960 P Series Controllers
3229  *
3230  * Similar to the DAC960 PD Series Controllers, but some commands have
3231  * to be translated.
3232  */
3233 
myrb_translate_enquiry(void * enq)3234 static inline void myrb_translate_enquiry(void *enq)
3235 {
3236 	memcpy(enq + 132, enq + 36, 64);
3237 	memset(enq + 36, 0, 96);
3238 }
3239 
myrb_translate_devstate(void * state)3240 static inline void myrb_translate_devstate(void *state)
3241 {
3242 	memcpy(state + 2, state + 3, 1);
3243 	memmove(state + 4, state + 5, 2);
3244 	memmove(state + 6, state + 8, 4);
3245 }
3246 
myrb_translate_to_rw_command(struct myrb_cmdblk * cmd_blk)3247 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3248 {
3249 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3250 	int ldev_num = mbox->type5.ld.ldev_num;
3251 
3252 	mbox->bytes[3] &= 0x7;
3253 	mbox->bytes[3] |= mbox->bytes[7] << 6;
3254 	mbox->bytes[7] = ldev_num;
3255 }
3256 
myrb_translate_from_rw_command(struct myrb_cmdblk * cmd_blk)3257 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3258 {
3259 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3260 	int ldev_num = mbox->bytes[7];
3261 
3262 	mbox->bytes[7] = mbox->bytes[3] >> 6;
3263 	mbox->bytes[3] &= 0x7;
3264 	mbox->bytes[3] |= ldev_num << 3;
3265 }
3266 
DAC960_P_qcmd(struct myrb_hba * cb,struct myrb_cmdblk * cmd_blk)3267 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3268 {
3269 	void __iomem *base = cb->io_base;
3270 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3271 
3272 	switch (mbox->common.opcode) {
3273 	case MYRB_CMD_ENQUIRY:
3274 		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3275 		break;
3276 	case MYRB_CMD_GET_DEVICE_STATE:
3277 		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3278 		break;
3279 	case MYRB_CMD_READ:
3280 		mbox->common.opcode = MYRB_CMD_READ_OLD;
3281 		myrb_translate_to_rw_command(cmd_blk);
3282 		break;
3283 	case MYRB_CMD_WRITE:
3284 		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3285 		myrb_translate_to_rw_command(cmd_blk);
3286 		break;
3287 	case MYRB_CMD_READ_SG:
3288 		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3289 		myrb_translate_to_rw_command(cmd_blk);
3290 		break;
3291 	case MYRB_CMD_WRITE_SG:
3292 		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3293 		myrb_translate_to_rw_command(cmd_blk);
3294 		break;
3295 	default:
3296 		break;
3297 	}
3298 	while (DAC960_PD_hw_mbox_is_full(base))
3299 		udelay(1);
3300 	DAC960_PD_write_cmd_mbox(base, mbox);
3301 	DAC960_PD_hw_mbox_new_cmd(base);
3302 }
3303 
3304 
DAC960_P_hw_init(struct pci_dev * pdev,struct myrb_hba * cb,void __iomem * base)3305 static int DAC960_P_hw_init(struct pci_dev *pdev,
3306 		struct myrb_hba *cb, void __iomem *base)
3307 {
3308 	int timeout = 0;
3309 	unsigned char error, parm0, parm1;
3310 
3311 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3312 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3313 			(unsigned long)cb->io_addr);
3314 		return -EBUSY;
3315 	}
3316 	DAC960_PD_disable_intr(base);
3317 	DAC960_PD_ack_hw_mbox_status(base);
3318 	udelay(1000);
3319 	while (DAC960_PD_init_in_progress(base) &&
3320 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3321 		if (DAC960_PD_read_error_status(base, &error,
3322 						&parm0, &parm1) &&
3323 		    myrb_err_status(cb, error, parm0, parm1))
3324 			return -EAGAIN;
3325 		udelay(10);
3326 		timeout++;
3327 	}
3328 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3329 		dev_err(&pdev->dev,
3330 			"Timeout waiting for Controller Initialisation\n");
3331 		return -ETIMEDOUT;
3332 	}
3333 	if (!myrb_enable_mmio(cb, NULL)) {
3334 		dev_err(&pdev->dev,
3335 			"Unable to allocate DMA mapped memory\n");
3336 		DAC960_PD_reset_ctrl(base);
3337 		return -ETIMEDOUT;
3338 	}
3339 	DAC960_PD_enable_intr(base);
3340 	cb->qcmd = DAC960_P_qcmd;
3341 	cb->disable_intr = DAC960_PD_disable_intr;
3342 	cb->reset = DAC960_PD_reset_ctrl;
3343 
3344 	return 0;
3345 }
3346 
DAC960_P_intr_handler(int irq,void * arg)3347 static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3348 {
3349 	struct myrb_hba *cb = arg;
3350 	void __iomem *base = cb->io_base;
3351 	unsigned long flags;
3352 
3353 	spin_lock_irqsave(&cb->queue_lock, flags);
3354 	while (DAC960_PD_hw_mbox_status_available(base)) {
3355 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3356 		struct scsi_cmnd *scmd = NULL;
3357 		struct myrb_cmdblk *cmd_blk = NULL;
3358 		union myrb_cmd_mbox *mbox;
3359 		enum myrb_cmd_opcode op;
3360 
3361 
3362 		if (id == MYRB_DCMD_TAG)
3363 			cmd_blk = &cb->dcmd_blk;
3364 		else if (id == MYRB_MCMD_TAG)
3365 			cmd_blk = &cb->mcmd_blk;
3366 		else {
3367 			scmd = scsi_host_find_tag(cb->host, id - 3);
3368 			if (scmd)
3369 				cmd_blk = scsi_cmd_priv(scmd);
3370 		}
3371 		if (cmd_blk)
3372 			cmd_blk->status = DAC960_PD_read_status(base);
3373 		else
3374 			dev_err(&cb->pdev->dev,
3375 				"Unhandled command completion %d\n", id);
3376 
3377 		DAC960_PD_ack_intr(base);
3378 		DAC960_PD_ack_hw_mbox_status(base);
3379 
3380 		if (!cmd_blk)
3381 			continue;
3382 
3383 		mbox = &cmd_blk->mbox;
3384 		op = mbox->common.opcode;
3385 		switch (op) {
3386 		case MYRB_CMD_ENQUIRY_OLD:
3387 			mbox->common.opcode = MYRB_CMD_ENQUIRY;
3388 			myrb_translate_enquiry(cb->enquiry);
3389 			break;
3390 		case MYRB_CMD_READ_OLD:
3391 			mbox->common.opcode = MYRB_CMD_READ;
3392 			myrb_translate_from_rw_command(cmd_blk);
3393 			break;
3394 		case MYRB_CMD_WRITE_OLD:
3395 			mbox->common.opcode = MYRB_CMD_WRITE;
3396 			myrb_translate_from_rw_command(cmd_blk);
3397 			break;
3398 		case MYRB_CMD_READ_SG_OLD:
3399 			mbox->common.opcode = MYRB_CMD_READ_SG;
3400 			myrb_translate_from_rw_command(cmd_blk);
3401 			break;
3402 		case MYRB_CMD_WRITE_SG_OLD:
3403 			mbox->common.opcode = MYRB_CMD_WRITE_SG;
3404 			myrb_translate_from_rw_command(cmd_blk);
3405 			break;
3406 		default:
3407 			break;
3408 		}
3409 		if (id < 3)
3410 			myrb_handle_cmdblk(cb, cmd_blk);
3411 		else
3412 			myrb_handle_scsi(cb, cmd_blk, scmd);
3413 	}
3414 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3415 	return IRQ_HANDLED;
3416 }
3417 
3418 static struct myrb_privdata DAC960_P_privdata = {
3419 	.hw_init =	DAC960_P_hw_init,
3420 	.irq_handler =	DAC960_P_intr_handler,
3421 	.mmio_size =	DAC960_PD_mmio_size,
3422 };
3423 
myrb_detect(struct pci_dev * pdev,const struct pci_device_id * entry)3424 static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3425 		const struct pci_device_id *entry)
3426 {
3427 	struct myrb_privdata *privdata =
3428 		(struct myrb_privdata *)entry->driver_data;
3429 	irq_handler_t irq_handler = privdata->irq_handler;
3430 	unsigned int mmio_size = privdata->mmio_size;
3431 	struct Scsi_Host *shost;
3432 	struct myrb_hba *cb = NULL;
3433 
3434 	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3435 	if (!shost) {
3436 		dev_err(&pdev->dev, "Unable to allocate Controller\n");
3437 		return NULL;
3438 	}
3439 	shost->max_cmd_len = 12;
3440 	shost->max_lun = 256;
3441 	cb = shost_priv(shost);
3442 	mutex_init(&cb->dcmd_mutex);
3443 	mutex_init(&cb->dma_mutex);
3444 	cb->pdev = pdev;
3445 
3446 	if (pci_enable_device(pdev))
3447 		goto failure;
3448 
3449 	if (privdata->hw_init == DAC960_PD_hw_init ||
3450 	    privdata->hw_init == DAC960_P_hw_init) {
3451 		cb->io_addr = pci_resource_start(pdev, 0);
3452 		cb->pci_addr = pci_resource_start(pdev, 1);
3453 	} else
3454 		cb->pci_addr = pci_resource_start(pdev, 0);
3455 
3456 	pci_set_drvdata(pdev, cb);
3457 	spin_lock_init(&cb->queue_lock);
3458 	if (mmio_size < PAGE_SIZE)
3459 		mmio_size = PAGE_SIZE;
3460 	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3461 	if (cb->mmio_base == NULL) {
3462 		dev_err(&pdev->dev,
3463 			"Unable to map Controller Register Window\n");
3464 		goto failure;
3465 	}
3466 
3467 	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3468 	if (privdata->hw_init(pdev, cb, cb->io_base))
3469 		goto failure;
3470 
3471 	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3472 		dev_err(&pdev->dev,
3473 			"Unable to acquire IRQ Channel %d\n", pdev->irq);
3474 		goto failure;
3475 	}
3476 	cb->irq = pdev->irq;
3477 	return cb;
3478 
3479 failure:
3480 	dev_err(&pdev->dev,
3481 		"Failed to initialize Controller\n");
3482 	myrb_cleanup(cb);
3483 	return NULL;
3484 }
3485 
myrb_probe(struct pci_dev * dev,const struct pci_device_id * entry)3486 static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3487 {
3488 	struct myrb_hba *cb;
3489 	int ret;
3490 
3491 	cb = myrb_detect(dev, entry);
3492 	if (!cb)
3493 		return -ENODEV;
3494 
3495 	ret = myrb_get_hba_config(cb);
3496 	if (ret < 0) {
3497 		myrb_cleanup(cb);
3498 		return ret;
3499 	}
3500 
3501 	if (!myrb_create_mempools(dev, cb)) {
3502 		ret = -ENOMEM;
3503 		goto failed;
3504 	}
3505 
3506 	ret = scsi_add_host(cb->host, &dev->dev);
3507 	if (ret) {
3508 		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3509 		myrb_destroy_mempools(cb);
3510 		goto failed;
3511 	}
3512 	scsi_scan_host(cb->host);
3513 	return 0;
3514 failed:
3515 	myrb_cleanup(cb);
3516 	return ret;
3517 }
3518 
3519 
myrb_remove(struct pci_dev * pdev)3520 static void myrb_remove(struct pci_dev *pdev)
3521 {
3522 	struct myrb_hba *cb = pci_get_drvdata(pdev);
3523 
3524 	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
3525 	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3526 	myrb_cleanup(cb);
3527 	myrb_destroy_mempools(cb);
3528 }
3529 
3530 
3531 static const struct pci_device_id myrb_id_table[] = {
3532 	{
3533 		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3534 			       PCI_DEVICE_ID_DEC_21285,
3535 			       PCI_VENDOR_ID_MYLEX,
3536 			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
3537 		.driver_data	= (unsigned long) &DAC960_LA_privdata,
3538 	},
3539 	{
3540 		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3541 	},
3542 	{
3543 		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3544 	},
3545 	{
3546 		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3547 	},
3548 	{0, },
3549 };
3550 
3551 MODULE_DEVICE_TABLE(pci, myrb_id_table);
3552 
3553 static struct pci_driver myrb_pci_driver = {
3554 	.name		= "myrb",
3555 	.id_table	= myrb_id_table,
3556 	.probe		= myrb_probe,
3557 	.remove		= myrb_remove,
3558 };
3559 
myrb_init_module(void)3560 static int __init myrb_init_module(void)
3561 {
3562 	int ret;
3563 
3564 	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3565 	if (!myrb_raid_template)
3566 		return -ENODEV;
3567 
3568 	ret = pci_register_driver(&myrb_pci_driver);
3569 	if (ret)
3570 		raid_class_release(myrb_raid_template);
3571 
3572 	return ret;
3573 }
3574 
myrb_cleanup_module(void)3575 static void __exit myrb_cleanup_module(void)
3576 {
3577 	pci_unregister_driver(&myrb_pci_driver);
3578 	raid_class_release(myrb_raid_template);
3579 }
3580 
3581 module_init(myrb_init_module);
3582 module_exit(myrb_cleanup_module);
3583 
3584 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3585 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3586 MODULE_LICENSE("GPL");
3587