xref: /linux/drivers/ata/libata-sff.c (revision b4082450)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
#include <trace/events/libata.h>
#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
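
/*
 * Editor's note (illustration, not part of the original file): low-level
 * drivers normally reuse these defaults through .inherits rather than
 * filling in every hook.  A minimal sketch, assuming a hypothetical
 * driver; the ops names are the real symbols defined in this file.
 */
#if 0	/* example only */
static struct ata_port_operations my_pata_port_ops = {
	.inherits	= &ata_sff_port_ops,
	/* override individual hooks as the hardware requires, e.g.: */
	.sff_data_xfer	= ata_sff_data_xfer32,
};
#endif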

/**
 *	ata_sff_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads the ATA taskfile status register for the currently-selected
 *	device and returns its value. This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);
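
/*
 * Editor's note (illustration, not part of the original file): because a
 * status read also clears INTRQ, polled paths can loop on it directly.
 * A minimal busy-wait sketch; my_wait_not_busy() is hypothetical.
 */
#if 0	/* example only */
static int my_wait_not_busy(struct ata_port *ap, unsigned int tries)
{
	while (tries--) {
		if (!(ata_sff_check_status(ap) & ATA_BUSY))
			return 0;
		udelay(10);
	}
	return -EBUSY;
}
#endif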

/**
 *	ata_sff_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *	@status: pointer to a status value
 *
 *	Reads the ATA alternate status register for the currently-selected
 *	device and returns its value in @status.
 *
 *	RETURN:
 *	true if the register exists, false if not.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
{
	u8 tmp;

	if (ap->ops->sff_check_altstatus) {
		tmp = ap->ops->sff_check_altstatus(ap);
		goto read;
	}
	if (ap->ioaddr.altstatus_addr) {
		tmp = ioread8(ap->ioaddr.altstatus_addr);
		goto read;
	}
	return false;

read:
	if (status)
		*status = tmp;
	return true;
}

/**
 *	ata_sff_irq_status - Check if the device is busy
 *	@ap: port where the device is
 *
 *	Determine if the port is currently busy. Uses altstatus
 *	if available in order to avoid clearing shared IRQ status
 *	when finding an IRQ source. Fortunately for us, devices
 *	without a control register do not share interrupt lines.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	/* Not us: We are busy */
	if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
		return status;
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 *	ata_sff_sync - Flush writes
 *	@ap: Port to wait for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_sff_sync(struct ata_port *ap)
{
	ata_sff_altstatus(ap, NULL);
}

/**
 *	ata_sff_pause		-	Flush writes and wait 400ns
 *	@ap: Port to pause for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);
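
/*
 * Editor's note (illustration, not part of the original file): the
 * typical use of ata_sff_pause() is to honour the 400ns rule after a
 * device register write, exactly as ata_sff_dev_select() does below.
 * Hedged sketch; my_select_device1() is hypothetical.
 */
#if 0	/* example only */
static void my_select_device1(struct ata_port *ap)
{
	iowrite8(ATA_DEVICE_OBS | ATA_DEV1, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* flush posted write, then wait 400ns */
}
#endif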

/**
 *	ata_sff_dma_pause	-	Pause before commencing DMA
 *	@ap: Port to pause for.
 *
 *	Perform I/O fencing and ensure sufficient cycle delays occur
 *	for the HDMA1:0 transition
 */

void ata_sff_dma_pause(struct ata_port *ap)
{
	/*
	 * An altstatus read will cause the needed delay without
	 * messing up the IRQ status
	 */
	if (ata_sff_altstatus(ap, NULL))
		return;
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 *	ata_sff_wait_ready - sleep until BSY clears, or timeout
 *	@link: SFF link to wait ready status for
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
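
/*
 * Editor's note (illustration, not part of the original file): callers
 * derive @deadline from jiffies plus a timeout, e.g. with ata_deadline()
 * from <linux/libata.h>.  Sketch only; my_wait_link() is hypothetical.
 */
#if 0	/* example only */
static int my_wait_link(struct ata_link *link)
{
	unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);

	return ata_sff_wait_ready(link, deadline);
}
#endif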

/**
 *	ata_sff_set_devctl - Write device control reg
 *	@ap: port where the device is
 *	@ctl: value to write
 *
 *	Writes ATA device control register.
 *
 *	RETURN:
 *	true if the register exists, false if not.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl) {
		ap->ops->sff_set_devctl(ap, ctl);
		return true;
	}
	if (ap->ioaddr.ctl_addr) {
		iowrite8(ctl, ap->ioaddr.ctl_addr);
		return true;
	}

	return false;
}

/**
 *	ata_sff_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_sff_dev_select(), which
 *	additionally provides the services of inserting the proper
 *	pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_sff_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	Note: may NOT be used as the sff_irq_on() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 *	ata_sff_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		iowrite8(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
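
/*
 * Editor's note (illustration, not part of the original file): a taskfile
 * is normally prepared with ata_tf_init(), and the ATA_TFLAG_* bits decide
 * which register groups ata_sff_tf_load() writes.  A hedged sketch of a
 * 28-bit LBA PIO read setup; my_load_read_sector() is hypothetical.
 */
#if 0	/* example only */
static void my_load_read_sector(struct ata_device *dev, u32 lba)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PIO_READ;
	tf.nsect = 1;
	tf.lbal = lba & 0xff;
	tf.lbam = (lba >> 8) & 0xff;
	tf.lbah = (lba >> 16) & 0xff;
	tf.device |= ATA_LBA | ((lba >> 24) & 0xf);

	ap->ops->sff_tf_load(ap, &tf);
}
#endif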

/**
 *	ata_sff_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf. Assumes the device has a fully SFF compliant task file
 *	layout and behaviour. If your device does not (e.g. has a different
 *	status method) then you will need to provide a replacement tf_read.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->status = ata_sff_check_status(ap);
	tf->error = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 *	ata_sff_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *	@tag: tag of the associated command
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf,
				  unsigned int tag)
{
	trace_ata_tf_load(ap, tf);
	ap->ops->sff_tf_load(ap, tf);
	trace_ata_exec_command(ap, tf, tag);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 *	ata_sff_data_xfer - Transfer data by PIO
 *	@qc: queued command
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
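
/*
 * Editor's note (illustration, not part of the original file): real
 * callers go through ap->ops->sff_data_xfer, as ata_pio_xfer() below
 * does.  A hedged sketch of moving one 512-byte sector for an active
 * command; my_read_one_sector() is hypothetical.
 */
#if 0	/* example only */
static void my_read_one_sector(struct ata_queued_cmd *qc, unsigned char *buf)
{
	unsigned int n = ata_sff_data_xfer(qc, buf, ATA_SECT_SIZE, READ);

	WARN_ON_ONCE(n != ATA_SECT_SIZE);	/* even length, fully consumed */
}
#endif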

/**
 *	ata_sff_data_xfer32 - Transfer data by PIO
 *	@qc: queued command
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO using 32bit
 *	I/O operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_device *dev = qc->dev;
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(qc, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
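
/*
 * Editor's note (illustration, not part of the original file): the 32-bit
 * path only engages when the port advertises it.  A hedged sketch of how
 * a driver opts in; my_enable_pio32() is hypothetical, the ATA_PFLAG_*
 * bits are the real libata flags.
 */
#if 0	/* example only */
static void my_enable_pio32(struct ata_port *ap)
{
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	/* and wire .sff_data_xfer = ata_sff_data_xfer32 in the ops table */
}
#endif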

static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
		unsigned int offset, size_t xfer_size)
{
	bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	unsigned char *buf;

	buf = kmap_atomic(page);
	qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
	kunmap_atomic(buf);

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);
}

/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command ongoing
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;

	if (!qc->cursg) {
		qc->curbytes = qc->nbytes;
		return;
	}
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);

	/*
	 * Split the transfer when it splits a page boundary.  Note that the
	 * split still has to be dword aligned like all ATA data transfers.
	 */
	WARN_ON_ONCE(offset % 4);
	if (offset + qc->sect_size > PAGE_SIZE) {
		unsigned int split_len = PAGE_SIZE - offset;

		ata_pio_xfer(qc, page, offset, split_len);
		ata_pio_xfer(qc, nth_page(page, 1), 0,
			     qc->sect_size - split_len);
	} else {
		ata_pio_xfer(qc, page, offset, qc->sect_size);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		if (!qc->cursg)
			ap->hsm_task_state = HSM_ST_LAST;
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command ongoing
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}

/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ? */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command ongoing
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	trace_atapi_pio_transfer_data(qc, offset, count);

	/* do the actual data transfer */
	buf = kmap_atomic(page);
	consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
	kunmap_atomic(buf);

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command ongoing
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & ATAPI_COD))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc ongoing
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
						struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		   (qc->tf.flags & ATA_TFLAG_WRITE))
		    return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		   !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;

	if (in_wq) {
		/* EH might have kicked in while host lock is released. */
		qc = ata_qc_from_tag(ap, qc->tag);
		if (qc) {
			if (likely(!(qc->err_mask & AC_ERR_HSM))) {
				ata_sff_irq_on(ap);
				ata_qc_complete(qc);
			} else
				ata_port_freeze(ap);
		}
	} else {
		if (likely(!(qc->err_mask & AC_ERR_HSM)))
			ata_qc_complete(qc);
		else
			ata_port_freeze(ap);
	}
}

/**
 *	ata_sff_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc ongoing
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	int poll_next;

	lockdep_assert_held(ap->lock);

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	trace_ata_sff_hsm_state(qc, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device.  Mark hint.
					 */
					if (qc->dev->quirks &
					    ATA_QUIRK_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have.  Set NODEV_HINT
				 * for it.  Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		trace_ata_sff_hsm_command_complete(qc, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		WARN(true, "ata%d: SFF host state machine in invalid state %d",
		     ap->print_id, ap->hsm_task_state);
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);

void ata_sff_queue_work(struct work_struct *work)
{
	queue_work(ata_sff_wq, work);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_work);

void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	queue_delayed_work(ata_sff_wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);

void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);

void ata_sff_flush_pio_task(struct ata_port *ap)
{
	trace_ata_sff_flush_pio_task(ap);

	cancel_delayed_work_sync(&ap->sff_pio_task);

	/*
	 * We want to reset the HSM state to IDLE.  If we do so without
	 * grabbing the port lock, critical sections protected by it which
	 * expect the HSM state to stay stable may get surprised.  For
	 * example, we may set IDLE in between the time
	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
	 */
	spin_lock_irq(ap->lock);
	ap->hsm_task_state = HSM_ST_IDLE;
	spin_unlock_irq(ap->lock);

	ap->sff_pio_task_link = NULL;
}

static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	spin_lock_irq(ap->lock);

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		goto out_unlock;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		spin_unlock_irq(ap->lock);
		ata_msleep(ap, 2);
		spin_lock_irq(ap->lock);

		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			goto out_unlock;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
out_unlock:
	spin_unlock_irq(ap->lock);
}

/**
 *	ata_sff_qc_issue - issue taskfile to a SFF controller
 *	@qc: command to issue to device
 *
 *	This function issues a PIO or NODATA command to a SFF
 *	controller.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest.  otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 *	@qc: qc to fill result TF for
 *
 *	@qc is finished and result TF needs to be filled.  Fill it
 *	using ->sff_tf_read.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_warn(ap, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	trace_ata_sff_port_intr(qc, hsmv_on_idle);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST_IDLE:
		return ata_sff_idle_irq(ap);
	default:
		break;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 *	ata_sff_port_intr - Handle SFF port interrupt
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle port interrupt for given queued command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting an IRQ but the controller is actually
	 * asserting the IRQ line, a "nobody cared" unhandled-IRQ warning
	 * will ensue.  Check IRQ pending status if available and clear
	 * spurious IRQs.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	ata_sff_interrupt - Default SFF ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_sff_port_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
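
/*
 * Editor's note (illustration, not part of the original file): this
 * handler is normally wired up at host activation time.  A hedged sketch,
 * assuming a probed @host, a known @irq and a driver-defined SCSI host
 * template; my_activate() and my_sht are hypothetical.
 */
#if 0	/* example only */
static int my_activate(struct ata_host *host, int irq)
{
	return ata_host_activate(host, irq, ata_sff_interrupt,
				 IRQF_SHARED, &my_sht);
}
#endif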

/**
 *	ata_sff_lost_interrupt	-	Check for an apparent lost interrupt
 *	@ap: port that appears to have timed out
 *
 *	Called from the libata error handlers when the core code suspects
 *	an interrupt has been lost. If it has, complete anything we can and
 *	then return. The interface must support altstatus for this faster
 *	recovery to occur.
 *
 *	Locking:
 *	Caller holds host lock
 */

void ata_sff_lost_interrupt(struct ata_port *ap)
{
	u8 status = 0;
	struct ata_queued_cmd *qc;

	/* Only one outstanding command per SFF channel */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* We cannot lose an interrupt on a non-existent or polled command */
	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
		return;
	/* See if the controller thinks it is still busy - if so the command
	   isn't a lost IRQ but is still in progress */
	if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
		return;
	if (status & ATA_BUSY)
		return;

	/* There was a command running, we are no longer busy and we have
	   no interrupt. */
	ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
	/* Run the host interrupt logic as if the interrupt had not been
	   lost */
	ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 *	ata_sff_freeze - Freeze SFF controller port
 *	@ap: port to freeze
 *
 *	Freeze SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ap->ops->sff_check_status(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);

/**
 *	ata_sff_thaw - Thaw SFF controller port
 *	@ap: port to thaw
 *
 *	Thaw SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);

/**
 *	ata_sff_prereset - prepare SFF link for reset
 *	@link: SFF link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	SFF link @link is about to be reset.  Initialize it.  It first
 *	calls ata_std_prereset() and then waits for !BSY if the port is
 *	being softreset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	/* The standard prereset is best-effort and always returns 0 */
	ata_std_prereset(link, deadline);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_sff_wait_ready(link, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_warn(link,
				      "device not ready (errno=%d), forcing hardreset\n",
				      rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	RETURN:
 *	true if device is present, false if not.
 *
 *	LOCKING:
 *	caller.
 */
static bool ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return true;	/* we found a device */

	return false;		/* nothing found */
}

/**
 *	ata_sff_dev_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->sff_dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->sff_tf_read(ap, &tf);
	err = tf.error;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->quirks |= ATA_QUIRK_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_port_classify(ap, &tf);
	switch (class) {
	case ATA_DEV_UNKNOWN:
		/*
		 * If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->quirks & ATA_QUIRK_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
		break;
	case ATA_DEV_ATA:
		if (ap->ops->sff_check_status(ap) == 0)
			class = ATA_DEV_NONE;
		break;
	}
	return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);

/**
 *	ata_sff_wait_after_reset - wait for devices to become ready after reset
 *	@link: SFF link which is just reset
 *	@devmask: mask of present devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Wait for devices attached to SFF @link to become ready after
 *	reset.  It contains a preceding 150ms wait to avoid accessing
 *	the TF status register too early.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if some or all of the devices in @devmask
 *	don't seem to exist.  -errno on other errors.
 */
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	ata_msleep(ap, ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're going to wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			ata_msleep(ap, 50);	/* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1873 
1874 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1875 			     unsigned long deadline)
1876 {
1877 	struct ata_ioports *ioaddr = &ap->ioaddr;
1878 
1879 	if (ap->ioaddr.ctl_addr) {
1880 		/* software reset.  causes dev0 to be selected */
1881 		iowrite8(ap->ctl, ioaddr->ctl_addr);
1882 		udelay(20);	/* FIXME: flush */
1883 		iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1884 		udelay(20);	/* FIXME: flush */
1885 		iowrite8(ap->ctl, ioaddr->ctl_addr);
1886 		ap->last_ctl = ap->ctl;
1887 	}
1888 
1889 	/* wait for the port to become ready */
1890 	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1891 }
1892 
1893 /**
1894  *	ata_sff_softreset - reset host port via ATA SRST
1895  *	@link: ATA link to reset
1896  *	@classes: resulting classes of attached devices
1897  *	@deadline: deadline jiffies for the operation
1898  *
1899  *	Reset host port using ATA SRST.
1900  *
1901  *	LOCKING:
1902  *	Kernel thread context (may sleep)
1903  *
1904  *	RETURNS:
1905  *	0 on success, -errno otherwise.
1906  */
1907 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
1908 		      unsigned long deadline)
1909 {
1910 	struct ata_port *ap = link->ap;
1911 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1912 	unsigned int devmask = 0;
1913 	int rc;
1914 	u8 err;
1915 
1916 	/* determine if device 0/1 are present */
1917 	if (ata_devchk(ap, 0))
1918 		devmask |= (1 << 0);
1919 	if (slave_possible && ata_devchk(ap, 1))
1920 		devmask |= (1 << 1);
1921 
1922 	/* select device 0 again */
1923 	ap->ops->sff_dev_select(ap, 0);
1924 
1925 	/* issue bus reset */
1926 	rc = ata_bus_softreset(ap, devmask, deadline);
1927 	/* if link is occupied, -ENODEV too is an error */
1928 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
1929 		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
1930 		return rc;
1931 	}
1932 
1933 	/* determine by signature whether we have ATA or ATAPI devices */
1934 	classes[0] = ata_sff_dev_classify(&link->device[0],
1935 					  devmask & (1 << 0), &err);
1936 	if (slave_possible && err != 0x81)
1937 		classes[1] = ata_sff_dev_classify(&link->device[1],
1938 						  devmask & (1 << 1), &err);
1939 
1940 	return 0;
1941 }
1942 EXPORT_SYMBOL_GPL(ata_sff_softreset);
1943 
1944 /**
1945  *	sata_sff_hardreset - reset host port via SATA phy reset
1946  *	@link: link to reset
1947  *	@class: resulting class of attached device
1948  *	@deadline: deadline jiffies for the operation
1949  *
1950  *	SATA phy-reset host port using DET bits of SControl register,
1951  *	wait for !BSY and classify the attached device.
1952  *
1953  *	LOCKING:
1954  *	Kernel thread context (may sleep)
1955  *
1956  *	RETURNS:
1957  *	0 on success, -errno otherwise.
1958  */
1959 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
1960 		       unsigned long deadline)
1961 {
1962 	struct ata_eh_context *ehc = &link->eh_context;
1963 	const unsigned int *timing = sata_ehc_deb_timing(ehc);
1964 	bool online;
1965 	int rc;
1966 
1967 	rc = sata_link_hardreset(link, timing, deadline, &online,
1968 				 ata_sff_check_ready);
1969 	if (online)
1970 		*class = ata_sff_dev_classify(link->device, 1, NULL);
1971 
1972 	return rc;
1973 }
1974 EXPORT_SYMBOL_GPL(sata_sff_hardreset);
1975 
1976 /**
1977  *	ata_sff_postreset - SFF postreset callback
1978  *	@link: the target SFF ata_link
1979  *	@classes: classes of attached devices
1980  *
1981  *	This function is invoked after a successful reset.  It first
1982  *	calls ata_std_postreset() and performs SFF specific postreset
1983  *	processing.
1984  *
1985  *	LOCKING:
1986  *	Kernel thread context (may sleep)
1987  */
1988 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
1989 {
1990 	struct ata_port *ap = link->ap;
1991 
1992 	ata_std_postreset(link, classes);
1993 
1994 	/* is double-select really necessary? */
1995 	if (classes[0] != ATA_DEV_NONE)
1996 		ap->ops->sff_dev_select(ap, 1);
1997 	if (classes[1] != ATA_DEV_NONE)
1998 		ap->ops->sff_dev_select(ap, 0);
1999 
2000 	/* bail out if no device is present */
2001 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
2002 		return;
2003 
2004 	/* set up device control */
2005 	if (ata_sff_set_devctl(ap, ap->ctl))
2006 		ap->last_ctl = ap->ctl;
2007 }
2008 EXPORT_SYMBOL_GPL(ata_sff_postreset);
2009 
2010 /**
2011  *	ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2012  *	@qc: command
2013  *
2014  *	Drain the FIFO and device of any stuck data following a command
2015  *	failing to complete. In some cases this is necessary before a
2016  *	reset will recover the device.
2017  *
2018  */
2019 
2020 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2021 {
2022 	int count;
2023 	struct ata_port *ap;
2024 
2025 	/* We only need to flush incoming data when a command was running */
2026 	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2027 		return;
2028 
2029 	ap = qc->ap;
2030 	/* Drain up to 64K of data before we give up this recovery method */
2031 	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2032 						&& count < 65536; count += 2)
2033 		ioread16(ap->ioaddr.data_addr);
2034 
2035 	if (count)
2036 		ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2037 
2038 }
2039 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2040 
2041 /**
2042  *	ata_sff_error_handler - Stock error handler for SFF controller
2043  *	@ap: port to handle error for
2044  *
2045  *	Stock error handler for SFF controller.  It can handle both
2046  *	PATA and SATA controllers.  Many controllers should be able to
2047  *	use this EH as-is or with some added handling before and
2048  *	after.
2049  *
2050  *	LOCKING:
2051  *	Kernel thread context (may sleep)
2052  */
2053 void ata_sff_error_handler(struct ata_port *ap)
2054 {
2055 	ata_reset_fn_t softreset = ap->ops->softreset;
2056 	ata_reset_fn_t hardreset = ap->ops->hardreset;
2057 	struct ata_queued_cmd *qc;
2058 	unsigned long flags;
2059 
2060 	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2061 	if (qc && !(qc->flags & ATA_QCFLAG_EH))
2062 		qc = NULL;
2063 
2064 	spin_lock_irqsave(ap->lock, flags);
2065 
2066 	/*
2067 	 * We *MUST* do FIFO draining before we issue a reset as
2068 	 * several devices helpfully clear their internal state and
2069 	 * will lock solid if we touch the data port post reset. Pass
2070 	 * qc in case anyone wants to do different PIO/DMA recovery or
2071 	 * has per command fixups
2072 	 */
2073 	if (ap->ops->sff_drain_fifo)
2074 		ap->ops->sff_drain_fifo(qc);
2075 
2076 	spin_unlock_irqrestore(ap->lock, flags);
2077 
2078 	/* ignore built-in hardresets if SCR access is not available */
2079 	if ((hardreset == sata_std_hardreset ||
2080 	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2081 		hardreset = NULL;
2082 
2083 	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2084 		  ap->ops->postreset);
2085 }
2086 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
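
/*
 * A minimal sketch of the "added handling before and after" pattern the
 * comment above describes.  my_pata_unwedge() and my_pata_error_handler()
 * are hypothetical names, not libata interfaces; a real driver would put
 * its chip-specific fixup in the helper before delegating to the stock EH.
 */
static void my_pata_unwedge(struct ata_port *ap)
{
	/* hypothetical controller-specific recovery poke would go here */
}

static void my_pata_error_handler(struct ata_port *ap)
{
	my_pata_unwedge(ap);		/* driver-specific pre-reset fixup */
	ata_sff_error_handler(ap);	/* stock SFF EH does the real work */
}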
2087 
2088 /**
2089  *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
2090  *	@ioaddr: IO address structure to be initialized
2091  *
2092  *	Utility function which initializes data_addr, error_addr,
2093  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2094  *	device_addr, status_addr, and command_addr to standard offsets
2095  *	relative to cmd_addr.
2096  *
2097  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2098  */
2099 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2100 {
2101 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2102 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2103 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2104 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2105 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2106 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2107 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2108 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2109 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2110 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2111 }
2112 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
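
/*
 * A minimal sketch, assuming a driver that only knows its command-block
 * and control-block iomappings (cmd_base/ctl_base are hypothetical
 * parameters): after seeding cmd_addr, ata_sff_std_ports() derives the
 * remaining taskfile register addresses.
 */
static void my_setup_port(struct ata_port *ap, void __iomem *cmd_base,
			  void __iomem *ctl_base)
{
	ap->ioaddr.cmd_addr = cmd_base;
	ap->ioaddr.altstatus_addr = ctl_base;
	ap->ioaddr.ctl_addr = ctl_base;
	ata_sff_std_ports(&ap->ioaddr);	/* data..command from cmd_addr */
}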
2113 
2114 #ifdef CONFIG_PCI
2115 
2116 static bool ata_resources_present(struct pci_dev *pdev, int port)
2117 {
2118 	int i;
2119 
2120 	/* Check the PCI resources for this channel are enabled */
2121 	port *= 2;
2122 	for (i = 0; i < 2; i++) {
2123 		if (pci_resource_start(pdev, port + i) == 0 ||
2124 		    pci_resource_len(pdev, port + i) == 0)
2125 			return false;
2126 	}
2127 	return true;
2128 }
2129 
2130 /**
2131  *	ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2132  *	@host: target ATA host
2133  *
2134  *	Acquire native PCI ATA resources for @host and initialize the
2135  *	first two ports of @host accordingly.  Ports marked dummy are
2136  *	skipped and allocation failure makes the port dummy.
2137  *
2138  *	Note that native PCI resources are valid even for legacy hosts
2139  *	as we fix up pdev resources array early in boot, so this
2140  *	function can be used for both native and legacy SFF hosts.
2141  *
2142  *	LOCKING:
2143  *	Inherited from calling layer (may sleep).
2144  *
2145  *	RETURNS:
2146  *	0 if at least one port is initialized, -ENODEV if no port is
2147  *	available.
2148  */
2149 int ata_pci_sff_init_host(struct ata_host *host)
2150 {
2151 	struct device *gdev = host->dev;
2152 	struct pci_dev *pdev = to_pci_dev(gdev);
2153 	unsigned int mask = 0;
2154 	int i, rc;
2155 
2156 	/* request, iomap BARs and init port addresses accordingly */
2157 	for (i = 0; i < 2; i++) {
2158 		struct ata_port *ap = host->ports[i];
2159 		int base = i * 2;
2160 		void __iomem * const *iomap;
2161 
2162 		if (ata_port_is_dummy(ap))
2163 			continue;
2164 
2165 		/* Discard disabled ports.  Some controllers show
2166 		 * their unused channels this way.  Disabled ports are
2167 		 * made dummy.
2168 		 */
2169 		if (!ata_resources_present(pdev, i)) {
2170 			ap->ops = &ata_dummy_port_ops;
2171 			continue;
2172 		}
2173 
2174 		rc = pcim_iomap_regions(pdev, 0x3 << base,
2175 					dev_driver_string(gdev));
2176 		if (rc) {
2177 			dev_warn(gdev,
2178 				 "failed to request/iomap BARs for port %d (errno=%d)\n",
2179 				 i, rc);
2180 			if (rc == -EBUSY)
2181 				pcim_pin_device(pdev);
2182 			ap->ops = &ata_dummy_port_ops;
2183 			continue;
2184 		}
2185 		host->iomap = iomap = pcim_iomap_table(pdev);
2186 
2187 		ap->ioaddr.cmd_addr = iomap[base];
2188 		ap->ioaddr.altstatus_addr =
2189 		ap->ioaddr.ctl_addr = (void __iomem *)
2190 			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2191 		ata_sff_std_ports(&ap->ioaddr);
2192 
2193 		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2194 			(unsigned long long)pci_resource_start(pdev, base),
2195 			(unsigned long long)pci_resource_start(pdev, base + 1));
2196 
2197 		mask |= 1 << i;
2198 	}
2199 
2200 	if (!mask) {
2201 		dev_err(gdev, "no available native port\n");
2202 		return -ENODEV;
2203 	}
2204 
2205 	return 0;
2206 }
2207 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2208 
2209 /**
2210  *	ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2211  *	@pdev: target PCI device
2212  *	@ppi: array of port_info, must be enough for two ports
2213  *	@r_host: out argument for the initialized ATA host
2214  *
2215  *	Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2216  *	all PCI resources and initialize it accordingly in one go.
2217  *
2218  *	LOCKING:
2219  *	Inherited from calling layer (may sleep).
2220  *
2221  *	RETURNS:
2222  *	0 on success, -errno otherwise.
2223  */
2224 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2225 			     const struct ata_port_info * const *ppi,
2226 			     struct ata_host **r_host)
2227 {
2228 	struct ata_host *host;
2229 	int rc;
2230 
2231 	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2232 		return -ENOMEM;
2233 
2234 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2235 	if (!host) {
2236 		dev_err(&pdev->dev, "failed to allocate ATA host\n");
2237 		rc = -ENOMEM;
2238 		goto err_out;
2239 	}
2240 
2241 	rc = ata_pci_sff_init_host(host);
2242 	if (rc)
2243 		goto err_out;
2244 
2245 	devres_remove_group(&pdev->dev, NULL);
2246 	*r_host = host;
2247 	return 0;
2248 
2249 err_out:
2250 	devres_release_group(&pdev->dev, NULL);
2251 	return rc;
2252 }
2253 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2254 
2255 /**
2256  *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2257  *	@host: target SFF ATA host
2258  *	@irq_handler: irq_handler used when requesting IRQ(s)
2259  *	@sht: scsi_host_template to use when registering the host
2260  *
2261  *	This is the counterpart of ata_host_activate() for SFF ATA
2262  *	hosts.  This separate helper is necessary because SFF hosts
2263  *	use two separate interrupts in legacy mode.
2264  *
2265  *	LOCKING:
2266  *	Inherited from calling layer (may sleep).
2267  *
2268  *	RETURNS:
2269  *	0 on success, -errno otherwise.
2270  */
2271 int ata_pci_sff_activate_host(struct ata_host *host,
2272 			      irq_handler_t irq_handler,
2273 			      const struct scsi_host_template *sht)
2274 {
2275 	struct device *dev = host->dev;
2276 	struct pci_dev *pdev = to_pci_dev(dev);
2277 	const char *drv_name = dev_driver_string(host->dev);
2278 	int legacy_mode = 0, rc;
2279 
2280 	rc = ata_host_start(host);
2281 	if (rc)
2282 		return rc;
2283 
2284 	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2285 		u8 tmp8, mask = 0;
2286 
2287 		/*
2288 		 * ATA spec says we should use legacy mode when one
2289 		 * port is in legacy mode, but disabled ports on some
2290 		 * PCI hosts appear as fixed legacy ports, e.g. SB600/700
2291 		 * on which the secondary port is not wired, so
2292 		 * ignore ports that are marked as 'dummy' during
2293 		 * this check.
2294 		 */
2295 		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2296 		if (!ata_port_is_dummy(host->ports[0]))
2297 			mask |= (1 << 0);
2298 		if (!ata_port_is_dummy(host->ports[1]))
2299 			mask |= (1 << 2);
2300 		if ((tmp8 & mask) != mask)
2301 			legacy_mode = 1;
2302 	}
2303 
2304 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
2305 		return -ENOMEM;
2306 
2307 	if (!legacy_mode && pdev->irq) {
2308 		int i;
2309 
2310 		rc = devm_request_irq(dev, pdev->irq, irq_handler,
2311 				      IRQF_SHARED, drv_name, host);
2312 		if (rc)
2313 			goto out;
2314 
2315 		for (i = 0; i < 2; i++) {
2316 			if (ata_port_is_dummy(host->ports[i]))
2317 				continue;
2318 			ata_port_desc_misc(host->ports[i], pdev->irq);
2319 		}
2320 	} else if (legacy_mode) {
2321 		if (!ata_port_is_dummy(host->ports[0])) {
2322 			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2323 					      irq_handler, IRQF_SHARED,
2324 					      drv_name, host);
2325 			if (rc)
2326 				goto out;
2327 
2328 			ata_port_desc_misc(host->ports[0],
2329 					   ATA_PRIMARY_IRQ(pdev));
2330 		}
2331 
2332 		if (!ata_port_is_dummy(host->ports[1])) {
2333 			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2334 					      irq_handler, IRQF_SHARED,
2335 					      drv_name, host);
2336 			if (rc)
2337 				goto out;
2338 
2339 			ata_port_desc_misc(host->ports[1],
2340 					   ATA_SECONDARY_IRQ(pdev));
2341 		}
2342 	}
2343 
2344 	rc = ata_host_register(host, sht);
2345 out:
2346 	if (rc == 0)
2347 		devres_remove_group(dev, NULL);
2348 	else
2349 		devres_release_group(dev, NULL);
2350 
2351 	return rc;
2352 }
2353 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
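
/*
 * A sketch of the usual probe flow for a driver that needs fixups
 * between host preparation and activation and so cannot use
 * ata_pci_sff_init_one() directly.  my_port_info, my_sht and
 * my_pci_probe() are assumed, driver-private definitions.
 */
static const struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.port_ops	= &ata_sff_port_ops,
};

static const struct scsi_host_template my_sht = {
	ATA_PIO_SHT("my_pata"),
};

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* driver-specific host/port fixups would go here */

	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &my_sht);
}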
2354 
2355 static const struct ata_port_info *ata_sff_find_valid_pi(
2356 					const struct ata_port_info * const *ppi)
2357 {
2358 	int i;
2359 
2360 	/* look up the first valid port_info */
2361 	for (i = 0; i < 2 && ppi[i]; i++)
2362 		if (ppi[i]->port_ops != &ata_dummy_port_ops)
2363 			return ppi[i];
2364 
2365 	return NULL;
2366 }
2367 
2368 static int ata_pci_init_one(struct pci_dev *pdev,
2369 		const struct ata_port_info * const *ppi,
2370 		const struct scsi_host_template *sht, void *host_priv,
2371 		int hflags, bool bmdma)
2372 {
2373 	struct device *dev = &pdev->dev;
2374 	const struct ata_port_info *pi;
2375 	struct ata_host *host = NULL;
2376 	int rc;
2377 
2378 	pi = ata_sff_find_valid_pi(ppi);
2379 	if (!pi) {
2380 		dev_err(&pdev->dev, "no valid port_info specified\n");
2381 		return -EINVAL;
2382 	}
2383 
2384 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
2385 		return -ENOMEM;
2386 
2387 	rc = pcim_enable_device(pdev);
2388 	if (rc)
2389 		goto out;
2390 
2391 #ifdef CONFIG_ATA_BMDMA
2392 	if (bmdma)
2393 		/* prepare and activate BMDMA host */
2394 		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2395 	else
2396 #endif
2397 		/* prepare and activate SFF host */
2398 		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2399 	if (rc)
2400 		goto out;
2401 	host->private_data = host_priv;
2402 	host->flags |= hflags;
2403 
2404 #ifdef CONFIG_ATA_BMDMA
2405 	if (bmdma) {
2406 		pci_set_master(pdev);
2407 		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2408 	} else
2409 #endif
2410 		rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2411 out:
2412 	if (rc == 0)
2413 		devres_remove_group(&pdev->dev, NULL);
2414 	else
2415 		devres_release_group(&pdev->dev, NULL);
2416 
2417 	return rc;
2418 }
2419 
2420 /**
2421  *	ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2422  *	@pdev: Controller to be initialized
2423  *	@ppi: array of port_info, must be enough for two ports
2424  *	@sht: scsi_host_template to use when registering the host
2425  *	@host_priv: host private_data
2426  *	@hflag: host flags
2427  *
2428  *	This is a helper function which can be called from a driver's
2429  *	xxx_init_one() probe function if the hardware uses traditional
2430  *	IDE taskfile registers and is PIO only.
2431  *
2432  *	ASSUMPTION:
2433  *	Nobody makes a single channel controller that appears solely as
2434  *	the secondary legacy port on PCI.
2435  *
2436  *	LOCKING:
2437  *	Inherited from PCI layer (may sleep).
2438  *
2439  *	RETURNS:
2440  *	Zero on success, negative errno value on error.
2441  */
2442 int ata_pci_sff_init_one(struct pci_dev *pdev,
2443 		 const struct ata_port_info * const *ppi,
2444 		 const struct scsi_host_template *sht, void *host_priv, int hflag)
2445 {
2446 	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
2447 }
2448 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
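
/*
 * When no fixups are needed, the probe flow sketched earlier collapses
 * to a single call; this is the common shape of xxx_init_one() in
 * PIO-only drivers (my_port_info/my_sht as assumed above).
 */
static int my_simple_init_one(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };

	return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL, 0);
}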
2449 
2450 #endif /* CONFIG_PCI */
2451 
2452 /*
2453  *	BMDMA support
2454  */
2455 
2456 #ifdef CONFIG_ATA_BMDMA
2457 
2458 const struct ata_port_operations ata_bmdma_port_ops = {
2459 	.inherits		= &ata_sff_port_ops,
2460 
2461 	.error_handler		= ata_bmdma_error_handler,
2462 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
2463 
2464 	.qc_prep		= ata_bmdma_qc_prep,
2465 	.qc_issue		= ata_bmdma_qc_issue,
2466 
2467 	.sff_irq_clear		= ata_bmdma_irq_clear,
2468 	.bmdma_setup		= ata_bmdma_setup,
2469 	.bmdma_start		= ata_bmdma_start,
2470 	.bmdma_stop		= ata_bmdma_stop,
2471 	.bmdma_status		= ata_bmdma_status,
2472 
2473 	.port_start		= ata_bmdma_port_start,
2474 };
2475 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2476 
2477 const struct ata_port_operations ata_bmdma32_port_ops = {
2478 	.inherits		= &ata_bmdma_port_ops,
2479 
2480 	.sff_data_xfer		= ata_sff_data_xfer32,
2481 	.port_start		= ata_bmdma_port_start32,
2482 };
2483 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2484 
2485 /**
2486  *	ata_bmdma_fill_sg - Fill PCI IDE PRD table
2487  *	@qc: Metadata associated with taskfile to be transferred
2488  *
2489  *	Fill PCI IDE PRD (scatter-gather) table with segments
2490  *	associated with the current disk command.
2491  *
2492  *	LOCKING:
2493  *	spin_lock_irqsave(host lock)
2494  *
2495  */
2496 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2497 {
2498 	struct ata_port *ap = qc->ap;
2499 	struct ata_bmdma_prd *prd = ap->bmdma_prd;
2500 	struct scatterlist *sg;
2501 	unsigned int si, pi;
2502 
2503 	pi = 0;
2504 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2505 		u32 addr, offset;
2506 		u32 sg_len, len;
2507 
2508 		/* determine if physical DMA addr spans 64K boundary.
2509 		 * Note h/w doesn't support 64-bit, so we unconditionally
2510 		 * truncate dma_addr_t to u32.
2511 		 */
2512 		addr = (u32) sg_dma_address(sg);
2513 		sg_len = sg_dma_len(sg);
2514 
2515 		while (sg_len) {
2516 			offset = addr & 0xffff;
2517 			len = sg_len;
2518 			if ((offset + sg_len) > 0x10000)
2519 				len = 0x10000 - offset;
2520 
2521 			prd[pi].addr = cpu_to_le32(addr);
2522 			prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2523 
2524 			pi++;
2525 			sg_len -= len;
2526 			addr += len;
2527 		}
2528 	}
2529 
2530 	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2531 }
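
/*
 * Worked example (illustrative) of the boundary split above: an S/G
 * entry with addr 0x0000f000 and length 0x3000 crosses a 64K boundary,
 * so it is emitted as two PRDs, (0x0000f000, 0x1000) and
 * (0x00010000, 0x2000).  A full 64K segment is encoded with a length
 * field of 0x0000, which is what the spec defines as meaning 64K.
 */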
2532 
2533 /**
2534  *	ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2535  *	@qc: Metadata associated with taskfile to be transferred
2536  *
2537  *	Fill PCI IDE PRD (scatter-gather) table with segments
2538  *	associated with the current disk command.  Perform the fill
2539  *	so that we avoid writing any PRD records with a 64K length,
2540  *	for controllers that don't follow the spec.
2541  *
2542  *	LOCKING:
2543  *	spin_lock_irqsave(host lock)
2544  *
2545  */
2546 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2547 {
2548 	struct ata_port *ap = qc->ap;
2549 	struct ata_bmdma_prd *prd = ap->bmdma_prd;
2550 	struct scatterlist *sg;
2551 	unsigned int si, pi;
2552 
2553 	pi = 0;
2554 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2555 		u32 addr, offset;
2556 		u32 sg_len, len, blen;
2557 
2558 		/* determine if physical DMA addr spans 64K boundary.
2559 		 * Note h/w doesn't support 64-bit, so we unconditionally
2560 		 * truncate dma_addr_t to u32.
2561 		 */
2562 		addr = (u32) sg_dma_address(sg);
2563 		sg_len = sg_dma_len(sg);
2564 
2565 		while (sg_len) {
2566 			offset = addr & 0xffff;
2567 			len = sg_len;
2568 			if ((offset + sg_len) > 0x10000)
2569 				len = 0x10000 - offset;
2570 
2571 			blen = len & 0xffff;
2572 			prd[pi].addr = cpu_to_le32(addr);
2573 			if (blen == 0) {
2574 				/* Some PATA chipsets like the CS5530 can't
2575 				   cope with 0x0000 meaning 64K as the spec
2576 				   says */
2577 				prd[pi].flags_len = cpu_to_le32(0x8000);
2578 				blen = 0x8000;
2579 				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2580 			}
2581 			prd[pi].flags_len = cpu_to_le32(blen);
2582 
2583 			pi++;
2584 			sg_len -= len;
2585 			addr += len;
2586 		}
2587 	}
2588 
2589 	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2590 }
2591 
2592 /**
2593  *	ata_bmdma_qc_prep - Prepare taskfile for submission
2594  *	@qc: Metadata associated with taskfile to be prepared
2595  *
2596  *	Prepare ATA taskfile for submission.
2597  *
2598  *	LOCKING:
2599  *	spin_lock_irqsave(host lock)
2600  */
2601 enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2602 {
2603 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2604 		return AC_ERR_OK;
2605 
2606 	ata_bmdma_fill_sg(qc);
2607 
2608 	return AC_ERR_OK;
2609 }
2610 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2611 
2612 /**
2613  *	ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2614  *	@qc: Metadata associated with taskfile to be prepared
2615  *
2616  *	Prepare ATA taskfile for submission.
2617  *
2618  *	LOCKING:
2619  *	spin_lock_irqsave(host lock)
2620  */
2621 enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2622 {
2623 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2624 		return AC_ERR_OK;
2625 
2626 	ata_bmdma_fill_sg_dumb(qc);
2627 
2628 	return AC_ERR_OK;
2629 }
2630 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2631 
2632 /**
2633  *	ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2634  *	@qc: command to issue to device
2635  *
2636  *	This function issues a PIO, NODATA or DMA command to a
2637  *	SFF/BMDMA controller.  PIO and NODATA are handled by
2638  *	ata_sff_qc_issue().
2639  *
2640  *	LOCKING:
2641  *	spin_lock_irqsave(host lock)
2642  *
2643  *	RETURNS:
2644  *	Zero on success, AC_ERR_* mask on failure
2645  */
2646 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2647 {
2648 	struct ata_port *ap = qc->ap;
2649 	struct ata_link *link = qc->dev->link;
2650 
2651 	/* defer PIO handling to sff_qc_issue */
2652 	if (!ata_is_dma(qc->tf.protocol))
2653 		return ata_sff_qc_issue(qc);
2654 
2655 	/* select the device */
2656 	ata_dev_select(ap, qc->dev->devno, 1, 0);
2657 
2658 	/* start the command */
2659 	switch (qc->tf.protocol) {
2660 	case ATA_PROT_DMA:
2661 		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2662 
2663 		trace_ata_tf_load(ap, &qc->tf);
2664 		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2665 		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
2666 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
2667 		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
2668 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
2669 		ap->hsm_task_state = HSM_ST_LAST;
2670 		break;
2671 
2672 	case ATAPI_PROT_DMA:
2673 		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2674 
2675 		trace_ata_tf_load(ap, &qc->tf);
2676 		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2677 		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
2678 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
2679 		ap->hsm_task_state = HSM_ST_FIRST;
2680 
2681 		/* send cdb by polling if no cdb interrupt */
2682 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2683 			ata_sff_queue_pio_task(link, 0);
2684 		break;
2685 
2686 	default:
2687 		WARN_ON(1);
2688 		return AC_ERR_SYSTEM;
2689 	}
2690 
2691 	return 0;
2692 }
2693 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2694 
2695 /**
2696  *	ata_bmdma_port_intr - Handle BMDMA port interrupt
2697  *	@ap: Port on which interrupt arrived (possibly...)
2698  *	@qc: Taskfile currently active in engine
2699  *
2700  *	Handle port interrupt for given queued command.
2701  *
2702  *	LOCKING:
2703  *	spin_lock_irqsave(host lock)
2704  *
2705  *	RETURNS:
2706  *	One if interrupt was handled, zero if not (shared irq).
2707  */
2708 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2709 {
2710 	struct ata_eh_info *ehi = &ap->link.eh_info;
2711 	u8 host_stat = 0;
2712 	bool bmdma_stopped = false;
2713 	unsigned int handled;
2714 
2715 	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2716 		/* check status of DMA engine */
2717 		host_stat = ap->ops->bmdma_status(ap);
2718 		trace_ata_bmdma_status(ap, host_stat);
2719 
2720 		/* if it's not our irq... */
2721 		if (!(host_stat & ATA_DMA_INTR))
2722 			return ata_sff_idle_irq(ap);
2723 
2724 		/* before we do anything else, clear DMA-Start bit */
2725 		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2726 		ap->ops->bmdma_stop(qc);
2727 		bmdma_stopped = true;
2728 
2729 		if (unlikely(host_stat & ATA_DMA_ERR)) {
2730 			/* error when transferring data to/from memory */
2731 			qc->err_mask |= AC_ERR_HOST_BUS;
2732 			ap->hsm_task_state = HSM_ST_ERR;
2733 		}
2734 	}
2735 
2736 	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2737 
2738 	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2739 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2740 
2741 	return handled;
2742 }
2743 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2744 
2745 /**
2746  *	ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2747  *	@irq: irq line (unused)
2748  *	@dev_instance: pointer to our ata_host information structure
2749  *
2750  *	Default interrupt handler for PCI IDE devices.  Calls
2751  *	ata_bmdma_port_intr() for each port that is not disabled.
2752  *
2753  *	LOCKING:
2754  *	Obtains host lock during operation.
2755  *
2756  *	RETURNS:
2757  *	IRQ_NONE or IRQ_HANDLED.
2758  */
2759 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2760 {
2761 	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2762 }
2763 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2764 
2765 /**
2766  *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
2767  *	@ap: port to handle error for
2768  *
2769  *	Stock error handler for BMDMA controller.  It can handle both
2770  *	PATA and SATA controllers.  Most BMDMA controllers should be
2771  *	able to use this EH as-is or with some added handling before
2772  *	and after.
2773  *
2774  *	LOCKING:
2775  *	Kernel thread context (may sleep)
2776  */
2777 void ata_bmdma_error_handler(struct ata_port *ap)
2778 {
2779 	struct ata_queued_cmd *qc;
2780 	unsigned long flags;
2781 	bool thaw = false;
2782 
2783 	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2784 	if (qc && !(qc->flags & ATA_QCFLAG_EH))
2785 		qc = NULL;
2786 
2787 	/* reset PIO HSM and stop DMA engine */
2788 	spin_lock_irqsave(ap->lock, flags);
2789 
2790 	if (qc && ata_is_dma(qc->tf.protocol)) {
2791 		u8 host_stat;
2792 
2793 		host_stat = ap->ops->bmdma_status(ap);
2794 		trace_ata_bmdma_status(ap, host_stat);
2795 
2796 		/* BMDMA controllers indicate host bus error by
2797 		 * setting DMA_ERR bit and timing out.  As it wasn't
2798 		 * really a timeout event, adjust error mask and
2799 		 * cancel frozen state.
2800 		 */
2801 		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2802 			qc->err_mask = AC_ERR_HOST_BUS;
2803 			thaw = true;
2804 		}
2805 
2806 		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2807 		ap->ops->bmdma_stop(qc);
2808 
2809 		/* if we're gonna thaw, make sure IRQ is clear */
2810 		if (thaw) {
2811 			ap->ops->sff_check_status(ap);
2812 			if (ap->ops->sff_irq_clear)
2813 				ap->ops->sff_irq_clear(ap);
2814 		}
2815 	}
2816 
2817 	spin_unlock_irqrestore(ap->lock, flags);
2818 
2819 	if (thaw)
2820 		ata_eh_thaw_port(ap);
2821 
2822 	ata_sff_error_handler(ap);
2823 }
2824 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2825 
2826 /**
2827  *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2828  *	@qc: internal command to clean up
2829  *
2830  *	LOCKING:
2831  *	Kernel thread context (may sleep)
2832  */
2833 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2834 {
2835 	struct ata_port *ap = qc->ap;
2836 	unsigned long flags;
2837 
2838 	if (ata_is_dma(qc->tf.protocol)) {
2839 		spin_lock_irqsave(ap->lock, flags);
2840 		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2841 		ap->ops->bmdma_stop(qc);
2842 		spin_unlock_irqrestore(ap->lock, flags);
2843 	}
2844 }
2845 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2846 
2847 /**
2848  *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2849  *	@ap: Port associated with this ATA transaction.
2850  *
2851  *	Clear interrupt and error flags in DMA status register.
2852  *
2853  *	May be used as the irq_clear() entry in ata_port_operations.
2854  *
2855  *	LOCKING:
2856  *	spin_lock_irqsave(host lock)
2857  */
2858 void ata_bmdma_irq_clear(struct ata_port *ap)
2859 {
2860 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
2861 
2862 	if (!mmio)
2863 		return;
2864 
2865 	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2866 }
2867 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2868 
2869 /**
2870  *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2871  *	@qc: Info associated with this ATA transaction.
2872  *
2873  *	LOCKING:
2874  *	spin_lock_irqsave(host lock)
2875  */
2876 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2877 {
2878 	struct ata_port *ap = qc->ap;
2879 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2880 	u8 dmactl;
2881 
2882 	/* load PRD table addr. */
2883 	mb();	/* make sure PRD table writes are visible to controller */
2884 	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2885 
2886 	/* specify data direction, triple-check start bit is clear */
2887 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2888 	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
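	/* ATA_DMA_WR = engine writes to system memory, i.e. an ATA read */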
2889 	if (!rw)
2890 		dmactl |= ATA_DMA_WR;
2891 	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2892 
2893 	/* issue r/w command */
2894 	ap->ops->sff_exec_command(ap, &qc->tf);
2895 }
2896 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2897 
2898 /**
2899  *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
2900  *	@qc: Info associated with this ATA transaction.
2901  *
2902  *	LOCKING:
2903  *	spin_lock_irqsave(host lock)
2904  */
2905 void ata_bmdma_start(struct ata_queued_cmd *qc)
2906 {
2907 	struct ata_port *ap = qc->ap;
2908 	u8 dmactl;
2909 
2910 	/* start host DMA transaction */
2911 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2912 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2913 
2914 	/* Strictly, one may wish to issue an ioread8() here, to
2915 	 * flush the mmio write.  However, control also passes
2916 	 * to the hardware at this point, and it will interrupt
2917 	 * us when we are to resume control.  So, in effect,
2918 	 * we don't care when the mmio write flushes.
2919 	 * Further, a read of the DMA status register _immediately_
2920 	 * following the write may not be what certain flaky hardware
2921 	 * is expecting, so I think it is best to not add a readb()
2922 	 * without first testing all the MMIO ATA cards/mobos.
2923 	 * Or maybe I'm just being paranoid.
2924 	 *
2925 	 * FIXME: The posting of this write means I/O starts are
2926 	 * unnecessarily delayed for MMIO
2927 	 */
2928 }
2929 EXPORT_SYMBOL_GPL(ata_bmdma_start);
2930 
2931 /**
2932  *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
2933  *	@qc: Command we are ending DMA for
2934  *
2935  *	Clears the ATA_DMA_START flag in the dma control register
2936  *
2937  *	May be used as the bmdma_stop() entry in ata_port_operations.
2938  *
2939  *	LOCKING:
2940  *	spin_lock_irqsave(host lock)
2941  */
2942 void ata_bmdma_stop(struct ata_queued_cmd *qc)
2943 {
2944 	struct ata_port *ap = qc->ap;
2945 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
2946 
2947 	/* clear start/stop bit */
2948 	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
2949 		 mmio + ATA_DMA_CMD);
2950 
2951 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
2952 	ata_sff_dma_pause(ap);
2953 }
2954 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2955 
2956 /**
2957  *	ata_bmdma_status - Read PCI IDE BMDMA status
2958  *	@ap: Port associated with this ATA transaction.
2959  *
2960  *	Read and return BMDMA status register.
2961  *
2962  *	May be used as the bmdma_status() entry in ata_port_operations.
2963  *
2964  *	LOCKING:
2965  *	spin_lock_irqsave(host lock)
2966  */
2967 u8 ata_bmdma_status(struct ata_port *ap)
2968 {
2969 	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2970 }
2971 EXPORT_SYMBOL_GPL(ata_bmdma_status);
2972 
2973 
2974 /**
2975  *	ata_bmdma_port_start - Set port up for bmdma.
2976  *	@ap: Port to initialize
2977  *
2978  *	Called just after data structures for each port are
2979  *	initialized.  Allocates space for PRD table.
2980  *
2981  *	May be used as the port_start() entry in ata_port_operations.
2982  *
2983  *	LOCKING:
2984  *	Inherited from caller.
2985  */
2986 int ata_bmdma_port_start(struct ata_port *ap)
2987 {
2988 	if (ap->mwdma_mask || ap->udma_mask) {
2989 		ap->bmdma_prd =
2990 			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
2991 					    &ap->bmdma_prd_dma, GFP_KERNEL);
2992 		if (!ap->bmdma_prd)
2993 			return -ENOMEM;
2994 	}
2995 
2996 	return 0;
2997 }
2998 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
2999 
3000 /**
3001  *	ata_bmdma_port_start32 - Set port up for dma.
3002  *	@ap: Port to initialize
3003  *
3004  *	Called just after data structures for each port are
3005  *	initialized.  Enables 32bit PIO and allocates space for PRD
3006  *	table.
3007  *
3008  *	May be used as the port_start() entry in ata_port_operations for
3009  *	devices that are capable of 32bit PIO.
3010  *
3011  *	LOCKING:
3012  *	Inherited from caller.
3013  */
3014 int ata_bmdma_port_start32(struct ata_port *ap)
3015 {
3016 	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3017 	return ata_bmdma_port_start(ap);
3018 }
3019 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3020 
3021 #ifdef CONFIG_PCI
3022 
3023 /**
3024  *	ata_pci_bmdma_clear_simplex -	attempt to kick device out of simplex
3025  *	@pdev: PCI device
3026  *
3027  *	Some PCI ATA devices report simplex mode but in fact can be told to
3028  *	enter non simplex mode. This implements the necessary logic to
3029  *	perform the task on such devices. Calling it on other devices will
3030  *	have -undefined- behaviour.
3031  */
3032 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3033 {
3034 #ifdef CONFIG_HAS_IOPORT
3035 	unsigned long bmdma = pci_resource_start(pdev, 4);
3036 	u8 simplex;
3037 
3038 	if (bmdma == 0)
3039 		return -ENOENT;
3040 
3041 	simplex = inb(bmdma + 0x02);
3042 	outb(simplex & 0x60, bmdma + 0x02);
3043 	simplex = inb(bmdma + 0x02);
3044 	if (simplex & 0x80)
3045 		return -EOPNOTSUPP;
3046 	return 0;
3047 #else
3048 	return -ENOENT;
3049 #endif /* CONFIG_HAS_IOPORT */
3050 }
3051 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
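
/*
 * A minimal sketch, assuming a driver that found ATA_HOST_SIMPLEX set
 * during BMDMA init on a controller known to fake simplex mode; on
 * success the flag can be dropped so both channels keep DMA.
 * my_try_clear_simplex() is a hypothetical helper.
 */
static void my_try_clear_simplex(struct pci_dev *pdev, struct ata_host *host)
{
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    ata_pci_bmdma_clear_simplex(pdev) == 0)
		host->flags &= ~ATA_HOST_SIMPLEX;
}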
3052 
3053 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3054 {
3055 	int i;
3056 
3057 	dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3058 
3059 	for (i = 0; i < 2; i++) {
3060 		host->ports[i]->mwdma_mask = 0;
3061 		host->ports[i]->udma_mask = 0;
3062 	}
3063 }
3064 
3065 /**
3066  *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3067  *	@host: target ATA host
3068  *
3069  *	Acquire PCI BMDMA resources and initialize @host accordingly.
3070  *
3071  *	LOCKING:
3072  *	Inherited from calling layer (may sleep).
3073  */
3074 void ata_pci_bmdma_init(struct ata_host *host)
3075 {
3076 	struct device *gdev = host->dev;
3077 	struct pci_dev *pdev = to_pci_dev(gdev);
3078 	int i, rc;
3079 
3080 	/* No BAR4 allocation: No DMA */
3081 	if (pci_resource_start(pdev, 4) == 0) {
3082 		ata_bmdma_nodma(host, "BAR4 is zero");
3083 		return;
3084 	}
3085 
3086 	/*
3087 	 * Some controllers require BMDMA region to be initialized
3088 	 * even if DMA is not in use to clear IRQ status via
3089 	 * ->sff_irq_clear method.  Try to initialize bmdma_addr
3090 	 * regardless of dma masks.
3091 	 */
3092 	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
3093 	if (rc)
3094 		ata_bmdma_nodma(host, "failed to set dma mask");
3095 
3096 	/* request and iomap DMA region */
3097 	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3098 	if (rc) {
3099 		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3100 		return;
3101 	}
3102 	host->iomap = pcim_iomap_table(pdev);
3103 
3104 	for (i = 0; i < 2; i++) {
3105 		struct ata_port *ap = host->ports[i];
3106 		void __iomem *bmdma = host->iomap[4] + 8 * i;
3107 
3108 		if (ata_port_is_dummy(ap))
3109 			continue;
3110 
3111 		ap->ioaddr.bmdma_addr = bmdma;
3112 		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3113 		    (ioread8(bmdma + 2) & 0x80))
3114 			host->flags |= ATA_HOST_SIMPLEX;
3115 
3116 		ata_port_desc(ap, "bmdma 0x%llx",
3117 		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3118 	}
3119 }
3120 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3121 
3122 /**
3123  *	ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3124  *	@pdev: target PCI device
3125  *	@ppi: array of port_info, must be enough for two ports
3126  *	@r_host: out argument for the initialized ATA host
3127  *
3128  *	Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3129  *	resources and initialize it accordingly in one go.
3130  *
3131  *	LOCKING:
3132  *	Inherited from calling layer (may sleep).
3133  *
3134  *	RETURNS:
3135  *	0 on success, -errno otherwise.
3136  */
3137 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3138 			       const struct ata_port_info * const * ppi,
3139 			       struct ata_host **r_host)
3140 {
3141 	int rc;
3142 
3143 	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3144 	if (rc)
3145 		return rc;
3146 
3147 	ata_pci_bmdma_init(*r_host);
3148 	return 0;
3149 }
3150 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3151 
3152 /**
3153  *	ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3154  *	@pdev: Controller to be initialized
3155  *	@ppi: array of port_info, must be enough for two ports
3156  *	@sht: scsi_host_template to use when registering the host
3157  *	@host_priv: host private_data
3158  *	@hflags: host flags
3159  *
3160  *	This function is similar to ata_pci_sff_init_one() but also
3161  *	takes care of BMDMA initialization.
3162  *
3163  *	LOCKING:
3164  *	Inherited from PCI layer (may sleep).
3165  *
3166  *	RETURNS:
3167  *	Zero on success, negative errno value on error.
3168  */
3169 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3170 			   const struct ata_port_info * const * ppi,
3171 			   const struct scsi_host_template *sht, void *host_priv,
3172 			   int hflags)
3173 {
3174 	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
3175 }
3176 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
3177 
3178 #endif /* CONFIG_PCI */
3179 #endif /* CONFIG_ATA_BMDMA */
3180 
3181 /**
3182  *	ata_sff_port_init - Initialize SFF/BMDMA ATA port
3183  *	@ap: Port to initialize
3184  *
3185  *	Called on port allocation to initialize SFF/BMDMA specific
3186  *	fields.
3187  *
3188  *	LOCKING:
3189  *	None.
3190  */
3191 void ata_sff_port_init(struct ata_port *ap)
3192 {
3193 	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3194 	ap->ctl = ATA_DEVCTL_OBS;
3195 	ap->last_ctl = 0xFF;
3196 }
3197 
3198 int __init ata_sff_init(void)
3199 {
3200 	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3201 	if (!ata_sff_wq)
3202 		return -ENOMEM;
3203 
3204 	return 0;
3205 }
3206 
3207 void ata_sff_exit(void)
3208 {
3209 	destroy_workqueue(ata_sff_wq);
3210 }
3211