/*
 * sata_dwc.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *          Copyright 2006 Applied Micro Circuits Corporation
 *          COPYRIGHT (C) 2005  SYNOPSYS, INC.  ALL RIGHTS RESERVED
 *
 * This program is free software; you can redistribute
 * it and/or modify it under the terms of the GNU
 * General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License,
 * or (at your option) any later version.
 *
 */
/*
 * SATA support based on the Canyonlands chip.
 *
 * 04-17-2009
 *		The local version of this driver for the Canyonlands board
 *		does not use interrupts but polls the chip instead.
 */

#include <common.h>
#include <command.h>
#include <pci.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <malloc.h>
#include <ata.h>
#include <linux/ctype.h>

#include "sata_dwc.h"

#define DMA_NUM_CHANS			1
#define DMA_NUM_CHAN_REGS		8

#define AHB_DMA_BRST_DFLT		16

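/* Each AHB DMA register is a 64-bit quantity, accessed as a low/high pair. */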
struct dmareg {
	u32 low;
	u32 high;
};

struct dma_chan_regs {
	struct dmareg sar;
	struct dmareg dar;
	struct dmareg llp;
	struct dmareg ctl;
	struct dmareg sstat;
	struct dmareg dstat;
	struct dmareg sstatar;
	struct dmareg dstatar;
	struct dmareg cfg;
	struct dmareg sgr;
	struct dmareg dsr;
};

struct dma_interrupt_regs {
	struct dmareg tfr;
	struct dmareg block;
	struct dmareg srctran;
	struct dmareg dsttran;
	struct dmareg error;
};

struct ahb_dma_regs {
	struct dma_chan_regs	chan_regs[DMA_NUM_CHAN_REGS];
	struct dma_interrupt_regs	interrupt_raw;
	struct dma_interrupt_regs	interrupt_status;
	struct dma_interrupt_regs	interrupt_mask;
	struct dma_interrupt_regs	interrupt_clear;
	struct dmareg			statusInt;
	struct dmareg			rq_srcreg;
	struct dmareg			rq_dstreg;
	struct dmareg			rq_sgl_srcreg;
	struct dmareg			rq_sgl_dstreg;
	struct dmareg			rq_lst_srcreg;
	struct dmareg			rq_lst_dstreg;
	struct dmareg			dma_cfg;
	struct dmareg			dma_chan_en;
	struct dmareg			dma_id;
	struct dmareg			dma_test;
	struct dmareg			res1;
	struct dmareg			res2;
	/* DMA Comp Params
	 * Param 6 = dma_param[0], Param 5 = dma_param[1],
	 * Param 4 = dma_param[2] ...
	 */
	struct dmareg			dma_params[6];
};

#define DMA_EN			0x00000001
#define DMA_DI			0x00000000
#define DMA_CHANNEL(ch)		(0x00000001 << (ch))
#define DMA_ENABLE_CHAN(ch)	((0x00000001 << (ch)) |	\
				((0x000000001 << (ch)) << 8))
#define DMA_DISABLE_CHAN(ch)	(0x00000000 |	\
				((0x000000001 << (ch)) << 8))

#define SATA_DWC_MAX_PORTS	1
#define SATA_DWC_SCR_OFFSET	0x24
#define SATA_DWC_REG_OFFSET	0x64

struct sata_dwc_regs {
	u32 fptagr;
	u32 fpbor;
	u32 fptcr;
	u32 dmacr;
	u32 dbtsr;
	u32 intpr;
	u32 intmr;
	u32 errmr;
	u32 llcr;
	u32 phycr;
	u32 physr;
	u32 rxbistpd;
	u32 rxbistpd1;
	u32 rxbistpd2;
	u32 txbistpd;
	u32 txbistpd1;
	u32 txbistpd2;
	u32 bistcr;
	u32 bistfctr;
	u32 bistsr;
	u32 bistdecr;
	u32 res[15];
	u32 testr;
	u32 versionr;
	u32 idr;
	u32 unimpl[192];
	u32 dmadr[256];
};

#define SATA_DWC_TXFIFO_DEPTH		0x01FF
#define SATA_DWC_RXFIFO_DEPTH		0x01FF

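/*
 * The DBTSR burst values appear to be programmed in units of 32-bit
 * words (hence the divide by 4), clipped to the FIFO depth.
 */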
#define SATA_DWC_DBTSR_MWR(size)	((size / 4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	(((size / 4) &	\
					SATA_DWC_RXFIFO_DEPTH) << 16)
#define SATA_DWC_INTPR_DMAT		0x00000001
#define SATA_DWC_INTPR_NEWFP		0x00000002
#define SATA_DWC_INTPR_PMABRT		0x00000004
#define SATA_DWC_INTPR_ERR		0x00000008
#define SATA_DWC_INTPR_NEWBIST		0x00000010
#define SATA_DWC_INTPR_IPF		0x10000000
#define SATA_DWC_INTMR_DMATM		0x00000001
#define SATA_DWC_INTMR_NEWFPM		0x00000002
#define SATA_DWC_INTMR_PMABRTM		0x00000004
#define SATA_DWC_INTMR_ERRM		0x00000008
#define SATA_DWC_INTMR_NEWBISTM		0x00000010

#define SATA_DWC_DMACR_TMOD_TXCHEN	0x00000004
#define SATA_DWC_DMACR_TXRXCH_CLEAR	SATA_DWC_DMACR_TMOD_TXCHEN

#define SATA_DWC_QCMD_MAX	32

#define SATA_DWC_SERROR_ERR_BITS	0x0FFF0F03

#define HSDEVP_FROM_AP(ap)	(struct sata_dwc_device_port*)	\
				(ap)->private_data

struct sata_dwc_device {
	struct device		*dev;
	struct ata_probe_ent	*pe;
	struct ata_host		*host;
	u8			*reg_base;
	struct sata_dwc_regs	*sata_dwc_regs;
	int			irq_dma;
};

struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	u32			dma_chan[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];
};

enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};

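/* U-Boot runs single-threaded; approximate msleep()/ssleep() by busy-waiting. */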
#define msleep(a)	udelay((a) * 1000)
#define ssleep(a)	msleep((a) * 1000)

static int ata_probe_timeout = (ATA_TMOUT_INTERNAL / 100);

enum sata_dev_state {
	SATA_INIT = 0,
	SATA_READY = 1,
	SATA_NODEVICE = 2,
	SATA_ERROR = 3,
};
enum sata_dev_state dev_state = SATA_INIT;

static struct ahb_dma_regs		*sata_dma_regs = 0;
static struct ata_host			*phost;
static struct ata_port			ap;
static struct ata_port			*pap = &ap;
static struct ata_device		ata_device;
static struct sata_dwc_device_port	dwc_devp;

static void	*scr_addr_sstatus;
static u32	temp_n_block = 0;

static unsigned ata_exec_internal(struct ata_device *dev,
			struct ata_taskfile *tf, const u8 *cdb,
			int dma_dir, unsigned int buflen,
			unsigned long timeout);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
			u8 enable, u8 feature);
static unsigned int ata_dev_init_params(struct ata_device *dev,
			u16 heads, u16 sectors);
static u8 ata_irq_on(struct ata_port *ap);
static struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
			unsigned int tag);
static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			u8 status, int in_wq);
static void ata_tf_to_host(struct ata_port *ap,
			const struct ata_taskfile *tf);
static void ata_exec_command(struct ata_port *ap,
			const struct ata_taskfile *tf);
static unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
static u8 ata_check_altstatus(struct ata_port *ap);
static u8 ata_check_status(struct ata_port *ap);
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			unsigned int wait, unsigned int can_sleep);
static void ata_qc_issue(struct ata_queued_cmd *qc);
static void ata_tf_load(struct ata_port *ap,
			const struct ata_taskfile *tf);
static int ata_dev_read_sectors(unsigned char *pdata,
			unsigned long datalen, u32 block, u32 n_block);
static int ata_dev_write_sectors(unsigned char *pdata,
			unsigned long datalen, u32 block, u32 n_block);
static void ata_std_dev_select(struct ata_port *ap, unsigned int device);
static void ata_qc_complete(struct ata_queued_cmd *qc);
static void __ata_qc_complete(struct ata_queued_cmd *qc);
static void fill_result_tf(struct ata_queued_cmd *qc);
static void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void ata_mmio_data_xfer(struct ata_device *dev,
			unsigned char *buf,
			unsigned int buflen, int do_write);
static void ata_pio_task(struct ata_port *arg_ap);
static void __ata_port_freeze(struct ata_port *ap);
static int ata_port_freeze(struct ata_port *ap);
static void ata_qc_free(struct ata_queued_cmd *qc);
static void ata_pio_sectors(struct ata_queued_cmd *qc);
static void ata_pio_sector(struct ata_queued_cmd *qc);
static void ata_pio_queue_task(struct ata_port *ap,
			void *data, unsigned long delay);
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq);
static int sata_dwc_softreset(struct ata_port *ap);
static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		unsigned int flags, u16 *id);
static int check_sata_dev_state(void);

extern block_dev_desc_t sata_dev_desc[CONFIG_SYS_SATA_MAX_DEVICE];

static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING |
				ATA_FLAG_SRST | ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,
		.mwdma_mask	= 0x07,
		.udma_mask	= 0x7f,
	},
};

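/*
 * init_sata() - bring up the single DWC SATA port.
 *
 * Maps the taskfile and DMA register blocks, waits for the device to
 * drop BSY, soft-resets the link, disables the DMA channel interrupts
 * and leaves only the SATA error/abort conditions unmasked; the driver
 * itself operates purely by polling.  Returns 0 on success.
 */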
int init_sata(int dev)
{
	struct sata_dwc_device hsdev;
	struct ata_host host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	struct ata_link *link;
	struct sata_dwc_device_port hsdevp = dwc_devp;
	u8 *base = 0;
	u8 *sata_dma_regs_addr = 0;
	u8 status;
	unsigned long base_addr = 0;
	int chan = 0;
	int rc;
	int i;

	phost = &host;

	base = (u8 *)SATA_BASE_ADDR;

	hsdev.sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);

	host.n_ports = SATA_DWC_MAX_PORTS;

	for (i = 0; i < SATA_DWC_MAX_PORTS; i++) {
		ap.pflags |= ATA_PFLAG_INITIALIZING;
		ap.flags = ATA_FLAG_DISABLED;
		ap.print_id = -1;
		ap.ctl = ATA_DEVCTL_OBS;
		ap.host = &host;
		ap.last_ctl = 0xFF;

		link = &ap.link;
		link->ap = &ap;
		link->pmp = 0;
		link->active_tag = ATA_TAG_POISON;
		link->hw_sata_spd_limit = 0;

		ap.port_no = i;
		host.ports[i] = &ap;
	}

	ap.pio_mask = pi.pio_mask;
	ap.mwdma_mask = pi.mwdma_mask;
	ap.udma_mask = pi.udma_mask;
	ap.flags |= pi.flags;
	ap.link.flags |= pi.link_flags;

	host.ports[0]->ioaddr.cmd_addr = base;
	host.ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;

	base_addr = (unsigned long)base;

	host.ports[0]->ioaddr.cmd_addr = (void *)base_addr + 0x00;
	host.ports[0]->ioaddr.data_addr = (void *)base_addr + 0x00;

	host.ports[0]->ioaddr.error_addr = (void *)base_addr + 0x04;
	host.ports[0]->ioaddr.feature_addr = (void *)base_addr + 0x04;

	host.ports[0]->ioaddr.nsect_addr = (void *)base_addr + 0x08;

	host.ports[0]->ioaddr.lbal_addr = (void *)base_addr + 0x0c;
	host.ports[0]->ioaddr.lbam_addr = (void *)base_addr + 0x10;
	host.ports[0]->ioaddr.lbah_addr = (void *)base_addr + 0x14;

	host.ports[0]->ioaddr.device_addr = (void *)base_addr + 0x18;
	host.ports[0]->ioaddr.command_addr = (void *)base_addr + 0x1c;
	host.ports[0]->ioaddr.status_addr = (void *)base_addr + 0x1c;

	host.ports[0]->ioaddr.altstatus_addr = (void *)base_addr + 0x20;
	host.ports[0]->ioaddr.ctl_addr = (void *)base_addr + 0x20;

	sata_dma_regs_addr = (u8 *)SATA_DMA_REG_ADDR;
	sata_dma_regs = (void *__iomem)sata_dma_regs_addr;

	status = ata_check_altstatus(&ap);

	if (status == 0x7f) {
		printf("Hard Disk not found.\n");
		dev_state = SATA_NODEVICE;
		rc = FALSE;
		return rc;
	}
#ifndef CONFIG_SAM460EX
	printf("Waiting for device...");
#endif
	i = 0;
	while (1) {
		udelay(10000);

		status = ata_check_altstatus(&ap);

		if ((status & ATA_BUSY) == 0) {
			printf("\n");
			break;
		}

		i++;
		if (i > (ATA_RESET_TIME * 100)) {
			printf("** TimeOUT **\n");

			dev_state = SATA_NODEVICE;
			rc = FALSE;
			return rc;
		}
		if ((i >= 100) && ((i % 100) == 0))
			printf(".");
	}

	rc = sata_dwc_softreset(&ap);

	if (rc) {
		printf("sata_dwc : error. soft reset failed\n");
		return rc;
	}

	for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
		out_le32(&(sata_dma_regs->interrupt_mask.error.low),
				DMA_DISABLE_CHAN(chan));

		out_le32(&(sata_dma_regs->interrupt_mask.tfr.low),
				DMA_DISABLE_CHAN(chan));
	}

	out_le32(&(sata_dma_regs->dma_cfg.low), DMA_DI);

	out_le32(&hsdev.sata_dwc_regs->intmr,
		SATA_DWC_INTMR_ERRM |
		SATA_DWC_INTMR_PMABRTM);

	/* Unmask the error bits that should trigger
	 * an error interrupt by setting the error mask register.
	 */
	out_le32(&hsdev.sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	hsdev.host = ap.host;
	memset(&hsdevp, 0, sizeof(hsdevp));
	hsdevp.hsdev = &hsdev;

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp.cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	out_le32((void __iomem *)scr_addr_sstatus + 4,
		in_le32((void __iomem *)scr_addr_sstatus + 4));

	rc = 0;
	return rc;
}

static u8 ata_check_altstatus(struct ata_port *ap)
{
	u8 val = 0;
	val = readb(ap->ioaddr.altstatus_addr);
	return val;
}

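/*
 * Detect device presence by writing a recognizable pattern to the
 * sector count and LBA low registers and reading it back; only a
 * present device echoes it.  Then pulse SRST in the device control
 * register to perform an ATA software reset.
 */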
static int sata_dwc_softreset(struct ata_port *ap)
{
	u8 nsect, lbal = 0;
	u8 tmp = 0;
	u32 serror = 0;
	u8 status = 0;
	struct ata_ioports *ioaddr = &ap->ioaddr;

	serror = in_le32((void *)ap->ioaddr.scr_addr + (SCR_ERROR * 4));

	writeb(0x55, ioaddr->nsect_addr);
	writeb(0xaa, ioaddr->lbal_addr);
	writeb(0xaa, ioaddr->nsect_addr);
	writeb(0x55, ioaddr->lbal_addr);
	writeb(0x55, ioaddr->nsect_addr);
	writeb(0xaa, ioaddr->lbal_addr);

	nsect = readb(ioaddr->nsect_addr);
	lbal = readb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa)) {
#ifndef CONFIG_SAM460EX
		printf("Device found\n");
#endif
	} else {
#ifndef CONFIG_SAM460EX
		printf("No device found\n");
#else
		printf("No SATA-2 device found\n");
#endif
		dev_state = SATA_NODEVICE;
		return FALSE;
	}

	tmp = ATA_DEVICE_OBS;
	writeb(tmp, ioaddr->device_addr);
	writeb(ap->ctl, ioaddr->ctl_addr);

	udelay(200);

	writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);

	udelay(200);
	writeb(ap->ctl, ioaddr->ctl_addr);

	msleep(150);
	status = ata_check_status(ap);

	msleep(50);
	ata_check_status(ap);

	while (1) {
		u8 status = ata_check_status(ap);

		if (!(status & ATA_BUSY))
			break;

		printf("Hard Disk status is BUSY.\n");
		msleep(50);
	}

	tmp = ATA_DEVICE_OBS;
	writeb(tmp, ioaddr->device_addr);

	nsect = readb(ioaddr->nsect_addr);
	lbal = readb(ioaddr->lbal_addr);

	return 0;
}

static u8 ata_check_status(struct ata_port *ap)
{
	u8 val = 0;
	val = readb(ap->ioaddr.status_addr);
	return val;
}

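/*
 * IDENTIFY words 76 and 78 advertise host- and device-initiated link
 * power management (HIPM/DIPM).  A value of 0x0000 or 0xffff marks
 * the word as invalid.
 */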
static int ata_id_has_hipm(const u16 *id)
{
	u16 val = id[76];

	if (val == 0 || val == 0xffff)
		return -1;

	return val & (1 << 9);
}

static int ata_id_has_dipm(const u16 *id)
{
	u16 val = id[78];

	if (val == 0 || val == 0xffff)
		return -1;

	return val & (1 << 3);
}

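/*
 * scan_sata() - identify the attached device and fill in the U-Boot
 * block device descriptor (vendor, product, revision, capacity and
 * LBA48 capability) from the IDENTIFY DEVICE data.
 */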
int scan_sata(int dev)
{
	int i;
	int rc;
	u8 status;
	const u16 *id;
	struct ata_device *ata_dev = &ata_device;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	unsigned long xfer_mask;
	char revbuf[7];
	u16 iobuf[ATA_SECTOR_WORDS];

	memset(iobuf, 0, sizeof(iobuf));

	if (dev_state == SATA_NODEVICE)
		return 1;
#ifndef CONFIG_SAM460EX
	printf("Waiting for device...");
#endif
	i = 0;
	while (1) {
		udelay(10000);

		status = ata_check_altstatus(&ap);

		if ((status & ATA_BUSY) == 0) {
#ifndef CONFIG_SAM460EX
			printf("\n");
#endif
			break;
		}

		i++;
		if (i > (ATA_RESET_TIME * 100)) {
			printf("** TimeOUT **\n");

			dev_state = SATA_NODEVICE;
			return 1;
		}
		if ((i >= 100) && ((i % 100) == 0))
			printf(".");
	}

	udelay(1000);

	rc = ata_dev_read_id(ata_dev, &ata_dev->class,
			ATA_READID_POSTRESET, ata_dev->id);
	if (rc) {
#ifndef CONFIG_SAM460EX
		printf("sata_dwc : error. failed sata scan\n");
#endif
		return 1;
	}

	/* SATA drives indicate we have a bridge. We don't know which
	 * end of the link the bridge is on, which is a problem.
	 */
	if (ata_id_is_sata(ata_dev->id))
		ap.cbl = ATA_CBL_SATA;

	id = ata_dev->id;

	ata_dev->flags &= ~ATA_DFLAG_CFG_MASK;
	ata_dev->max_sectors = 0;
	ata_dev->cdb_len = 0;
	ata_dev->n_sectors = 0;
	ata_dev->cylinders = 0;
	ata_dev->heads = 0;
	ata_dev->sectors = 0;

	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5) {
			pio_mask = (2 << mode) - 1;
		} else {
			pio_mask = 1;
		}
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	xfer_mask = ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);

	if (ata_dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1)
				printf("supports DRM functions and may "
					"not be fully accessible.\n");
			sprintf(revbuf, "%s", "CFA");
		} else {
			if (ata_id_has_tpm(id))
				printf("supports DRM functions and may "
						"not be fully accessible.\n");
		}

		ata_dev->n_sectors = ata_id_n_sectors((u16 *)id);

		if (ata_dev->id[59] & 0x100)
			ata_dev->multi_count = ata_dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			ata_dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				ata_dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (ata_dev->n_sectors >= (1UL << 28) &&
					ata_id_has_flush_ext(id))
					ata_dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}
			if (!ata_id_has_ncq(ata_dev->id))
				ncq_desc[0] = '\0';

			if (ata_dev->horkage & ATA_HORKAGE_NONCQ)
				sprintf(ncq_desc, "%s", "NCQ (not used)");

			if (ap.flags & ATA_FLAG_NCQ)
				ata_dev->flags |= ATA_DFLAG_NCQ;
		}
		ata_dev->cdb_len = 16;
	}
	ata_dev->max_sectors = ATA_MAX_SECTORS;
	if (ata_dev->flags & ATA_DFLAG_LBA48)
		ata_dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(ata_dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(ata_dev->id))
			ata_dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(ata_dev->id))
			ata_dev->flags |= ATA_DFLAG_DIPM;
	}

	if ((ap.cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ata_dev->id))) {
		ata_dev->udma_mask &= ATA_UDMA5;
		ata_dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		printf("Drive reports diagnostics failure. "
				"This may indicate a drive\n");
		printf("fault or invalid emulation. "
				"Contact drive vendor for information.\n");
	}

	rc = check_sata_dev_state();

	ata_id_c_string(ata_dev->id,
			(unsigned char *)sata_dev_desc[dev].revision,
			 ATA_ID_FW_REV, sizeof(sata_dev_desc[dev].revision));
	ata_id_c_string(ata_dev->id,
			(unsigned char *)sata_dev_desc[dev].vendor,
			 ATA_ID_PROD, sizeof(sata_dev_desc[dev].vendor));
	ata_id_c_string(ata_dev->id,
			(unsigned char *)sata_dev_desc[dev].product,
			 ATA_ID_SERNO, sizeof(sata_dev_desc[dev].product));

	sata_dev_desc[dev].lba = (u32) ata_dev->n_sectors;

#ifdef CONFIG_LBA48
	if (ata_dev->id[83] & (1 << 10)) {
		sata_dev_desc[dev].lba48 = 1;
	} else {
		sata_dev_desc[dev].lba48 = 0;
	}
#endif

	return 0;
}

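/*
 * Poll the status register every 10 us until the requested bits
 * clear, the device reads back as missing (0xff), or the iteration
 * budget runs out; returns the last status read.
 */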
static u8 ata_busy_wait(struct ata_port *ap,
		unsigned int bits, unsigned int max)
{
	u8 status;

	do {
		udelay(10);
		status = ata_check_status(ap);
		max--;
	} while (status != 0xff && (status & bits) && (max > 0));

	return status;
}

static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		unsigned int flags, u16 *id)
{
	struct ata_port *ap = pap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	u8 status;
	int rc;

	status = ata_busy_wait(ap, ATA_BUSY, 30000);
	if (status & ATA_BUSY) {
		printf("BSY = 0 check. timeout.\n");
		rc = FALSE;
		return rc;
	}

	ata_dev_select(ap, dev->devno, 1, 1);

retry:
	memset(&tf, 0, sizeof(tf));
	ap->print_id = 1;
	ap->flags &= ~ATA_FLAG_DISABLED;
	tf.ctl = ap->ctl;
	tf.device = ATA_DEVICE_OBS;
	tf.command = ATA_CMD_ID_ATA;
	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	temp_n_block = 1;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
					sizeof(id[0]) * ATA_ID_WORDS, 0);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			printf("NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA) {
					class = ATA_DEV_ATAPI;
				} else {
					class = ATA_DEV_ATA;
				}
				goto retry;
			}
			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			printf("both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	unsigned int id_cnt;

	for (id_cnt = 0; id_cnt < ATA_ID_WORDS; id_cnt++)
		id[id_cnt] = le16_to_cpu(id[id_cnt]);

	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;
	return 0;

err_out:
	return rc;
}

static u8 ata_wait_idle(struct ata_port *ap)
{
	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
	return status;
}

static void ata_dev_select(struct ata_port *ap, unsigned int device,
		unsigned int wait, unsigned int can_sleep)
{
	if (wait)
		ata_wait_idle(ap);

	ata_std_dev_select(ap, device);

	if (wait)
		ata_wait_idle(ap);
}

static void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0) {
		tmp = ATA_DEVICE_OBS;
	} else {
		tmp = ATA_DEVICE_OBS | ATA_DEV1;
	}

	writeb(tmp, ap->ioaddr.device_addr);

	readb(ap->ioaddr.altstatus_addr);

	udelay(1);
}

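/*
 * Poll a 32-bit register roughly once per millisecond until any bit
 * in 'sign' is set.  Returns 0 on success and -1 on timeout.
 */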
static int waiting_for_reg_state(volatile u8 *offset,
				int timeout_msec,
				u32 sign)
{
	int i;
	u32 status;

	for (i = 0; i < timeout_msec; i++) {
		status = readl(offset);
		if ((status & sign) != 0)
			break;
		msleep(1);
	}

	return (i < timeout_msec) ? 0 : -1;
}

static void ata_qc_reinit(struct ata_queued_cmd *qc)
{
	qc->dma_dir = DMA_NONE;
	qc->flags = 0;
	qc->nbytes = qc->extrabytes = qc->curbytes = 0;
	qc->n_elem = 0;
	qc->err_mask = 0;
	qc->sect_size = ATA_SECT_SIZE;
	qc->nbytes = ATA_SECT_SIZE * temp_n_block;

	memset(&qc->tf, 0, sizeof(qc->tf));
	qc->tf.ctl = 0;
	qc->tf.device = ATA_DEVICE_OBS;

	qc->result_tf.command = ATA_DRDY;
	qc->result_tf.feature = 0;
}

struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
					unsigned int tag)
{
	if (tag < ATA_MAX_QUEUE)
		return &ap->qcmd[tag];
	return NULL;
}

static void __ata_port_freeze(struct ata_port *ap)
{
	printf("set port freeze.\n");
	ap->pflags |= ATA_PFLAG_FROZEN;
}

static int ata_port_freeze(struct ata_port *ap)
{
	__ata_port_freeze(ap);
	return 0;
}

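/*
 * Execute a taskfile synchronously using polled PIO.  The link's
 * active-command bookkeeping is saved across the command and
 * restored afterwards, mirroring libata's ata_exec_internal().
 */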
unsigned ata_exec_internal(struct ata_device *dev,
			struct ata_taskfile *tf, const u8 *cdb,
			int dma_dir, unsigned int buflen,
			unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = pap;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	unsigned int err_mask;
	int rc = 0;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300000);
	if (status & ATA_BUSY) {
		printf("BSY = 0 check. timeout.\n");
		rc = FALSE;
		return rc;
	}

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return AC_ERR_SYSTEM;

	tag = ATA_TAG_INTERNAL;

	if (test_and_set_bit(tag, &ap->qc_allocated)) {
		rc = FALSE;
		return rc;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = tag;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	qc->private_data = 0;

	ata_qc_issue(qc);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	status = ata_busy_wait(ap, ATA_BUSY, 30000);
	if (status & ATA_BUSY) {
		printf("BSY = 0 check. timeout.\n");
		printf("altstatus = 0x%x.\n", status);
		qc->err_mask |= AC_ERR_OTHER;
		return qc->err_mask;
	}

	if (waiting_for_reg_state(ap->ioaddr.altstatus_addr, 1000, 0x8)) {
		u8 status = 0;
		u8 errorStatus = 0;

		status = readb(ap->ioaddr.altstatus_addr);
		if ((status & 0x01) != 0) {
			errorStatus = readb(ap->ioaddr.feature_addr);
			if (errorStatus == 0x04 &&
				qc->tf.command == ATA_CMD_PIO_READ_EXT) {
				printf("Hard Disk doesn't support LBA48\n");
				dev_state = SATA_ERROR;
				qc->err_mask |= AC_ERR_OTHER;
				return qc->err_mask;
			}
		}
		qc->err_mask |= AC_ERR_OTHER;
		return qc->err_mask;
	}

	status = ata_busy_wait(ap, ATA_BUSY, 10);
	if (status & ATA_BUSY) {
		printf("BSY = 0 check. timeout.\n");
		qc->err_mask |= AC_ERR_OTHER;
		return qc->err_mask;
	}

	ata_pio_task(ap);

	if (!rc) {
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;
			ata_port_freeze(ap);
		}
	}

	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	*tf = qc->result_tf;
	err_mask = qc->err_mask;
	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ap->flags &= ~ATA_FLAG_DISABLED;
	}

	return err_mask;
}

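/* Account for the qc on its link, then hand it to the protocol-specific
 * issue path; on error, complete it immediately.
 */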
static void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	if (ata_is_ncq(prot)) {
		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (qc->dev->flags & ATA_DFLAG_SLEEPING) {
		msleep(1);
		return;
	}

	qc->err_mask |= ata_qc_issue_prot(qc);
	if (qc->err_mask)
		goto err;

	return;
err:
	ata_qc_complete(qc);
}

static unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATAPI_PROT_PIO:
		case ATAPI_PROT_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		default:
			break;
		}
	}

	ata_dev_select(ap, qc->dev->devno, 1, 0);

	switch (qc->tf.protocol) {
	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			qc->tf.ctl |= ATA_NIEN;

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_pio_queue_task(ap, qc, 0);

		break;

	default:
		return AC_ERR_SYSTEM;
	}

	return 0;
}

static void ata_tf_to_host(struct ata_port *ap,
			const struct ata_taskfile *tf)
{
	ata_tf_load(ap, tf);
	ata_exec_command(ap, tf);
}

static void ata_tf_load(struct ata_port *ap,
			const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			writeb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, ioaddr->feature_addr);
		writeb(tf->hob_nsect, ioaddr->nsect_addr);
		writeb(tf->hob_lbal, ioaddr->lbal_addr);
		writeb(tf->hob_lbam, ioaddr->lbam_addr);
		writeb(tf->hob_lbah, ioaddr->lbah_addr);
	}

	if (is_addr) {
		writeb(tf->feature, ioaddr->feature_addr);
		writeb(tf->nsect, ioaddr->nsect_addr);
		writeb(tf->lbal, ioaddr->lbal_addr);
		writeb(tf->lbam, ioaddr->lbam_addr);
		writeb(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		writeb(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}

static void ata_exec_command(struct ata_port *ap,
			const struct ata_taskfile *tf)
{
	writeb(tf->command, ap->ioaddr.command_addr);

	readb(ap->ioaddr.altstatus_addr);

	udelay(1);
}

static void ata_pio_queue_task(struct ata_port *ap,
			void *data, unsigned long delay)
{
	ap->port_task_data = data;
}

static unsigned int ac_err_mask(u8 status)
{
	if (status & (ATA_BUSY | ATA_DRQ))
		return AC_ERR_HSM;
	if (status & (ATA_ERR | ATA_DF))
		return AC_ERR_DEV;
	return 0;
}

static unsigned int __ac_err_mask(u8 status)
{
	unsigned int mask = ac_err_mask(status);
	if (mask == 0)
		return AC_ERR_OTHER;
	return mask;
}

static void ata_pio_task(struct ata_port *arg_ap)
{
	struct ata_port *ap = arg_ap;
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

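/*
 * Polled host state machine, essentially a trimmed-down copy of
 * libata's ata_hsm_move(): advance one step per call and return
 * non-zero while the caller should keep polling.
 */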
static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			u8 status, int in_wq)
{
	int poll_next;

fsm_start:
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		if ((status & ATA_DRQ) == 0) {
			if (status & (ATA_ERR | ATA_DF)) {
				qc->err_mask |= AC_ERR_DEV;
			} else {
				qc->err_mask |= AC_ERR_HSM;
			}
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (status & (ATA_ERR | ATA_DF)) {
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				printf("DRQ=1 with device error, "
					"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */
			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else {
			printf("protocol is not ATA_PROT_PIO\n");
		}
		break;

	case HSM_ST:
		if ((status & ATA_DRQ) == 0) {
			if (status & (ATA_ERR | ATA_DF)) {
				qc->err_mask |= AC_ERR_DEV;
			} else {
				/* HSM violation. Let EH handle this.
				 * Phantom devices also trigger this
				 * condition.  Mark hint.
				 */
				qc->err_mask |= AC_ERR_HSM | AC_ERR_NODEV_HINT;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}
		/* For PIO reads, some devices may ask for
		 * data transfer (DRQ=1) along with ERR=1.
		 * We respect DRQ here and transfer one
		 * block of junk data before changing the
		 * hsm_task_state to HSM_ST_ERR.
		 *
		 * For PIO writes, ERR=1 DRQ=1 doesn't make
		 * sense since the data block has been
		 * transferred to the device.
		 */
		if (status & (ATA_ERR | ATA_DF)) {
			qc->err_mask |= AC_ERR_DEV;

			if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
				ata_pio_sectors(qc);
				status = ata_wait_idle(ap);
			}

			if (status & (ATA_BUSY | ATA_DRQ))
				qc->err_mask |= AC_ERR_HSM;

			/* ata_pio_sectors() might change the
			 * state to HSM_ST_LAST. so, the state
			 * is changed after ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		ata_pio_sectors(qc);
		if (ap->hsm_task_state == HSM_ST_LAST &&
			(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
			status = ata_wait_idle(ap);
			goto fsm_start;
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (!ata_ok(status)) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		ap->hsm_task_state = HSM_ST_IDLE;

		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		ap->hsm_task_state = HSM_ST_IDLE;

		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
	}

	return poll_next;
}

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	ap = pap;
	qc->pdata = ap->pdata;

	ata_pio_sector(qc);

	readb(qc->ap->ioaddr.altstatus_addr);
	udelay(1);
}

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	unsigned int offset;
	unsigned char *buf;
	char temp_data_buf[512];

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	offset = qc->curbytes;

	switch (qc->tf.command) {
	case ATA_CMD_ID_ATA:
		buf = (unsigned char *)&ata_device.id[0];
		break;
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_WRITE_EXT:
	case ATA_CMD_PIO_WRITE:
		buf = qc->pdata + offset;
		break;
	default:
		buf = (unsigned char *)&temp_data_buf[0];
	}

	ata_mmio_data_xfer(qc->dev, buf, qc->sect_size, do_write);

	qc->curbytes += qc->sect_size;
}

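/*
 * Move buflen bytes between the buffer and the MMIO data register as
 * 16-bit little-endian words; an odd trailing byte is bounced through
 * an aligned word.
 */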
static void ata_mmio_data_xfer(struct ata_device *dev, unsigned char *buf,
				unsigned int buflen, int do_write)
{
	struct ata_port *ap = pap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *)buf;
	unsigned int i = 0;

	udelay(100);
	if (do_write) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), data_addr);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(data_addr));
	}

	if (buflen & 0x01) {
		__le16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (do_write) {
			memcpy(align_buf, trailing_buf, 1);
			writew(le16_to_cpu(align_buf[0]), data_addr);
		} else {
			align_buf[0] = cpu_to_le16(readw(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}

static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;

	if (in_wq) {
		/* EH might have kicked in while host lock is
		 * released.
		 */
		qc = &ap->qcmd[qc->tag];
		if (qc) {
			if (!(qc->err_mask & AC_ERR_HSM)) {
				ata_irq_on(ap);
				ata_qc_complete(qc);
			} else {
				ata_port_freeze(ap);
			}
		}
	} else {
		if (!(qc->err_mask & AC_ERR_HSM)) {
			ata_qc_complete(qc);
		} else {
			ata_port_freeze(ap);
		}
	}
}

static u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ioaddr->ctl_addr)
		writeb(ap->ctl, ioaddr->ctl_addr);

	tmp = ata_wait_idle(ap);

	return tmp;
}

static unsigned int ata_tag_internal(unsigned int tag)
{
	return tag == ATA_MAX_QUEUE - 1;
}

static void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	if (qc->err_mask)
		qc->flags |= ATA_QCFLAG_FAILED;

	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (!ata_tag_internal(qc->tag)) {
			fill_result_tf(qc);
			return;
		}
	}
	if (qc->flags & ATA_QCFLAG_RESULT_TF)
		fill_result_tf(qc);

	/* Some commands need post-processing after successful
	 * completion.
	 */
	switch (qc->tf.command) {
	case ATA_CMD_SET_FEATURES:
		if (qc->tf.feature != SETFEATURES_WC_ON &&
				qc->tf.feature != SETFEATURES_WC_OFF)
			break;
	case ATA_CMD_INIT_DEV_PARAMS:
	case ATA_CMD_SET_MULTI:
		break;

	case ATA_CMD_SLEEP:
		dev->flags |= ATA_DFLAG_SLEEPING;
		break;
	}

	__ata_qc_complete(qc);
}

static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ata_tf_read(ap, &qc->result_tf);
}

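/*
 * Read the result taskfile back from the device; for LBA48 commands
 * the HOB registers are exposed by setting ATA_HOB in the control
 * register first.
 */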
static void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = readb(ioaddr->error_addr);
	tf->nsect = readb(ioaddr->nsect_addr);
	tf->lbal = readb(ioaddr->lbal_addr);
	tf->lbam = readb(ioaddr->lbam_addr);
	tf->lbah = readb(ioaddr->lbah_addr);
	tf->device = readb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (ioaddr->ctl_addr) {
			writeb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);

			tf->hob_feature = readb(ioaddr->error_addr);
			tf->hob_nsect = readb(ioaddr->nsect_addr);
			tf->hob_lbal = readb(ioaddr->lbal_addr);
			tf->hob_lbam = readb(ioaddr->lbam_addr);
			tf->hob_lbah = readb(ioaddr->lbah_addr);

			writeb(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else {
			printf("sata_dwc: warning, cannot read HOB registers\n");
		}
	}
}

static void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	link->active_tag = ATA_TAG_POISON;
	ap->nr_active_links--;

	if (qc->flags & ATA_QCFLAG_CLEAR_EXCL && ap->excl_link == link)
		ap->excl_link = NULL;

	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);
}

static void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;
	qc->flags = 0;
	tag = qc->tag;
	if (tag < ATA_MAX_QUEUE) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

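/*
 * Prove that the device is ready by repeatedly reading sector 0
 * until a read succeeds or the retry budget expires.
 */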
static int check_sata_dev_state(void)
{
	unsigned long datalen;
	unsigned char *pdata;
	int ret = 0;
	int i = 0;
	char temp_data_buf[512];

	while (1) {
		udelay(10000);

		pdata = (unsigned char *)&temp_data_buf[0];
		datalen = 512;

		ret = ata_dev_read_sectors(pdata, datalen, 0, 1);

		if (ret == TRUE)
			break;

		i++;
		if (i > (ATA_RESET_TIME * 20)) {
			printf("** TimeOUT **\n");
			dev_state = SATA_NODEVICE;
			return FALSE;
		}

		if ((i >= 100) && ((i % 100) == 0))
			printf(".");
	}

	dev_state = SATA_READY;

	return TRUE;
}

static unsigned int ata_dev_set_feature(struct ata_device *dev,
				u8 enable, u8 feature)
{
	struct ata_taskfile tf;
	struct ata_port *ap;
	ap = pap;
	unsigned int err_mask;

	memset(&tf, 0, sizeof(tf));
	tf.ctl = ap->ctl;

	tf.device = ATA_DEVICE_OBS;
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0);

	return err_mask;
}

static unsigned int ata_dev_init_params(struct ata_device *dev,
				u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	struct ata_port *ap;
	ap = pap;
	unsigned int err_mask;

	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	memset(&tf, 0, sizeof(tf));
	tf.ctl = ap->ctl;
	tf.device = ATA_DEVICE_OBS;
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0);

	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	return err_mask;
}

#if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
#define SATA_MAX_READ_BLK 0xFF
#else
#define SATA_MAX_READ_BLK 0xFFFF
#endif

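/*
 * U-Boot block read entry point: split the request into chunks of at
 * most SATA_MAX_READ_BLK sectors (the 8-bit or 16-bit sector count
 * limit of READ SECTOR(S) / READ SECTOR(S) EXT) and PIO each chunk in.
 */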
ulong sata_read(int device, ulong blknr, lbaint_t blkcnt, void *buffer)
{
	ulong start, blks, buf_addr;
	unsigned short smallblks;
	unsigned long datalen;
	unsigned char *pdata;
	device &= 0xff;

	u32 block = 0;
	u32 n_block = 0;

	if (dev_state != SATA_READY)
		return 0;

	buf_addr = (unsigned long)buffer;
	start = blknr;
	blks = blkcnt;
	do {
		pdata = (unsigned char *)buf_addr;
		if (blks > SATA_MAX_READ_BLK) {
			datalen = sata_dev_desc[device].blksz * SATA_MAX_READ_BLK;
			smallblks = SATA_MAX_READ_BLK;

			block = (u32)start;
			n_block = (u32)smallblks;

			start += SATA_MAX_READ_BLK;
			blks -= SATA_MAX_READ_BLK;
		} else {
			datalen = sata_dev_desc[device].blksz * blks;
			smallblks = (unsigned short)blks;

			block = (u32)start;
			n_block = (u32)smallblks;

			start += blks;
			blks = 0;
		}

		if (ata_dev_read_sectors(pdata, datalen, block, n_block) != TRUE) {
			printf("sata_dwc : Hard disk read error.\n");
			blkcnt -= blks;
			break;
		}
		buf_addr += datalen;
	} while (blks != 0);

	return (blkcnt);
}

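/*
 * Build and issue a polled PIO READ SECTOR(S) (EXT) taskfile for
 * n_block sectors starting at LBA 'block'; the sector data is
 * delivered through ap->pdata by the PIO state machine.
 */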
static int ata_dev_read_sectors(unsigned char *pdata, unsigned long datalen,
						u32 block, u32 n_block)
{
	struct ata_port *ap = pap;
	struct ata_device *dev = &ata_device;
	struct ata_taskfile tf;
	unsigned int class = ATA_DEV_ATA;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1;
	int rc;

	if (dev_state == SATA_ERROR)
		return FALSE;

	ata_dev_select(ap, dev->devno, 1, 1);

retry:
	memset(&tf, 0, sizeof(tf));
	tf.ctl = ap->ctl;
	ap->print_id = 1;
	ap->flags &= ~ATA_FLAG_DISABLED;

	ap->pdata = pdata;

	tf.device = ATA_DEVICE_OBS;

	temp_n_block = n_block;

#ifdef CONFIG_LBA48
	tf.command = ATA_CMD_PIO_READ_EXT;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

	tf.hob_feature = 31;
	tf.feature = 31;
	tf.hob_nsect = (n_block >> 8) & 0xff;
	tf.nsect = n_block & 0xff;

	tf.hob_lbah = 0x0;
	tf.hob_lbam = 0x0;
	tf.hob_lbal = (block >> 24) & 0xff;
	tf.lbah = (block >> 16) & 0xff;
	tf.lbam = (block >> 8) & 0xff;
	tf.lbal = block & 0xff;

	tf.device = 1 << 6;
	if (tf.flags & ATA_TFLAG_FUA)
		tf.device |= 1 << 7;
#else
	tf.command = ATA_CMD_PIO_READ;
	tf.flags |= ATA_TFLAG_LBA;

	tf.feature = 31;
	tf.nsect = n_block & 0xff;

	tf.lbah = (block >> 16) & 0xff;
	tf.lbam = (block >> 8) & 0xff;
	tf.lbal = block & 0xff;

	tf.device = (block >> 24) & 0xf;

	tf.device |= 1 << 6;
	if (tf.flags & ATA_TFLAG_FUA)
		tf.device |= 1 << 7;

#endif

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 0, 0);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			printf("READ_SECTORS NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA) {
					class = ATA_DEV_ATAPI;
				} else {
					class = ATA_DEV_ATA;
				}
				goto retry;
			}
			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			printf("both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	rc = -EINVAL;
	reason = "device reports invalid type";

	return TRUE;

err_out:
	printf("failed to READ SECTORS (%s, err_mask=0x%x)\n", reason, err_mask);
	return FALSE;
}

#if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
#define SATA_MAX_WRITE_BLK 0xFF
#else
#define SATA_MAX_WRITE_BLK 0xFFFF
#endif

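/*
 * U-Boot block write entry point: the mirror image of sata_read(),
 * chunking the request into SATA_MAX_WRITE_BLK-sector PIO writes.
 */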
ulong sata_write(int device, ulong blknr, lbaint_t blkcnt, void *buffer)
{
	ulong start, blks, buf_addr;
	unsigned short smallblks;
	unsigned long datalen;
	unsigned char *pdata;
	device &= 0xff;

	u32 block = 0;
	u32 n_block = 0;

	if (dev_state != SATA_READY)
		return 0;

	buf_addr = (unsigned long)buffer;
	start = blknr;
	blks = blkcnt;
	do {
		pdata = (unsigned char *)buf_addr;
		if (blks > SATA_MAX_WRITE_BLK) {
			datalen = sata_dev_desc[device].blksz * SATA_MAX_WRITE_BLK;
			smallblks = SATA_MAX_WRITE_BLK;

			block = (u32)start;
			n_block = (u32)smallblks;

			start += SATA_MAX_WRITE_BLK;
			blks -= SATA_MAX_WRITE_BLK;
		} else {
			datalen = sata_dev_desc[device].blksz * blks;
			smallblks = (unsigned short)blks;

			block = (u32)start;
			n_block = (u32)smallblks;

			start += blks;
			blks = 0;
		}

		if (ata_dev_write_sectors(pdata, datalen, block, n_block) != TRUE) {
			printf("sata_dwc : Hard disk write error.\n");
			blkcnt -= blks;
			break;
		}
		buf_addr += datalen;
	} while (blks != 0);

	return (blkcnt);
}

static int ata_dev_write_sectors(unsigned char *pdata, unsigned long datalen,
						u32 block, u32 n_block)
{
	struct ata_port *ap = pap;
	struct ata_device *dev = &ata_device;
	struct ata_taskfile tf;
	unsigned int class = ATA_DEV_ATA;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1;
	int rc;

	if (dev_state == SATA_ERROR)
		return FALSE;

	ata_dev_select(ap, dev->devno, 1, 1);

retry:
	memset(&tf, 0, sizeof(tf));
	tf.ctl = ap->ctl;
	ap->print_id = 1;
	ap->flags &= ~ATA_FLAG_DISABLED;

	ap->pdata = pdata;

	tf.device = ATA_DEVICE_OBS;

	temp_n_block = n_block;

#ifdef CONFIG_LBA48
	tf.command = ATA_CMD_PIO_WRITE_EXT;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48 | ATA_TFLAG_WRITE;

	tf.hob_feature = 31;
	tf.feature = 31;
	tf.hob_nsect = (n_block >> 8) & 0xff;
	tf.nsect = n_block & 0xff;

	tf.hob_lbah = 0x0;
	tf.hob_lbam = 0x0;
	tf.hob_lbal = (block >> 24) & 0xff;
	tf.lbah = (block >> 16) & 0xff;
	tf.lbam = (block >> 8) & 0xff;
	tf.lbal = block & 0xff;

	tf.device = 1 << 6;
	if (tf.flags & ATA_TFLAG_FUA)
		tf.device |= 1 << 7;
#else
	tf.command = ATA_CMD_PIO_WRITE;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_WRITE;

	tf.feature = 31;
	tf.nsect = n_block & 0xff;

	tf.lbah = (block >> 16) & 0xff;
	tf.lbam = (block >> 8) & 0xff;
	tf.lbal = block & 0xff;

	tf.device = (block >> 24) & 0xf;

	tf.device |= 1 << 6;
	if (tf.flags & ATA_TFLAG_FUA)
		tf.device |= 1 << 7;

#endif

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 0, 0);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			printf("WRITE_SECTORS NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA) {
					class = ATA_DEV_ATAPI;
				} else {
					class = ATA_DEV_ATA;
				}
				goto retry;
			}
			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			printf("both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	rc = -EINVAL;
	reason = "device reports invalid type";

	return TRUE;

err_out:
	printf("failed to WRITE SECTORS (%s, err_mask=0x%x)\n", reason, err_mask);
	return FALSE;
}