// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994-1998	   Linus Torvalds & authors (see below)
 *  Copyright (C) 1998-2002	   Linux ATA Development
 *				      Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2003		   Red Hat
 *  Copyright (C) 2003-2005, 2007  Bartlomiej Zolnierkiewicz
 */

/*
 *  Mostly written by Mark Lord <mlord@pobox.com>
 *                and Gadi Oxman <gadio@netvision.net.il>
 *                and Andre Hedrick <andre@linux-ide.org>
 *
 * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/leds.h>
#include <linux/ide.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

#include "ide-disk.h"

static const u8 ide_rw_cmds[] = {
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
};

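/*
 * Select the taskfile command for a read/write request.  ide_rw_cmds[]
 * is indexed as base (0 = multi-sector PIO, 4 = single-sector PIO,
 * 8 = DMA) + 2 for LBA48 + 1 for a write, so e.g. a DMA LBA48 write
 * picks ide_rw_cmds[8 + 2 + 1] == ATA_CMD_WRITE_EXT.
 */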
static void ide_tf_set_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 dma)
{
	u8 index, lba48, write;

	lba48 = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
	write = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;

	if (dma) {
		cmd->protocol = ATA_PROT_DMA;
		index = 8;
	} else {
		cmd->protocol = ATA_PROT_PIO;
		if (drive->mult_count) {
			cmd->tf_flags |= IDE_TFLAG_MULTI_PIO;
			index = 0;
		} else
			index = 4;
	}

	cmd->tf.command = ide_rw_cmds[index + lba48 + write];
}

/*
 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
 * using LBA if supported, or CHS otherwise, to address sectors.
 */
static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
					sector_t block)
{
	ide_hwif_t *hwif	= drive->hwif;
	u16 nsectors		= (u16)blk_rq_sectors(rq);
	u8 lba48		= !!(drive->dev_flags & IDE_DFLAG_LBA48);
	u8 dma			= !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
	struct ide_cmd		cmd;
	struct ide_taskfile	*tf = &cmd.tf;
	ide_startstop_t		rc;

	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
		if (block + blk_rq_sectors(rq) > 1ULL << 28)
			dma = 0;
		else
			lba48 = 0;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

	if (drive->dev_flags & IDE_DFLAG_LBA) {
		if (lba48) {
			pr_debug("%s: LBA=0x%012llx\n", drive->name,
					(unsigned long long)block);

			tf->nsect  = nsectors & 0xff;
			tf->lbal   = (u8) block;
			tf->lbam   = (u8)(block >>  8);
			tf->lbah   = (u8)(block >> 16);
			tf->device = ATA_LBA;

			tf = &cmd.hob;
			tf->nsect = (nsectors >> 8) & 0xff;
			tf->lbal  = (u8)(block >> 24);
			if (sizeof(block) != 4) {
				tf->lbam = (u8)((u64)block >> 32);
				tf->lbah = (u8)((u64)block >> 40);
			}

			cmd.valid.out.hob = IDE_VALID_OUT_HOB;
			cmd.valid.in.hob  = IDE_VALID_IN_HOB;
			cmd.tf_flags |= IDE_TFLAG_LBA48;
		} else {
			tf->nsect  = nsectors & 0xff;
			tf->lbal   = block;
			tf->lbam   = block >>= 8;
			tf->lbah   = block >>= 8;
			tf->device = ((block >> 8) & 0xf) | ATA_LBA;
		}
	} else {
		unsigned int sect, head, cyl, track;

		track = (int)block / drive->sect;
		sect  = (int)block % drive->sect + 1;
		head  = track % drive->head;
		cyl   = track / drive->head;

		pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);

		tf->nsect  = nsectors & 0xff;
		tf->lbal   = sect;
		tf->lbam   = cyl;
		tf->lbah   = cyl >> 8;
		tf->device = head;
	}

	cmd.tf_flags |= IDE_TFLAG_FS;

	if (rq_data_dir(rq))
		cmd.tf_flags |= IDE_TFLAG_WRITE;

	ide_tf_set_cmd(drive, &cmd, dma);
	cmd.rq = rq;

	if (dma == 0) {
		ide_init_sg_cmd(&cmd, nsectors << 9);
		ide_map_sg(drive, &cmd);
	}

	rc = do_rw_taskfile(drive, &cmd);

	if (rc == ide_stopped && dma) {
		/* fallback to PIO */
		cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
		ide_tf_set_cmd(drive, &cmd, 0);
		ide_init_sg_cmd(&cmd, nsectors << 9);
		rc = do_rw_taskfile(drive, &cmd);
	}

	return rc;
}

/*
 * 268435455  == 137439 MB or 28bit limit
 * 320173056  == 163929 MB or 48bit addressing
 * 1073741822 == 549756 MB or 48bit addressing fake drive
 */

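/*
 * Top-level read/write handler: sanity-check the request, trigger the
 * disk activity LED, give the host driver a look via hwif->rw_disk()
 * and then hand the request to __ide_do_rw_disk().
 */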
static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
				      sector_t block)
{
	ide_hwif_t *hwif = drive->hwif;

	BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
	BUG_ON(blk_rq_is_passthrough(rq));

	ledtrig_disk_activity(rq_data_dir(rq) == WRITE);

	pr_debug("%s: %sing: block=%llu, sectors=%u\n",
		 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
		 (unsigned long long)block, blk_rq_sectors(rq));

	if (hwif->rw_disk)
		hwif->rw_disk(drive, rq);

	return __ide_do_rw_disk(drive, rq, block);
}

/*
 * Queries for true maximum capacity of the drive.
 * Returns maximum LBA address (> 0) of the drive, 0 if failed.
 */
static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
{
	struct ide_cmd cmd;
	struct ide_taskfile *tf = &cmd.tf;
	u64 addr = 0;

	memset(&cmd, 0, sizeof(cmd));
	if (lba48)
		tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
	else
		tf->command = ATA_CMD_READ_NATIVE_MAX;
	tf->device  = ATA_LBA;

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	if (lba48) {
		cmd.valid.out.hob = IDE_VALID_OUT_HOB;
		cmd.valid.in.hob  = IDE_VALID_IN_HOB;
		cmd.tf_flags = IDE_TFLAG_LBA48;
	}

	ide_no_data_taskfile(drive, &cmd);

	/* if OK, compute maximum address value */
	if (!(tf->status & ATA_ERR))
		addr = ide_get_lba_addr(&cmd, lba48) + 1;

	return addr;
}

/*
 * Sets maximum virtual LBA address of the drive.
 * Returns new maximum virtual LBA address (> 0) or 0 on failure.
 */
static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
{
	struct ide_cmd cmd;
	struct ide_taskfile *tf = &cmd.tf;
	u64 addr_set = 0;

	addr_req--;

	memset(&cmd, 0, sizeof(cmd));
	tf->lbal     = (addr_req >>  0) & 0xff;
	tf->lbam     = (addr_req >>= 8) & 0xff;
	tf->lbah     = (addr_req >>= 8) & 0xff;
	if (lba48) {
		cmd.hob.lbal = (addr_req >>= 8) & 0xff;
		cmd.hob.lbam = (addr_req >>= 8) & 0xff;
		cmd.hob.lbah = (addr_req >>= 8) & 0xff;
		tf->command  = ATA_CMD_SET_MAX_EXT;
	} else {
		tf->device   = (addr_req >>= 8) & 0x0f;
		tf->command  = ATA_CMD_SET_MAX;
	}
	tf->device |= ATA_LBA;

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	if (lba48) {
		cmd.valid.out.hob = IDE_VALID_OUT_HOB;
		cmd.valid.in.hob  = IDE_VALID_IN_HOB;
		cmd.tf_flags = IDE_TFLAG_LBA48;
	}

	ide_no_data_taskfile(drive, &cmd);

	/* if OK, compute maximum address value */
	if (!(tf->status & ATA_ERR))
		addr_set = ide_get_lba_addr(&cmd, lba48) + 1;

	return addr_set;
}

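/* Convert a sector count into a (decimal) megabyte count for reporting. */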
static unsigned long long sectors_to_MB(unsigned long long n)
{
	n <<= 9;		/* make it bytes */
	do_div(n, 1000000);	/* make it MB */
	return n;
}

/*
 * Some disks report total number of sectors instead of
 * maximum sector address.  We list them here.
 */
static const struct drive_list_entry hpa_list[] = {
	{ "ST340823A",	NULL },
	{ "ST320413A",	NULL },
	{ "ST310211A",	NULL },
	{ NULL,		NULL }
};

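/*
 * Read the native (unclipped) capacity via READ NATIVE MAX ADDRESS,
 * compensating for drives in hpa_list that report a sector count
 * instead of the maximum sector address.
 */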
static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
{
	u64 capacity, set_max;

	capacity = drive->capacity64;
	set_max  = idedisk_read_native_max_address(drive, lba48);

	if (ide_in_drive_list(drive->id, hpa_list)) {
		/*
		 * Since we are inclusive with respect to firmware revisions,
		 * do this extra check and apply the workaround only when
		 * needed.
		 */
		if (set_max == capacity + 1)
			set_max--;
	}

	return set_max;
}

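/*
 * Issue SET MAX ADDRESS (EXT) and, on success, record the new capacity
 * in drive->capacity64.
 */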
static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
{
	set_max = idedisk_set_max_address(drive, set_max, lba48);
	if (set_max)
		drive->capacity64 = set_max;

	return set_max;
}

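/*
 * Detect a Host Protected Area: if the native capacity exceeds the
 * current one, report it and, when IDE_DFLAG_NOHPA is set, disable the
 * HPA by raising the capacity to the native maximum.
 */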
static void idedisk_check_hpa(ide_drive_t *drive)
{
	u64 capacity, set_max;
	int lba48 = ata_id_lba48_enabled(drive->id);

	capacity = drive->capacity64;
	set_max  = ide_disk_hpa_get_native_capacity(drive, lba48);

	if (set_max <= capacity)
		return;

	drive->probed_capacity = set_max;

	printk(KERN_INFO "%s: Host Protected Area detected.\n"
			 "\tcurrent capacity is %llu sectors (%llu MB)\n"
			 "\tnative  capacity is %llu sectors (%llu MB)\n",
			 drive->name,
			 capacity, sectors_to_MB(capacity),
			 set_max, sectors_to_MB(set_max));

	if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
		return;

	set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
	if (set_max)
		printk(KERN_INFO "%s: Host Protected Area disabled.\n",
				 drive->name);
}

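/*
 * Work out the drive capacity from the identify data (48-bit LBA,
 * 28-bit LBA or CHS), handle an enabled HPA, and clip the capacity to
 * 1 << 28 sectors where LBA48 cannot be used.
 */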
static int ide_disk_get_capacity(ide_drive_t *drive)
{
	u16 *id = drive->id;
	int lba;

	if (ata_id_lba48_enabled(id)) {
		/* drive speaks 48-bit LBA */
		lba = 1;
		drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
	} else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
		/* drive speaks 28-bit LBA */
		lba = 1;
		drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		/* drive speaks boring old 28-bit CHS */
		lba = 0;
		drive->capacity64 = drive->cyl * drive->head * drive->sect;
	}

	drive->probed_capacity = drive->capacity64;

	if (lba) {
		drive->dev_flags |= IDE_DFLAG_LBA;

		/*
		 * If this device supports the Host Protected Area feature set,
		 * then we may need to change our opinion about its capacity.
		 */
		if (ata_id_hpa_enabled(id))
			idedisk_check_hpa(drive);
	}

	/* limit drive capacity to 137GB if LBA48 cannot be used */
	if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
	    drive->capacity64 > 1ULL << 28) {
		printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
		       "%llu sectors (%llu MB)\n",
		       drive->name, (unsigned long long)drive->capacity64,
		       sectors_to_MB(drive->capacity64));
		drive->probed_capacity = drive->capacity64 = 1ULL << 28;
	}

	if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
	    (drive->dev_flags & IDE_DFLAG_LBA48)) {
		if (drive->capacity64 > 1ULL << 28) {
			printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
					 " will be used for accessing sectors "
					 "> %u\n", drive->name, 1 << 28);
		} else
			drive->dev_flags &= ~IDE_DFLAG_LBA48;
	}

	return 0;
}

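/*
 * Unlock the full native capacity of a drive with an active HPA;
 * IDE_DFLAG_NOHPA is set on success so the HPA stays disabled across
 * a resume.
 */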
static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
{
	u16 *id = drive->id;
	int lba48 = ata_id_lba48_enabled(id);

	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
	    ata_id_hpa_enabled(id) == 0)
		return;

	/*
	 * according to the spec the SET MAX ADDRESS command shall be
	 * immediately preceded by a READ NATIVE MAX ADDRESS command
	 */
	if (!ide_disk_hpa_get_native_capacity(drive, lba48))
		return;

	if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
		drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}

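/*
 * Request preparation hook: turn a REQ_OP_FLUSH request into an ATA
 * FLUSH CACHE (EXT) taskfile before it is queued to the drive.
 */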
static bool idedisk_prep_rq(ide_drive_t *drive, struct request *rq)
{
	struct ide_cmd *cmd;

	if (req_op(rq) != REQ_OP_FLUSH)
		return true;

	if (ide_req(rq)->special) {
		cmd = ide_req(rq)->special;
		memset(cmd, 0, sizeof(*cmd));
	} else {
		cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
	}

	/* FIXME: map struct ide_taskfile on rq->cmd[] */
	BUG_ON(cmd == NULL);

	if (ata_id_flush_ext_enabled(drive->id) &&
	    (drive->capacity64 >= (1UL << 28)))
		cmd->tf.command = ATA_CMD_FLUSH_EXT;
	else
		cmd->tf.command = ATA_CMD_FLUSH;
	cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd->tf_flags = IDE_TFLAG_DYN;
	cmd->protocol = ATA_PROT_NODATA;
	rq->cmd_flags &= ~REQ_OP_MASK;
	rq->cmd_flags |= REQ_OP_DRV_OUT;
	ide_req(rq)->type = ATA_PRIV_TASKFILE;
	ide_req(rq)->special = cmd;
	cmd->rq = rq;

	return true;
}

ide_devset_get(multcount, mult_count);

/*
 * This is tightly woven into the driver and ->do_special cannot touch it.
 * DON'T do it again until a total personality rewrite is committed.
 */
static int set_multcount(ide_drive_t *drive, int arg)
{
	struct request *rq;

	if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
		return -EINVAL;

	if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
		return -EBUSY;

	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
	ide_req(rq)->type = ATA_PRIV_TASKFILE;

	drive->mult_req = arg;
	drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
	blk_execute_rq(NULL, rq, 0);
	blk_put_request(rq);

	return (drive->mult_count == arg) ? 0 : -EIO;
}

ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);

static int set_nowerr(ide_drive_t *drive, int arg)
{
	if (arg < 0 || arg > 1)
		return -EINVAL;

	if (arg)
		drive->dev_flags |= IDE_DFLAG_NOWERR;
	else
		drive->dev_flags &= ~IDE_DFLAG_NOWERR;

	drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;

	return 0;
}

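/*
 * Issue a SET FEATURES command with the given subcommand in the
 * feature register and nsect in the sector count register.
 */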
static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
{
	struct ide_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.tf.feature = feature;
	cmd.tf.nsect   = nsect;
	cmd.tf.command = ATA_CMD_SET_FEATURES;
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

	return ide_no_data_taskfile(drive, &cmd);
}

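/*
 * Decide whether cache flushes can be used for this drive and, if so,
 * install idedisk_prep_rq() and enable write-cache handling in the
 * block layer.
 */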
static void update_flush(ide_drive_t *drive)
{
	u16 *id = drive->id;
	bool wc = false;

	if (drive->dev_flags & IDE_DFLAG_WCACHE) {
		unsigned long long capacity;
		int barrier;
		/*
		 * We must avoid issuing commands a drive does not
		 * understand or we may crash it. We check flush cache
		 * is supported. We also check we have the LBA48 flush
		 * cache if the drive capacity is too large. By this
		 * time we have trimmed the drive capacity if LBA48 is
		 * not available so we don't need to recheck that.
		 */
		capacity = ide_gd_capacity(drive);
		barrier = ata_id_flush_enabled(id) &&
			(drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
			((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
			 capacity <= (1ULL << 28) ||
			 ata_id_flush_ext_enabled(id));

		printk(KERN_INFO "%s: cache flushes %ssupported\n",
		       drive->name, barrier ? "" : "not ");

		if (barrier) {
			wc = true;
			drive->prep_rq = idedisk_prep_rq;
		}
	}

	blk_queue_write_cache(drive->queue, wc, false);
}

ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);

static int set_wcache(ide_drive_t *drive, int arg)
{
	int err = 1;

	if (arg < 0 || arg > 1)
		return -EINVAL;

	if (ata_id_flush_enabled(drive->id)) {
		err = ide_do_setfeature(drive,
			arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
		if (err == 0) {
			if (arg)
				drive->dev_flags |= IDE_DFLAG_WCACHE;
			else
				drive->dev_flags &= ~IDE_DFLAG_WCACHE;
		}
	}

	update_flush(drive);

	return err;
}

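/* Synchronously flush the drive's write cache with FLUSH CACHE (EXT). */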
static int do_idedisk_flushcache(ide_drive_t *drive)
{
	struct ide_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (ata_id_flush_ext_enabled(drive->id))
		cmd.tf.command = ATA_CMD_FLUSH_EXT;
	else
		cmd.tf.command = ATA_CMD_FLUSH;
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

	return ide_no_data_taskfile(drive, &cmd);
}

ide_devset_get(acoustic, acoustic);

static int set_acoustic(ide_drive_t *drive, int arg)
{
	if (arg < 0 || arg > 254)
		return -EINVAL;

	ide_do_setfeature(drive,
		arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);

	drive->acoustic = arg;

	return 0;
}

ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);

/*
 * drive->addressing:
 *	0: 28-bit
 *	1: 48-bit
 *	2: 48-bit capable doing 28-bit
 */
static int set_addressing(ide_drive_t *drive, int arg)
{
	if (arg < 0 || arg > 2)
		return -EINVAL;

	if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
	    ata_id_lba48_enabled(drive->id) == 0))
		return -EIO;

	if (arg == 2)
		arg = 0;

	if (arg)
		drive->dev_flags |= IDE_DFLAG_LBA48;
	else
		drive->dev_flags &= ~IDE_DFLAG_LBA48;

	return 0;
}

ide_ext_devset_rw(acoustic, acoustic);
ide_ext_devset_rw(address, addressing);
ide_ext_devset_rw(multcount, multcount);
ide_ext_devset_rw(wcache, wcache);

ide_ext_devset_rw_sync(nowerr, nowerr);

static int ide_disk_check(ide_drive_t *drive, const char *s)
{
	return 1;
}

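/*
 * Probe-time setup: register with procfs, configure door locking and
 * LBA48 addressing, size the request queue, compute the capacity and
 * BIOS geometry, report the drive and enable the write cache.
 */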
static void ide_disk_setup(ide_drive_t *drive)
{
	struct ide_disk_obj *idkp = drive->driver_data;
	struct request_queue *q = drive->queue;
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;
	char *m = (char *)&id[ATA_ID_PROD];
	unsigned long long capacity;

	ide_proc_register_driver(drive, idkp->driver);

	if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
		return;

	if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
		/*
		 * Removable disks (e.g. SYQUEST); ignore 'WD' drives
		 */
		if (m[0] != 'W' || m[1] != 'D')
			drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
	}

	(void)set_addressing(drive, 1);

	if (drive->dev_flags & IDE_DFLAG_LBA48) {
		int max_s = 2048;

		if (max_s > hwif->rqsize)
			max_s = hwif->rqsize;

		blk_queue_max_hw_sectors(q, max_s);
	}

	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
	       queue_max_sectors(q) / 2);

	if (ata_id_is_ssd(id)) {
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	}

	/* calculate drive capacity, and select LBA if possible */
	ide_disk_get_capacity(drive);

	/*
	 * if possible, give fdisk access to more of the drive,
	 * by correcting bios_cyls:
	 */
	capacity = ide_gd_capacity(drive);

	if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
		if (ata_id_lba48_enabled(drive->id)) {
			/* compatibility */
			drive->bios_sect = 63;
			drive->bios_head = 255;
		}

		if (drive->bios_sect && drive->bios_head) {
			unsigned int cap0 = capacity; /* truncate to 32 bits */
			unsigned int cylsz, cyl;

			if (cap0 != capacity)
				drive->bios_cyl = 65535;
			else {
				cylsz = drive->bios_sect * drive->bios_head;
				cyl = cap0 / cylsz;
				if (cyl > 65535)
					cyl = 65535;
				if (cyl > drive->bios_cyl)
					drive->bios_cyl = cyl;
			}
		}
	}
	printk(KERN_INFO "%s: %llu sectors (%llu MB)",
			 drive->name, capacity, sectors_to_MB(capacity));

	/* Only print cache size when it was specified */
	if (id[ATA_ID_BUF_SIZE])
		printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);

	printk(KERN_CONT ", CHS=%d/%d/%d\n",
			 drive->bios_cyl, drive->bios_head, drive->bios_sect);

	/* write cache enabled? */
	if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
		drive->dev_flags |= IDE_DFLAG_WCACHE;

	set_wcache(drive, 1);

	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
	    (drive->head == 0 || drive->head > 16))
		printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
			drive->name, drive->head);
}

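/*
 * Flush the drive's write cache, warning on failure; a no-op if the
 * drive does not support FLUSH CACHE or write caching is disabled.
 */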
static void ide_disk_flush(ide_drive_t *drive)
{
	if (ata_id_flush_enabled(drive->id) == 0 ||
	    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
		return;

	if (do_idedisk_flushcache(drive))
		printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
}

static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
{
	return 0;
}

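/*
 * Lock or unlock the door of a removable drive with MEDIA LOCK/UNLOCK;
 * door locking is disabled for the drive if the command fails.
 */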
static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
				 int on)
{
	struct ide_cmd cmd;
	int ret;

	if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

	ret = ide_no_data_taskfile(drive, &cmd);

	if (ret)
		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;

	return ret;
}

const struct ide_disk_ops ide_ata_disk_ops = {
	.check			= ide_disk_check,
	.unlock_native_capacity	= ide_disk_unlock_native_capacity,
	.get_capacity		= ide_disk_get_capacity,
	.setup			= ide_disk_setup,
	.flush			= ide_disk_flush,
	.init_media		= ide_disk_init_media,
	.set_doorlock		= ide_disk_set_doorlock,
	.do_request		= ide_do_rw_disk,
	.ioctl			= ide_disk_ioctl,
	.compat_ioctl		= ide_disk_ioctl,
};