/*
 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
 * Copyright (C) 1998-2002 Linux ATA Development
 *                         Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003      Red Hat
 * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
 */

/*
 * Mostly written by Mark Lord <mlord@pobox.com>
 *                and Gadi Oxman <gadio@netvision.net.il>
 *                and Andre Hedrick <andre@linux-ide.org>
 *
 * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/leds.h>
#include <linux/ide.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

#include "ide-disk.h"

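/*
 * Opcode table indexed by ide_tf_set_cmd(): four entries per transfer
 * type ({28-bit read, 28-bit write, LBA48 read, LBA48 write}), with
 * multi-sector PIO at index 0, single-sector PIO at 4 and DMA at 8.
 */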
static const u8 ide_rw_cmds[] = {
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
};

static void ide_tf_set_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 dma)
{
        u8 index, lba48, write;

        lba48 = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
        write = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;

        if (dma) {
                cmd->protocol = ATA_PROT_DMA;
                index = 8;
        } else {
                cmd->protocol = ATA_PROT_PIO;
                if (drive->mult_count) {
                        cmd->tf_flags |= IDE_TFLAG_MULTI_PIO;
                        index = 0;
                } else
                        index = 4;
        }

        cmd->tf.command = ide_rw_cmds[index + lba48 + write];
}

/*
 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
 * using LBA if supported, or CHS otherwise, to address sectors.
 */
static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
                                        sector_t block)
{
        ide_hwif_t *hwif = drive->hwif;
        u16 nsectors = (u16)blk_rq_sectors(rq);
        u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
        u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
        struct ide_cmd cmd;
        struct ide_taskfile *tf = &cmd.tf;
        ide_startstop_t rc;

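        /*
         * Hosts flagged IDE_HFLAG_NO_LBA48_DMA cannot do DMA with LBA48
         * taskfiles: beyond the 28-bit boundary fall back to PIO,
         * otherwise just drop LBA48 addressing for this request.
         */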
        if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
                if (block + blk_rq_sectors(rq) > 1ULL << 28)
                        dma = 0;
                else
                        lba48 = 0;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;

        if (drive->dev_flags & IDE_DFLAG_LBA) {
                if (lba48) {
                        pr_debug("%s: LBA=0x%012llx\n", drive->name,
                                 (unsigned long long)block);

                        tf->nsect = nsectors & 0xff;
                        tf->lbal = (u8) block;
                        tf->lbam = (u8)(block >> 8);
                        tf->lbah = (u8)(block >> 16);
                        tf->device = ATA_LBA;

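                        /*
                         * The high-order half of the 48-bit LBA and of the
                         * sector count goes into the hob taskfile.
                         */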
                        tf = &cmd.hob;
                        tf->nsect = (nsectors >> 8) & 0xff;
                        tf->lbal = (u8)(block >> 24);
                        if (sizeof(block) != 4) {
                                tf->lbam = (u8)((u64)block >> 32);
                                tf->lbah = (u8)((u64)block >> 40);
                        }

                        cmd.valid.out.hob = IDE_VALID_OUT_HOB;
                        cmd.valid.in.hob = IDE_VALID_IN_HOB;
                        cmd.tf_flags |= IDE_TFLAG_LBA48;
                } else {
                        tf->nsect = nsectors & 0xff;
                        tf->lbal = block;
                        tf->lbam = block >>= 8;
                        tf->lbah = block >>= 8;
                        tf->device = ((block >> 8) & 0xf) | ATA_LBA;
                }
        } else {
                unsigned int sect, head, cyl, track;

                track = (int)block / drive->sect;
                sect = (int)block % drive->sect + 1;
                head = track % drive->head;
                cyl = track / drive->head;

                pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);

                tf->nsect = nsectors & 0xff;
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device = head;
        }

        cmd.tf_flags |= IDE_TFLAG_FS;

        if (rq_data_dir(rq))
                cmd.tf_flags |= IDE_TFLAG_WRITE;

        ide_tf_set_cmd(drive, &cmd, dma);
        cmd.rq = rq;

        if (dma == 0) {
                ide_init_sg_cmd(&cmd, nsectors << 9);
                ide_map_sg(drive, &cmd);
        }

        rc = do_rw_taskfile(drive, &cmd);

        if (rc == ide_stopped && dma) {
                /* fallback to PIO */
                cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
                ide_tf_set_cmd(drive, &cmd, 0);
                ide_init_sg_cmd(&cmd, nsectors << 9);
                rc = do_rw_taskfile(drive, &cmd);
        }

        return rc;
}

/*
 * 268435455  == 137439 MB or 28bit limit
 * 320173056  == 163929 MB or 48bit addressing
 * 1073741822 == 549756 MB or 48bit addressing fake drive
 */

static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
                                      sector_t block)
{
        ide_hwif_t *hwif = drive->hwif;

        BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
        BUG_ON(rq->cmd_type != REQ_TYPE_FS);

        ledtrig_ide_activity();

        pr_debug("%s: %sing: block=%llu, sectors=%u\n",
                 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
                 (unsigned long long)block, blk_rq_sectors(rq));

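        /*
         * Give the host driver a chance to tweak the request first,
         * if it implements the ->rw_disk hook.
         */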
        if (hwif->rw_disk)
                hwif->rw_disk(drive, rq);

        return __ide_do_rw_disk(drive, rq, block);
}

/*
 * Queries for true maximum capacity of the drive.
 * Returns maximum LBA address (> 0) of the drive, 0 if failed.
 */
static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
{
        struct ide_cmd cmd;
        struct ide_taskfile *tf = &cmd.tf;
        u64 addr = 0;

        memset(&cmd, 0, sizeof(cmd));
        if (lba48)
                tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
        else
                tf->command = ATA_CMD_READ_NATIVE_MAX;
        tf->device = ATA_LBA;

        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
        if (lba48) {
                cmd.valid.out.hob = IDE_VALID_OUT_HOB;
                cmd.valid.in.hob = IDE_VALID_IN_HOB;
                cmd.tf_flags = IDE_TFLAG_LBA48;
        }

        ide_no_data_taskfile(drive, &cmd);

        /* if OK, compute maximum address value */
        if (!(tf->status & ATA_ERR))
                addr = ide_get_lba_addr(&cmd, lba48) + 1;

        return addr;
}

/*
 * Sets maximum virtual LBA address of the drive.
 * Returns new maximum virtual LBA address (> 0) or 0 on failure.
 */
static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
{
        struct ide_cmd cmd;
        struct ide_taskfile *tf = &cmd.tf;
        u64 addr_set = 0;

        addr_req--;

        memset(&cmd, 0, sizeof(cmd));
        tf->lbal = (addr_req >> 0) & 0xff;
        tf->lbam = (addr_req >>= 8) & 0xff;
        tf->lbah = (addr_req >>= 8) & 0xff;
        if (lba48) {
                cmd.hob.lbal = (addr_req >>= 8) & 0xff;
                cmd.hob.lbam = (addr_req >>= 8) & 0xff;
                cmd.hob.lbah = (addr_req >>= 8) & 0xff;
                tf->command = ATA_CMD_SET_MAX_EXT;
        } else {
                tf->device = (addr_req >>= 8) & 0x0f;
                tf->command = ATA_CMD_SET_MAX;
        }
        tf->device |= ATA_LBA;

        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
        if (lba48) {
                cmd.valid.out.hob = IDE_VALID_OUT_HOB;
                cmd.valid.in.hob = IDE_VALID_IN_HOB;
                cmd.tf_flags = IDE_TFLAG_LBA48;
        }

        ide_no_data_taskfile(drive, &cmd);

        /* if OK, compute maximum address value */
        if (!(tf->status & ATA_ERR))
                addr_set = ide_get_lba_addr(&cmd, lba48) + 1;

        return addr_set;
}

static unsigned long long sectors_to_MB(unsigned long long n)
{
        n <<= 9;                /* make it bytes */
        do_div(n, 1000000);     /* make it MB */
        return n;
}

/*
 * Some disks report total number of sectors instead of
 * maximum sector address. We list them here.
 */
static const struct drive_list_entry hpa_list[] = {
        { "ST340823A", NULL },
        { "ST320413A", NULL },
        { "ST310211A", NULL },
        { NULL, NULL }
};

static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
{
        u64 capacity, set_max;

        capacity = drive->capacity64;
        set_max = idedisk_read_native_max_address(drive, lba48);

        if (ide_in_drive_list(drive->id, hpa_list)) {
                /*
                 * Since we are inclusive wrt to firmware revisions do this
                 * extra check and apply the workaround only when needed.
                 */
                if (set_max == capacity + 1)
                        set_max--;
        }

        return set_max;
}

static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
{
        set_max = idedisk_set_max_address(drive, set_max, lba48);
        if (set_max)
                drive->capacity64 = set_max;

        return set_max;
}

static void idedisk_check_hpa(ide_drive_t *drive)
{
        u64 capacity, set_max;
        int lba48 = ata_id_lba48_enabled(drive->id);

        capacity = drive->capacity64;
        set_max = ide_disk_hpa_get_native_capacity(drive, lba48);

        if (set_max <= capacity)
                return;

        drive->probed_capacity = set_max;

        printk(KERN_INFO "%s: Host Protected Area detected.\n"
                         "\tcurrent capacity is %llu sectors (%llu MB)\n"
                         "\tnative capacity is %llu sectors (%llu MB)\n",
                         drive->name,
                         capacity, sectors_to_MB(capacity),
                         set_max, sectors_to_MB(set_max));

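        /*
         * Only lift the HPA if the user asked for it (IDE_DFLAG_NOHPA,
         * normally set via the "nohpa" parameter); otherwise just report
         * the native capacity and keep the reduced one.
         */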
        if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
                return;

        set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
        if (set_max)
                printk(KERN_INFO "%s: Host Protected Area disabled.\n",
                                 drive->name);
}

static int ide_disk_get_capacity(ide_drive_t *drive)
{
        u16 *id = drive->id;
        int lba;

        if (ata_id_lba48_enabled(id)) {
                /* drive speaks 48-bit LBA */
                lba = 1;
                drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
        } else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
                /* drive speaks 28-bit LBA */
                lba = 1;
                drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
        } else {
                /* drive speaks boring old 28-bit CHS */
                lba = 0;
                drive->capacity64 = drive->cyl * drive->head * drive->sect;
        }

        drive->probed_capacity = drive->capacity64;

        if (lba) {
                drive->dev_flags |= IDE_DFLAG_LBA;

                /*
                 * If this device supports the Host Protected Area feature set,
                 * then we may need to change our opinion about its capacity.
                 */
                if (ata_id_hpa_enabled(id))
                        idedisk_check_hpa(drive);
        }

        /* limit drive capacity to 137GB if LBA48 cannot be used */
        if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
            drive->capacity64 > 1ULL << 28) {
                printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
                       "%llu sectors (%llu MB)\n",
                       drive->name, (unsigned long long)drive->capacity64,
                       sectors_to_MB(drive->capacity64));
                drive->probed_capacity = drive->capacity64 = 1ULL << 28;
        }

        if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
            (drive->dev_flags & IDE_DFLAG_LBA48)) {
                if (drive->capacity64 > 1ULL << 28) {
                        printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
                                         " will be used for accessing sectors "
                                         "> %u\n", drive->name, 1 << 28);
                } else
                        drive->dev_flags &= ~IDE_DFLAG_LBA48;
        }

        return 0;
}

static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
{
        u16 *id = drive->id;
        int lba48 = ata_id_lba48_enabled(id);

        if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
            ata_id_hpa_enabled(id) == 0)
                return;

        /*
         * according to the spec the SET MAX ADDRESS command shall be
         * immediately preceded by a READ NATIVE MAX ADDRESS command
         */
        if (!ide_disk_hpa_get_native_capacity(drive, lba48))
                return;

        if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
                drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}

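/*
 * Request prep function: turn a REQ_FLUSH request into an ATA cache
 * flush taskfile command before it reaches the driver.
 */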
static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
{
        ide_drive_t *drive = q->queuedata;
        struct ide_cmd *cmd;

        if (!(rq->cmd_flags & REQ_FLUSH))
                return BLKPREP_OK;

        if (rq->special) {
                cmd = rq->special;
                memset(cmd, 0, sizeof(*cmd));
        } else {
                cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
        }

        /* FIXME: map struct ide_taskfile on rq->cmd[] */
        BUG_ON(cmd == NULL);

        if (ata_id_flush_ext_enabled(drive->id) &&
            (drive->capacity64 >= (1UL << 28)))
                cmd->tf.command = ATA_CMD_FLUSH_EXT;
        else
                cmd->tf.command = ATA_CMD_FLUSH;
        cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd->tf_flags = IDE_TFLAG_DYN;
        cmd->protocol = ATA_PROT_NODATA;

        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
        rq->special = cmd;
        cmd->rq = rq;

        return BLKPREP_OK;
}

ide_devset_get(multcount, mult_count);

/*
 * This is tightly woven into the driver->do_special can not touch.
 * DON'T do it again until a total personality rewrite is committed.
 */
static int set_multcount(ide_drive_t *drive, int arg)
{
        struct request *rq;
        int error;

        if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
                return -EINVAL;

        if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
                return -EBUSY;

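        /*
         * Queue an empty taskfile request: with IDE_SFLAG_SET_MULTMODE set,
         * the do_special path issues SET MULTIPLE MODE with mult_req before
         * the request itself is processed.
         */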
        rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;

        drive->mult_req = arg;
        drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
        error = blk_execute_rq(drive->queue, NULL, rq, 0);
        blk_put_request(rq);

        return (drive->mult_count == arg) ? 0 : -EIO;
}

ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);

static int set_nowerr(ide_drive_t *drive, int arg)
{
        if (arg < 0 || arg > 1)
                return -EINVAL;

        if (arg)
                drive->dev_flags |= IDE_DFLAG_NOWERR;
        else
                drive->dev_flags &= ~IDE_DFLAG_NOWERR;

        drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;

        return 0;
}

static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.feature = feature;
        cmd.tf.nsect = nsect;
        cmd.tf.command = ATA_CMD_SET_FEATURES;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;

        return ide_no_data_taskfile(drive, &cmd);
}

static void update_flush(ide_drive_t *drive)
{
        u16 *id = drive->id;
        unsigned flush = 0;

        if (drive->dev_flags & IDE_DFLAG_WCACHE) {
                unsigned long long capacity;
                int barrier;
                /*
                 * We must avoid issuing commands a drive does not
                 * understand or we may crash it. We check flush cache
                 * is supported. We also check we have the LBA48 flush
                 * cache if the drive capacity is too large. By this
                 * time we have trimmed the drive capacity if LBA48 is
                 * not available so we don't need to recheck that.
                 */
                capacity = ide_gd_capacity(drive);
                barrier = ata_id_flush_enabled(id) &&
                        (drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
                        ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
                         capacity <= (1ULL << 28) ||
                         ata_id_flush_ext_enabled(id));

                printk(KERN_INFO "%s: cache flushes %ssupported\n",
                       drive->name, barrier ? "" : "not ");

                if (barrier) {
                        flush = REQ_FLUSH;
                        blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
                }
        }

        blk_queue_flush(drive->queue, flush);
}

ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);

static int set_wcache(ide_drive_t *drive, int arg)
{
        int err = 1;

        if (arg < 0 || arg > 1)
                return -EINVAL;

        if (ata_id_flush_enabled(drive->id)) {
                err = ide_do_setfeature(drive,
                        arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
                if (err == 0) {
                        if (arg)
                                drive->dev_flags |= IDE_DFLAG_WCACHE;
                        else
                                drive->dev_flags &= ~IDE_DFLAG_WCACHE;
                }
        }

        update_flush(drive);

        return err;
}

static int do_idedisk_flushcache(ide_drive_t *drive)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        if (ata_id_flush_ext_enabled(drive->id))
                cmd.tf.command = ATA_CMD_FLUSH_EXT;
        else
                cmd.tf.command = ATA_CMD_FLUSH;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;

        return ide_no_data_taskfile(drive, &cmd);
}

ide_devset_get(acoustic, acoustic);

static int set_acoustic(ide_drive_t *drive, int arg)
{
        if (arg < 0 || arg > 254)
                return -EINVAL;

        ide_do_setfeature(drive,
                arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);

        drive->acoustic = arg;

        return 0;
}

ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);

/*
 * drive->addressing:
 *      0: 28-bit
 *      1: 48-bit
 *      2: 48-bit capable doing 28-bit
 */
static int set_addressing(ide_drive_t *drive, int arg)
{
        if (arg < 0 || arg > 2)
                return -EINVAL;

        if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
            ata_id_lba48_enabled(drive->id) == 0))
                return -EIO;

        if (arg == 2)
                arg = 0;

        if (arg)
                drive->dev_flags |= IDE_DFLAG_LBA48;
        else
                drive->dev_flags &= ~IDE_DFLAG_LBA48;

        return 0;
}

ide_ext_devset_rw(acoustic, acoustic);
ide_ext_devset_rw(address, addressing);
ide_ext_devset_rw(multcount, multcount);
ide_ext_devset_rw(wcache, wcache);

ide_ext_devset_rw_sync(nowerr, nowerr);

static int ide_disk_check(ide_drive_t *drive, const char *s)
{
        return 1;
}

static void ide_disk_setup(ide_drive_t *drive)
{
        struct ide_disk_obj *idkp = drive->driver_data;
        struct request_queue *q = drive->queue;
        ide_hwif_t *hwif = drive->hwif;
        u16 *id = drive->id;
        char *m = (char *)&id[ATA_ID_PROD];
        unsigned long long capacity;

        ide_proc_register_driver(drive, idkp->driver);

        if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
                return;

        if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
                /*
                 * Removable disks (eg. SYQUEST); ignore 'WD' drives
                 */
                if (m[0] != 'W' || m[1] != 'D')
                        drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
        }

        (void)set_addressing(drive, 1);

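        /*
         * LBA48 allows requests larger than the default: cap them at
         * 2048 sectors (1 MiB) or the host's rqsize limit, whichever
         * is lower.
         */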
        if (drive->dev_flags & IDE_DFLAG_LBA48) {
                int max_s = 2048;

                if (max_s > hwif->rqsize)
                        max_s = hwif->rqsize;

                blk_queue_max_hw_sectors(q, max_s);
        }

        printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
               queue_max_sectors(q) / 2);

        if (ata_id_is_ssd(id)) {
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
        }

        /* calculate drive capacity, and select LBA if possible */
        ide_disk_get_capacity(drive);

        /*
         * if possible, give fdisk access to more of the drive,
         * by correcting bios_cyls:
         */
        capacity = ide_gd_capacity(drive);

        if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
                if (ata_id_lba48_enabled(drive->id)) {
                        /* compatibility */
                        drive->bios_sect = 63;
                        drive->bios_head = 255;
                }

                if (drive->bios_sect && drive->bios_head) {
                        unsigned int cap0 = capacity; /* truncate to 32 bits */
                        unsigned int cylsz, cyl;

                        if (cap0 != capacity)
                                drive->bios_cyl = 65535;
                        else {
                                cylsz = drive->bios_sect * drive->bios_head;
                                cyl = cap0 / cylsz;
                                if (cyl > 65535)
                                        cyl = 65535;
                                if (cyl > drive->bios_cyl)
                                        drive->bios_cyl = cyl;
                        }
                }
        }
        printk(KERN_INFO "%s: %llu sectors (%llu MB)",
               drive->name, capacity, sectors_to_MB(capacity));

        /* Only print cache size when it was specified */
        if (id[ATA_ID_BUF_SIZE])
                printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);

        printk(KERN_CONT ", CHS=%d/%d/%d\n",
               drive->bios_cyl, drive->bios_head, drive->bios_sect);

        /* write cache enabled? */
        if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
                drive->dev_flags |= IDE_DFLAG_WCACHE;

        set_wcache(drive, 1);

        if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
            (drive->head == 0 || drive->head > 16)) {
                printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
                       drive->name, drive->head);
                drive->dev_flags &= ~IDE_DFLAG_ATTACH;
        } else
                drive->dev_flags |= IDE_DFLAG_ATTACH;
}

static void ide_disk_flush(ide_drive_t *drive)
{
        if (ata_id_flush_enabled(drive->id) == 0 ||
            (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
                return;

        if (do_idedisk_flushcache(drive))
                printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
}

static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
{
        return 0;
}

static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
                                 int on)
{
        struct ide_cmd cmd;
        int ret;

        if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
                return 0;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;

        ret = ide_no_data_taskfile(drive, &cmd);

        if (ret)
                drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;

        return ret;
}

const struct ide_disk_ops ide_ata_disk_ops = {
        .check                  = ide_disk_check,
        .unlock_native_capacity = ide_disk_unlock_native_capacity,
        .get_capacity           = ide_disk_get_capacity,
        .setup                  = ide_disk_setup,
        .flush                  = ide_disk_flush,
        .init_media             = ide_disk_init_media,
        .set_doorlock           = ide_disk_set_doorlock,
        .do_request             = ide_do_rw_disk,
        .ioctl                  = ide_disk_ioctl,
};