1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/kernel.h>
31#include <linux/timer.h>
32#include <linux/mm.h>
33#include <linux/interrupt.h>
34#include <linux/major.h>
35#include <linux/errno.h>
36#include <linux/genhd.h>
37#include <linux/blkpg.h>
38#include <linux/slab.h>
39#include <linux/init.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <linux/ide.h>
43#include <linux/hdreg.h>
44#include <linux/completion.h>
45#include <linux/reboot.h>
46#include <linux/cdrom.h>
47#include <linux/seq_file.h>
48#include <linux/device.h>
49#include <linux/kmod.h>
50#include <linux/scatterlist.h>
51#include <linux/bitops.h>
52
53#include <asm/byteorder.h>
54#include <asm/irq.h>
55#include <asm/uaccess.h>
56#include <asm/io.h>
57
58static int __ide_end_request(ide_drive_t *drive, struct request *rq,
59 int uptodate, unsigned int nr_bytes, int dequeue)
60{
61 int ret = 1;
62 int error = 0;
63
64 if (uptodate <= 0)
65 error = uptodate ? uptodate : -EIO;
66
67
68
69
70
71 if (blk_noretry_request(rq) && error)
72 nr_bytes = rq->hard_nr_sectors << 9;
73
74 if (!blk_fs_request(rq) && error && !rq->errors)
75 rq->errors = -EIO;
76
77
78
79
80
81 if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
82 drive->retry_pio <= 3) {
83 drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
84 ide_dma_on(drive);
85 }
86
87 if (!__blk_end_request(rq, error, nr_bytes)) {
88 if (dequeue)
89 HWGROUP(drive)->rq = NULL;
90 ret = 0;
91 }
92
93 return ret;
94}
95
96
97
98
99
100
101
102
103
104
105
106
107int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
108{
109 unsigned int nr_bytes = nr_sectors << 9;
110 struct request *rq;
111 unsigned long flags;
112 int ret = 1;
113
114
115
116
117
118 spin_lock_irqsave(&ide_lock, flags);
119 rq = HWGROUP(drive)->rq;
120
121 if (!nr_bytes) {
122 if (blk_pc_request(rq))
123 nr_bytes = rq->data_len;
124 else
125 nr_bytes = rq->hard_cur_sectors << 9;
126 }
127
128 ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
129
130 spin_unlock_irqrestore(&ide_lock, flags);
131 return ret;
132}
133EXPORT_SYMBOL(ide_end_request);
134
135static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
136{
137 struct request_pm_state *pm = rq->data;
138
139#ifdef DEBUG_PM
140 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
141 drive->name, pm->pm_step);
142#endif
143 if (drive->media != ide_disk)
144 return;
145
146 switch (pm->pm_step) {
147 case IDE_PM_FLUSH_CACHE:
148 if (pm->pm_state == PM_EVENT_FREEZE)
149 pm->pm_step = IDE_PM_COMPLETED;
150 else
151 pm->pm_step = IDE_PM_STANDBY;
152 break;
153 case IDE_PM_STANDBY:
154 pm->pm_step = IDE_PM_COMPLETED;
155 break;
156 case IDE_PM_RESTORE_PIO:
157 pm->pm_step = IDE_PM_IDLE;
158 break;
159 case IDE_PM_IDLE:
160 pm->pm_step = IDE_PM_RESTORE_DMA;
161 break;
162 }
163}
164
/*
 * Issue the taskfile command for the current PM step of @rq, or skip
 * the step if it does not apply to this drive.  Returns ide_stopped
 * when no command was issued; otherwise returns the result of
 * do_rw_taskfile().
 */
static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* suspend step 1: flush cache */
		if (drive->media != ide_disk)
			break;
		/* flush not supported or write cache off: skip this step */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			args->tf.command = ATA_CMD_FLUSH_EXT;
		else
			args->tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* suspend step 2: standby */
		args->tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* resume step 1: restore PIO mode */
		ide_set_max_pio(drive);
		/* skip IDE_PM_IDLE for non-disk (ATAPI) devices */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* resume step 2: idle immediate */
		args->tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* resume step 3: restore DMA mode */
		/* no DMA ops on this interface: nothing to restore */
		if (drive->hwif->dma_ops == NULL)
			break;
		/* NOTE(review): unconditionally re-enables DMA; does not
		 * check whether the drive was using DMA before suspend */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;
	return ide_stopped;

out_do_tf:
	args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args->data_phase = TASKFILE_NO_DATA;
	return do_rw_taskfile(drive, args);
}
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
243 int uptodate, int nr_sectors)
244{
245 unsigned long flags;
246 int ret;
247
248 spin_lock_irqsave(&ide_lock, flags);
249 BUG_ON(!blk_rq_started(rq));
250 ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
251 spin_unlock_irqrestore(&ide_lock, flags);
252
253 return ret;
254}
255EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
256
257
258
259
260
261
262
263
264
265
266static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
267{
268 unsigned long flags;
269
270#ifdef DEBUG_PM
271 printk("%s: completing PM request, %s\n", drive->name,
272 blk_pm_suspend_request(rq) ? "suspend" : "resume");
273#endif
274 spin_lock_irqsave(&ide_lock, flags);
275 if (blk_pm_suspend_request(rq)) {
276 blk_stop_queue(drive->queue);
277 } else {
278 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
279 blk_start_queue(drive->queue);
280 }
281 HWGROUP(drive)->rq = NULL;
282 if (__blk_end_request(rq, 0, 0))
283 BUG();
284 spin_unlock_irqrestore(&ide_lock, flags);
285}
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
/*
 * ide_end_drive_cmd - clean up after an explicit drive command
 * @drive: drive the command was issued to
 * @stat: status register contents at completion
 * @err: error register contents at completion
 *
 * Records the final register state in the request.  Taskfile requests
 * get their taskfile read back (and dynamically allocated tasks freed);
 * PM requests are advanced through their state machine and complete
 * themselves; everything else is finished here.
 */
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	unsigned long flags;
	struct request *rq;

	/* snapshot the current request under ide_lock */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			/* hand the final status/error back to the submitter */
			tf->error = err;
			tf->status = stat;

			drive->hwif->tp_ops->tf_read(drive, task);

			/* dynamically allocated tasks are owned by us now */
			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;

		/* step the PM machine; it completes the request itself */
		ide_complete_power_step(drive, rq);
		if (pm->pm_step == IDE_PM_COMPLETED)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	/* command requests carry no payload: must complete in one call */
	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
				       blk_rq_bytes(rq))))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);
346
347static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
348{
349 if (rq->rq_disk) {
350 ide_driver_t *drv;
351
352 drv = *(ide_driver_t **)rq->rq_disk->private_data;
353 drv->end_request(drive, 0, 0);
354 } else
355 ide_end_request(drive, 0, 0);
356}
357
/*
 * Decode an ATA (disk) error, update the request's retry accounting
 * and decide how to recover.  Returns ide_stopped, or the result of
 * ide_do_reset() when a channel reset is required.
 */
static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other status bits are useless while BUSY is set */
		rq->errors |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* the error register is only meaningful when ERR is set */
		if (err == ATA_ABORTED) {
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA CRC error: just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* bad block / uncorrectable: retries won't help */
			rq->errors = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* track-0-not-found: help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	/* a pending read leaves data in the FIFO: drain it so the
	 * channel does not stay wedged */
	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	/* device still busy or asking for data: force a reset */
	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	/* schedule a recalibrate before the next command */
	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}
412
413static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
414{
415 ide_hwif_t *hwif = drive->hwif;
416
417 if ((stat & ATA_BUSY) ||
418 ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
419
420 rq->errors |= ERROR_RESET;
421 } else {
422
423 }
424
425 if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
426
427 hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
428
429 if (rq->errors >= ERROR_MAX) {
430 ide_kill_rq(drive, rq);
431 } else {
432 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
433 ++rq->errors;
434 return ide_do_reset(drive);
435 }
436 ++rq->errors;
437 }
438
439 return ide_stopped;
440}
441
442ide_startstop_t
443__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
444{
445 if (drive->media == ide_disk)
446 return ide_ata_error(drive, rq, stat, err);
447 return ide_atapi_error(drive, rq, stat, err);
448}
449
450EXPORT_SYMBOL_GPL(__ide_error);
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
466{
467 struct request *rq;
468 u8 err;
469
470 err = ide_dump_status(drive, msg, stat);
471
472 if ((rq = HWGROUP(drive)->rq) == NULL)
473 return ide_stopped;
474
475
476 if (!blk_fs_request(rq)) {
477 rq->errors = 1;
478 ide_end_drive_cmd(drive, stat, err);
479 return ide_stopped;
480 }
481
482 if (rq->rq_disk) {
483 ide_driver_t *drv;
484
485 drv = *(ide_driver_t **)rq->rq_disk->private_data;
486 return drv->error(drive, rq, stat, err);
487 } else
488 return __ide_error(drive, rq, stat, err);
489}
490
491EXPORT_SYMBOL_GPL(ide_error);
492
493static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
494{
495 tf->nsect = drive->sect;
496 tf->lbal = drive->sect;
497 tf->lbam = drive->cyl;
498 tf->lbah = drive->cyl >> 8;
499 tf->device = (drive->head - 1) | drive->select;
500 tf->command = ATA_CMD_INIT_DEV_PARAMS;
501}
502
503static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
504{
505 tf->nsect = drive->sect;
506 tf->command = ATA_CMD_RESTORE;
507}
508
509static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
510{
511 tf->nsect = drive->mult_req;
512 tf->command = ATA_CMD_SET_MULTI;
513}
514
515static ide_startstop_t ide_disk_special(ide_drive_t *drive)
516{
517 special_t *s = &drive->special;
518 ide_task_t args;
519
520 memset(&args, 0, sizeof(ide_task_t));
521 args.data_phase = TASKFILE_NO_DATA;
522
523 if (s->b.set_geometry) {
524 s->b.set_geometry = 0;
525 ide_tf_set_specify_cmd(drive, &args.tf);
526 } else if (s->b.recalibrate) {
527 s->b.recalibrate = 0;
528 ide_tf_set_restore_cmd(drive, &args.tf);
529 } else if (s->b.set_multmode) {
530 s->b.set_multmode = 0;
531 ide_tf_set_setmult_cmd(drive, &args.tf);
532 } else if (s->all) {
533 int special = s->all;
534 s->all = 0;
535 printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
536 return ide_stopped;
537 }
538
539 args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
540 IDE_TFLAG_CUSTOM_HANDLER;
541
542 do_rw_taskfile(drive, &args);
543
544 return ide_started;
545}
546
547
548
549
550
551
552
553
554
555
556
557static ide_startstop_t do_special (ide_drive_t *drive)
558{
559 special_t *s = &drive->special;
560
561#ifdef DEBUG
562 printk("%s: do_special: 0x%02x\n", drive->name, s->all);
563#endif
564 if (drive->media == ide_disk)
565 return ide_disk_special(drive);
566
567 s->all = 0;
568 drive->mult_req = 0;
569 return ide_stopped;
570}
571
572void ide_map_sg(ide_drive_t *drive, struct request *rq)
573{
574 ide_hwif_t *hwif = drive->hwif;
575 struct scatterlist *sg = hwif->sg_table;
576
577 if (hwif->sg_mapped)
578 return;
579
580 if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
581 hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
582 } else {
583 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
584 hwif->sg_nents = 1;
585 }
586}
587
588EXPORT_SYMBOL_GPL(ide_map_sg);
589
590void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
591{
592 ide_hwif_t *hwif = drive->hwif;
593
594 hwif->nsect = hwif->nleft = rq->nr_sectors;
595 hwif->cursg_ofs = 0;
596 hwif->cursg = NULL;
597}
598
599EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
600
601
602
603
604
605
606
607
608
609
610
611
612
613static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
614 struct request *rq)
615{
616 ide_hwif_t *hwif = HWIF(drive);
617 ide_task_t *task = rq->special;
618
619 if (task) {
620 hwif->data_phase = task->data_phase;
621
622 switch (hwif->data_phase) {
623 case TASKFILE_MULTI_OUT:
624 case TASKFILE_OUT:
625 case TASKFILE_MULTI_IN:
626 case TASKFILE_IN:
627 ide_init_sg_cmd(drive, rq);
628 ide_map_sg(drive, rq);
629 default:
630 break;
631 }
632
633 return do_rw_taskfile(drive, task);
634 }
635
636
637
638
639
640#ifdef DEBUG
641 printk("%s: DRIVE_CMD (null)\n", drive->name);
642#endif
643 ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
644 ide_read_error(drive));
645
646 return ide_stopped;
647}
648
649int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
650 int arg)
651{
652 struct request_queue *q = drive->queue;
653 struct request *rq;
654 int ret = 0;
655
656 if (!(setting->flags & DS_SYNC))
657 return setting->set(drive, arg);
658
659 rq = blk_get_request(q, READ, __GFP_WAIT);
660 rq->cmd_type = REQ_TYPE_SPECIAL;
661 rq->cmd_len = 5;
662 rq->cmd[0] = REQ_DEVSET_EXEC;
663 *(int *)&rq->cmd[1] = arg;
664 rq->special = setting->set;
665
666 if (blk_execute_rq(q, NULL, rq, 0))
667 ret = rq->errors;
668 blk_put_request(rq);
669
670 return ret;
671}
672EXPORT_SYMBOL_GPL(ide_devset_execute);
673
/*
 * Handle REQ_TYPE_SPECIAL requests: head (un)parking, synchronous
 * device-setting changes queued by ide_devset_execute(), and drive
 * resets.  Unknown commands are logged and failed.
 */
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
		ide_task_t task;
		struct ide_taskfile *tf = &task.tf;

		memset(&task, 0, sizeof(task));
		if (cmd == REQ_PARK_HEADS) {
			/* rq->special carries the park deadline (jiffies) */
			drive->sleep = *(unsigned long *)rq->special;
			drive->dev_flags |= IDE_DFLAG_SLEEPING;
			/*
			 * IDLE IMMEDIATE with the head-unload signature:
			 * feature 0x44, LBA bytes 0x4c/0x4e/0x55 — the
			 * unload variant per the ATA spec (TODO confirm
			 * against ATA-7/8 IDLE IMMEDIATE definition).
			 */
			tf->command = ATA_CMD_IDLEIMMEDIATE;
			tf->feature = 0x44;
			tf->lbal = 0x4c;
			tf->lbam = 0x4e;
			tf->lbah = 0x55;
			task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
		} else		/* cmd == REQ_UNPARK_HEADS */
			tf->command = ATA_CMD_CHK_POWER;

		task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
		task.rq = rq;
		drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
		return do_rw_taskfile(drive, &task);
	}

	switch (cmd) {
	case REQ_DEVSET_EXEC:
	{
		/* ->special holds the setting's setter function,
		 * cmd[1..4] the int argument (see ide_devset_execute) */
		int err, (*setfunc)(ide_drive_t *, int) = rq->special;

		err = setfunc(drive, *(int *)&rq->cmd[1]);
		if (err)
			rq->errors = err;
		else
			err = 1;
		ide_end_request(drive, err, 0);
		return ide_stopped;
	}
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}
722
/*
 * Prepare the drive before a suspend/resume request is started:
 * the initial suspend step blocks the drive; the initial resume step
 * waits for the hardware to come out of BSY and re-enables its IRQ.
 */
static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* mark the drive blocked when starting the suspend sequence */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * On wakeup, first wait for BSY to clear with a long
		 * timeout (35s) BEFORE selecting the drive — a device
		 * on this interface may still be powering up, and the
		 * other device on the bus may be in reset.
		 */
		ide_hwif_t *hwif = drive->hwif;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		/* re-enable the interface IRQ, then wait for the
		 * selected drive itself (100s worst case) */
		hwif->tp_ops->set_irq(hwif, 1);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}
756
757
758
759
760
761
762
763
764
765
/*
 * start_request - start of I/O and command issuing for IDE
 *
 * Sanity-checks @rq, walks the PM state machine for power-management
 * requests, selects the drive and waits for it to become ready, then
 * dispatches the request to the appropriate handler (taskfile, PM,
 * special, or the owning media driver).  Pending "special" flags are
 * serviced first via do_special().
 */
static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * 0xff means the transfer rate was never programmed:
		 * do it now, after do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_request(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced by
			 * a check for REQ_TYPE_SPECIAL.
			 */
			return ide_special_rq(drive, rq);

		drv = *(ide_driver_t **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
835
836
837
838
839
840
841
842
843
844
845void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
846{
847 if (timeout > WAIT_WORSTCASE)
848 timeout = WAIT_WORSTCASE;
849 drive->sleep = timeout + jiffies;
850 drive->dev_flags |= IDE_DFLAG_SLEEPING;
851}
852
853EXPORT_SYMBOL(ide_stall_queue);
854
855#define WAKEUP(drive) ((drive)->service_start + 2 * (drive)->service_time)
856
857
858
859
860
861
862
863
864
865
/*
 * choose_drive - pick the next drive in a hwgroup to service
 *
 * Selects the non-sleeping (or overdue) drive with queued work that
 * has been waiting longest by the WAKEUP() heuristic.  A drive whose
 * queue is flushing is always chosen so its flush sequence is not
 * preempted.  A "nice" best candidate may be stalled to let another
 * drive run first, in which case the scan repeats.
 */
static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * A flushing queue is mid pre-flush / ordered-write / post-flush:
	 * three requests that must be treated as a single transaction, so
	 * this drive must not be preempted until it completes.
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * Small race: the queue could get replugged during the
		 * flush cycle — yank the plug so it finishes ASAP.
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		u8 dev_s = !!(drive->dev_flags & IDE_DFLAG_SLEEPING);
		u8 best_s = (best && !!(best->dev_flags & IDE_DFLAG_SLEEPING));

		/* candidate: awake (or sleep expired) with queued work */
		if ((dev_s == 0 || time_after_eq(jiffies, drive->sleep)) &&
		    !elv_queue_empty(drive->queue)) {
			if (best == NULL ||
			    (dev_s && (best_s == 0 || time_before(drive->sleep, best->sleep))) ||
			    (best_s == 0 && time_before(WAKEUP(drive), WAKEUP(best)))) {
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);

	/* a "nice" best drive that isn't due yet may yield its slot */
	if (best && (best->dev_flags & IDE_DFLAG_NICE1) &&
	    (best->dev_flags & IDE_DFLAG_SLEEPING) == 0 &&
	    best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare: see if another
			 * awake drive would benefit from running first.
			 */
			drive = best->next;
			do {
				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) == 0
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
/*
 * ide_do_request - issue pending requests for a hwgroup
 * @hwgroup: hwgroup to service
 * @masked_irq: IRQ the caller already has masked (IDE_NO_IRQ if none)
 *
 * Called with ide_lock held and local interrupts disabled.  While the
 * hwgroup is idle: pick a drive, dequeue its next request and start
 * it, dropping ide_lock (and masking the port IRQ) around the actual
 * command issue.  When every drive is asleep the hwgroup timer is
 * armed instead and hwgroup->busy is deliberately left set.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t *drive;
	ide_hwif_t *hwif;
	struct request *rq;
	ide_startstop_t startstop;
	int loops = 0;

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		/* for atari only */
		ide_get_lock(ide_intr, hwgroup);
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0;
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			/* find the earliest wakeup among sleeping drives */
			do {
				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) &&
				    (sleeping == 0 ||
				     time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze and wake this hwgroup
				 * up again via the timer; don't sleep less
				 * than WAIT_MIN_SLEEP to avoid hogging the
				 * CPU.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* hwgroup->busy stays 1 while sleeping */
			} else {
				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
			/*
			 * set nIEN for previous hwif, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (drive->quirk_list != 1)
				hwif->tp_ops->set_irq(hwif, 0);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * The queue isn't empty, but this can happen if the
		 * q->prep_rq_fn() decides to kill a request.
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a non-PM request while the drive is
		 * blocked for power management — blk_stop_queue() does not
		 * stop elv_next_request() from returning queued requests
		 * when we call ide_do_request() ourselves.  REQ_PREEMPT
		 * requests forced to the head of the queue are let through.
		 * loops bounds the retry so we service all drives in the
		 * hwgroup without looping forever.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* no pending ATA command at this point */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while the
		 * driver is still setting things up, so mask this interface's
		 * IRQ while the request is being started (unless the caller
		 * already masked it).
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		/* allow other IRQs while we start this request */
		local_irq_enable_in_hardirq();

		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}
1104
1105
1106
1107
1108void do_ide_request(struct request_queue *q)
1109{
1110 ide_drive_t *drive = q->queuedata;
1111
1112 ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
1113}
1114
1115
1116
1117
1118
1119
/*
 * ide_dma_timeout_retry - recover from a DMA timeout
 * @drive: drive that timed out
 * @error: <0 when the expiry handler reported a hard error
 *
 * Ends or times out the DMA transaction, disables DMA so the request
 * is retried in PIO mode, and rewinds the current request to the start
 * of its first uncompleted bio.
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end the current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)hwif->dma_ops->dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_ops->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure the request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	/* rewind to the start of the first remaining bio */
	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
/*
 * ide_timer_expiry - handle the lack of an expected IDE interrupt
 * @data: timer callback payload (the hwgroup)
 *
 * A command timed out before the drive responded.  If the handler has
 * an expiry hook, let it extend the timeout; otherwise simulate the
 * missing interrupt (masking the port IRQ meanwhile) or fall back to
 * DMA-timeout/error recovery, then restart the request queue.
 */
void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t *handler;
	ide_expiry_t *expiry;
	unsigned long flags;
	unsigned long wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred (the interrupt
		 * arrived just as the timer expired), or we were
		 * "sleeping" to give other devices a chance — nothing
		 * to complain about either way.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* positive return: re-arm for another wait */
				if ((wait = expiry(drive)) > 0) {
					hwgroup->timer.expires = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when
			 * invoking handler(), so mask this port's IRQ
			 * and disable local interrupts meanwhile.
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
			disable_irq(hwif->irq);
			/* local CPU only, as if handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				/* the IRQ was lost: ack and run the handler */
				if (drive->waiting_for_dma)
					hwif->dma_ops->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout",
						  hwif->tp_ops->read_status(hwif));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
/*
 * unexpected_intr - handle an interrupt nobody asked for
 * @irq: interrupt line that fired
 * @hwgroup: hwgroup sharing that line
 *
 * Nothing useful can be done with an unexpected interrupt beyond
 * reading the status register of each interface on the line (which
 * clears the condition) and logging it, rate-limited to once per
 * second.
 */
static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/* walk the hwif ring, checking every interface on this irq */
	do {
		if (hwif->irq == irq) {
			stat = hwif->tp_ops->read_status(hwif);

			if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
				/* try not to flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
/*
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwgroup pointer
 *
 * Acknowledges the IRQ, filters out spurious or shared-line
 * interrupts, then runs the currently installed hwgroup handler and
 * restarts the request queue once the handler reports ide_stopped.
 */
irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.  It could be
		 * (1) another PCI device sharing our INT#, (2) a drive that
		 * just entered sleep/standby letting us know, or (3) a
		 * spurious interrupt of unknown origin.  On PCI we cannot
		 * tell the difference, so just read the status register
		 * and hope it goes away.
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->chipset != ide_pci)
#endif
		{
			/*
			 * Probably not a shared PCI interrupt, so we can
			 * safely try to do something about it.
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, in case we have a
			 * leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
#endif
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * Should never happen; nothing we can do about it here.
		 * (Can occur if the drive is hot-unplugged.)
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * Happens regularly with a shared PCI IRQ, and with some
		 * buggy drives that raise the IRQ before their status
		 * register is up to date.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt; may install a handler for the next one */
	startstop = handler(drive);

	spin_lock_irq(&ide_lock);
	/*
	 * handler() may have set up another interrupt, but it cannot be
	 * delivered until we return — it's the same IRQ being serviced
	 * here, and Linux won't re-enter it on any CPU until then.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
1490{
1491 unsigned long flags;
1492 ide_hwgroup_t *hwgroup = HWGROUP(drive);
1493
1494 spin_lock_irqsave(&ide_lock, flags);
1495 hwgroup->rq = NULL;
1496 __elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
1497 blk_start_queueing(drive->queue);
1498 spin_unlock_irqrestore(&ide_lock, flags);
1499}
1500
1501EXPORT_SYMBOL(ide_do_drive_cmd);
1502
1503void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1504{
1505 ide_hwif_t *hwif = drive->hwif;
1506 ide_task_t task;
1507
1508 memset(&task, 0, sizeof(task));
1509 task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
1510 IDE_TFLAG_OUT_FEATURE | tf_flags;
1511 task.tf.feature = dma;
1512 task.tf.lbam = bcount & 0xff;
1513 task.tf.lbah = (bcount >> 8) & 0xff;
1514
1515 ide_tf_dump(drive->name, &task.tf);
1516 hwif->tp_ops->set_irq(hwif, 1);
1517 SELECT_MASK(drive, 0);
1518 hwif->tp_ops->tf_load(drive, &task);
1519}
1520
1521EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
1522
1523void ide_pad_transfer(ide_drive_t *drive, int write, int len)
1524{
1525 ide_hwif_t *hwif = drive->hwif;
1526 u8 buf[4] = { 0 };
1527
1528 while (len > 0) {
1529 if (write)
1530 hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
1531 else
1532 hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
1533 len -= 4;
1534 }
1535}
1536EXPORT_SYMBOL_GPL(ide_pad_transfer);