1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36#include <linux/kernel.h>
37#include <linux/blkdev.h>
38#include <linux/spinlock.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
45#include <scsi/scsi_transport.h>
46#include <linux/libata.h>
47#include <linux/hdreg.h>
48#include <linux/uaccess.h>
49
50#include "libata.h"
51
#define SECTOR_SIZE		512	/* ATA sector size, used to size HDIO_DRIVE_CMD transfers */
#define ATA_SCSI_RBUF_SIZE	4096	/* size of the shared response scratch buffer below */

/* Scratch buffer for building simulated SCSI responses; presumably
 * serialized by ata_scsi_rbuf_lock -- confirm at the use sites. */
static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];

/*
 * SCSI->ATA translator: fills qc->tf from qc->scsicmd.  Returns zero when
 * the qc should be issued to the hardware, non-zero when the command was
 * completed (result/sense already set) without being issued.
 */
typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
59
/* Device-lookup helpers and the user-initiated scan hook, defined later. */
static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
					const struct scsi_device *scsidev);
static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
					    const struct scsi_device *scsidev);
static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, unsigned int lun);


/* MODE SENSE page codes and total page lengths (header bytes included). */
#define RW_RECOVERY_MPAGE	0x1
#define RW_RECOVERY_MPAGE_LEN	12
#define CACHE_MPAGE		0x8
#define CACHE_MPAGE_LEN		20
#define CONTROL_MPAGE		0xa
#define CONTROL_MPAGE_LEN	12
#define ALL_MPAGES		0x3f	/* "return all pages" page code */
#define ALL_SUB_MPAGES		0xff	/* "all subpages" subpage code */
76
77
/* Default read/write error recovery mode page. */
static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
	RW_RECOVERY_MPAGE,
	RW_RECOVERY_MPAGE_LEN - 2,	/* page length = bytes after byte 1 */
	(1 << 7),			/* byte 2 bit 7 set (AWRE -- TODO confirm) */
	0,				/* read retry count */
	0, 0, 0, 0,
	0,				/* write retry count */
	0, 0, 0
};

/* Default caching mode page. */
static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
	CACHE_MPAGE,
	CACHE_MPAGE_LEN - 2,
	0,		/* byte 2 holds WCE; kept 0 here */
	0, 0, 0, 0, 0, 0, 0, 0, 0,
	0,		/* byte 12 holds DRA; kept 0 here */
	0, 0, 0, 0, 0, 0, 0
};

/* Default control mode page. */
static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
	CONTROL_MPAGE,
	CONTROL_MPAGE_LEN - 2,
	2,			/* byte 2 = 2 (presumably GLTSD -- confirm against SPC) */
	0,
	0, 0, 0, 0, 0xff, 0xff,
	0, 30			/* extended self-test completion time */
};
105
106
107
108
109
/* SCSI-layer glue: route error handling, command timeouts and
 * user-initiated scans through libata. */
static struct scsi_transport_template ata_scsi_transport_template = {
	.eh_strategy_handler	= ata_scsi_error,
	.eh_timed_out		= ata_scsi_timed_out,
	.user_scan		= ata_scsi_user_scan,
};


/*
 * Name table for the link_power_management_policy sysfs attribute.
 * Entry 0 (NOT_AVAILABLE) deliberately reuses the "max_performance"
 * name; the store path starts matching at index 1 so that writing
 * "max_performance" always resolves to MAX_PERFORMANCE.
 */
static const struct {
	enum link_pm	value;
	const char	*name;
} link_pm_policy[] = {
	{ NOT_AVAILABLE, "max_performance" },
	{ MIN_POWER, "min_power" },
	{ MAX_PERFORMANCE, "max_performance" },
	{ MEDIUM_POWER, "medium_power" },
};
126
127static const char *ata_scsi_lpm_get(enum link_pm policy)
128{
129 int i;
130
131 for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
132 if (link_pm_policy[i].value == policy)
133 return link_pm_policy[i].name;
134
135 return NULL;
136}
137
138static ssize_t ata_scsi_lpm_put(struct device *dev,
139 struct device_attribute *attr,
140 const char *buf, size_t count)
141{
142 struct Scsi_Host *shost = class_to_shost(dev);
143 struct ata_port *ap = ata_shost_to_port(shost);
144 enum link_pm policy = 0;
145 int i;
146
147
148
149
150
151
152
153
154 for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
155 const int len = strlen(link_pm_policy[i].name);
156 if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
157 buf[len] == '\n') {
158 policy = link_pm_policy[i].value;
159 break;
160 }
161 }
162 if (!policy)
163 return -EINVAL;
164
165 ata_lpm_schedule(ap, policy);
166 return count;
167}
168
169static ssize_t
170ata_scsi_lpm_show(struct device *dev, struct device_attribute *attr, char *buf)
171{
172 struct Scsi_Host *shost = class_to_shost(dev);
173 struct ata_port *ap = ata_shost_to_port(shost);
174 const char *policy =
175 ata_scsi_lpm_get(ap->pm_policy);
176
177 if (!policy)
178 return -EINVAL;
179
180 return snprintf(buf, 23, "%s\n", policy);
181}
182DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
183 ata_scsi_lpm_show, ata_scsi_lpm_put);
184EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
185
186static ssize_t ata_scsi_park_show(struct device *device,
187 struct device_attribute *attr, char *buf)
188{
189 struct scsi_device *sdev = to_scsi_device(device);
190 struct ata_port *ap;
191 struct ata_link *link;
192 struct ata_device *dev;
193 unsigned long flags, now;
194 unsigned int uninitialized_var(msecs);
195 int rc = 0;
196
197 ap = ata_shost_to_port(sdev->host);
198
199 spin_lock_irqsave(ap->lock, flags);
200 dev = ata_scsi_find_dev(ap, sdev);
201 if (!dev) {
202 rc = -ENODEV;
203 goto unlock;
204 }
205 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
206 rc = -EOPNOTSUPP;
207 goto unlock;
208 }
209
210 link = dev->link;
211 now = jiffies;
212 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
213 link->eh_context.unloaded_mask & (1 << dev->devno) &&
214 time_after(dev->unpark_deadline, now))
215 msecs = jiffies_to_msecs(dev->unpark_deadline - now);
216 else
217 msecs = 0;
218
219unlock:
220 spin_unlock_irq(ap->lock);
221
222 return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
223}
224
/*
 * unload_heads sysfs store: arm a head-unload (park) request or toggle the
 * NO_UNLOAD policy flag for the ATA device behind @device.
 *
 * Accepted values:
 *   >= 0  park the heads for that many milliseconds; values above
 *         ATA_TMOUT_MAX_PARK are clamped and -EOVERFLOW is returned
 *   -1    clear ATA_DFLAG_NO_UNLOAD (allow parking again)
 *   -2    set ATA_DFLAG_NO_UNLOAD (forbid parking)
 */
static ssize_t ata_scsi_park_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_device *dev;
	long int input;
	unsigned long flags;
	int rc;

	rc = strict_strtol(buf, 10, &input);
	if (rc || input < -2)
		return -EINVAL;
	if (input > ATA_TMOUT_MAX_PARK) {
		/* clamp; rc stays -EOVERFLOW so the caller is told */
		rc = -EOVERFLOW;
		input = ATA_TMOUT_MAX_PARK;
	}

	ap = ata_shost_to_port(sdev->host);

	spin_lock_irqsave(ap->lock, flags);
	dev = ata_scsi_find_dev(ap, sdev);
	if (unlikely(!dev)) {
		rc = -ENODEV;
		goto unlock;
	}
	if (dev->class != ATA_DEV_ATA) {
		/* head parking is an ATA-disk-only operation */
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	if (input >= 0) {
		if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
			rc = -EOPNOTSUPP;
			goto unlock;
		}

		/* hand the request to EH and wake it up */
		dev->unpark_deadline = ata_deadline(jiffies, input);
		dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
		ata_port_schedule_eh(ap);
		complete(&ap->park_req_pending);
	} else {
		switch (input) {
		case -1:
			dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
			break;
		case -2:
			dev->flags |= ATA_DFLAG_NO_UNLOAD;
			break;
		}
	}
unlock:
	spin_unlock_irqrestore(ap->lock, flags);

	/* note: a clamped value still reports -EOVERFLOW here */
	return rc ? rc : len;
}
DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
	    ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
285
286static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
287{
288 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
289
290 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
291}
292
293static ssize_t
294ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
295 const char *buf, size_t count)
296{
297 struct Scsi_Host *shost = class_to_shost(dev);
298 struct ata_port *ap = ata_shost_to_port(shost);
299 if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
300 return ap->ops->em_store(ap, buf, count);
301 return -EINVAL;
302}
303
304static ssize_t
305ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
306 char *buf)
307{
308 struct Scsi_Host *shost = class_to_shost(dev);
309 struct ata_port *ap = ata_shost_to_port(shost);
310
311 if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
312 return ap->ops->em_show(ap, buf);
313 return -EINVAL;
314}
315DEVICE_ATTR(em_message, S_IRUGO | S_IWUGO,
316 ata_scsi_em_message_show, ata_scsi_em_message_store);
317EXPORT_SYMBOL_GPL(dev_attr_em_message);
318
319static ssize_t
320ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
321 char *buf)
322{
323 struct Scsi_Host *shost = class_to_shost(dev);
324 struct ata_port *ap = ata_shost_to_port(shost);
325
326 return snprintf(buf, 23, "%d\n", ap->em_message_type);
327}
328DEVICE_ATTR(em_message_type, S_IRUGO,
329 ata_scsi_em_message_type_show, NULL);
330EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
331
332static ssize_t
333ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
334 char *buf)
335{
336 struct scsi_device *sdev = to_scsi_device(dev);
337 struct ata_port *ap = ata_shost_to_port(sdev->host);
338 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
339
340 if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
341 return ap->ops->sw_activity_show(atadev, buf);
342 return -EINVAL;
343}
344
345static ssize_t
346ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
347 const char *buf, size_t count)
348{
349 struct scsi_device *sdev = to_scsi_device(dev);
350 struct ata_port *ap = ata_shost_to_port(sdev->host);
351 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
352 enum sw_activity val;
353 int rc;
354
355 if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
356 val = simple_strtoul(buf, NULL, 0);
357 switch (val) {
358 case OFF: case BLINK_ON: case BLINK_OFF:
359 rc = ap->ops->sw_activity_store(atadev, val);
360 if (!rc)
361 return count;
362 else
363 return rc;
364 }
365 }
366 return -EINVAL;
367}
368DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
369 ata_scsi_activity_store);
370EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
371
/* sysfs attributes attached to every libata scsi_device. */
struct device_attribute *ata_common_sdev_attrs[] = {
	&dev_attr_unload_heads,
	NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
377
378static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
379 void (*done)(struct scsi_cmnd *))
380{
381 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
382
383 done(cmd);
384}
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
405 sector_t capacity, int geom[])
406{
407 geom[0] = 255;
408 geom[1] = 63;
409 sector_div(capacity, 255*63);
410 geom[2] = capacity;
411
412 return 0;
413}
414
415
416
417
418
419
420
421
422
423
424
425
/*
 * HDIO_GET_IDENTITY ioctl backend: copy the cached IDENTIFY DEVICE data
 * to userspace, then overwrite the product, firmware-revision and serial
 * fields with their ASCII (unswapped) form as produced by ata_id_string().
 *
 * Returns 0, -ENOMSG if no ATA device maps to @sdev, or -EFAULT.
 */
static int ata_get_identity(struct scsi_device *sdev, void __user *arg)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	u16 __user *dst = arg;
	char buf[40];	/* holds the longest string copied below (ATA_ID_PROD_LEN) */

	if (!dev)
		return -ENOMSG;

	/* raw IDENTIFY words first ... */
	if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
		return -EFAULT;

	/* ... then overlay the three string fields in place */
	ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
	if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
	if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
		return -EFAULT;

	return 0;
}
453
454
455
456
457
458
459
460
461
462
463
464
/*
 * HDIO_DRIVE_CMD ioctl backend: build an ATA PASS-THROUGH (16) CDB from
 * the hdparm-style 4-byte argument block and issue it via scsi_execute().
 *
 * args[0] = ATA command, args[1] = feature/sector number,
 * args[2] = sector count, args[3] = number of 512-byte sectors to read
 * back (0 = non-data command).  On completion the taskfile result
 * registers are copied back into args[0..2] and any read data follows
 * them in userspace.  Returns 0 or -errno.
 */
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[4], *argbuf = NULL, *sensebuf = NULL;
	int argsize = 0;
	enum dma_data_direction data_dir;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
	if (!sensebuf)
		return -ENOMEM;

	memset(scsi_cmd, 0, sizeof(scsi_cmd));

	if (args[3]) {
		/* data-in command: allocate a bounce buffer for the result */
		argsize = SECTOR_SIZE * args[3];
		argbuf = kmalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL) {
			rc = -ENOMEM;
			goto error;
		}

		scsi_cmd[1] = (4 << 1);	/* protocol: PIO Data-in */
		scsi_cmd[2] = 0x0e;	/* read from device, block count in
					   sector count field */
		data_dir = DMA_FROM_DEVICE;
	} else {
		scsi_cmd[1] = (3 << 1);	/* protocol: Non-data */
		scsi_cmd[2] = 0x20;	/* request check condition, no xfer */
		data_dir = DMA_NONE;
	}

	scsi_cmd[0] = ATA_16;

	scsi_cmd[4] = args[2];
	if (args[0] == ATA_CMD_SMART) {
		/* SMART needs the magic LBA mid/high signature registers */
		scsi_cmd[6] = args[3];
		scsi_cmd[8] = args[1];
		scsi_cmd[10] = 0x4f;
		scsi_cmd[12] = 0xc2;
	} else {
		scsi_cmd[6] = args[1];
	}
	scsi_cmd[14] = args[0];

	/* Good values for timeout and retries?  Values below
	   presumably mirror scsi_ioctl_send_command() defaults. */
	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
				  sensebuf, (10*HZ), 5, 0);

	if (driver_byte(cmd_result) == DRIVER_SENSE) {
		/* sense data available */
		u8 *desc = sensebuf + 8;
		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */

		/* The cc bit set above makes ATA pass-through report a
		 * check condition even on success; filter the benign case. */
		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;
			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			if (sshdr.sense_key == 0 &&
			    sshdr.asc == 0 && sshdr.ascq == 0)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Return the ATA registers to userspace if we got the
		 * descriptor-format sense with an ATA Return descriptor. */
		if (sensebuf[0] == 0x72 &&	/* descriptor format */
		    desc[0] == 0x09) {		/* ATA Return descriptor */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}

	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

	/* data-in payload follows the 4 arg bytes in userspace */
	if ((argbuf)
	 && copy_to_user(arg + sizeof(args), argbuf, argsize))
		rc = -EFAULT;
error:
	kfree(sensebuf);
	kfree(argbuf);
	return rc;
}
562
563
564
565
566
567
568
569
570
571
572
573
/*
 * HDIO_DRIVE_TASK ioctl backend: build a non-data ATA PASS-THROUGH (16)
 * CDB from the hdparm-style 7-byte register block and issue it.
 *
 * args[] = { command, feature, nsect, lbal, lbam, lbah, device }.
 * On completion the taskfile result registers are copied back into
 * args[].  Returns 0 or -errno.
 */
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[7], *sensebuf = NULL;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
	if (!sensebuf)
		return -ENOMEM;

	memset(scsi_cmd, 0, sizeof(scsi_cmd));
	scsi_cmd[0] = ATA_16;
	scsi_cmd[1] = (3 << 1);		/* protocol: Non-data */
	scsi_cmd[2] = 0x20;		/* request check condition */
	scsi_cmd[4] = args[1];
	scsi_cmd[6] = args[2];
	scsi_cmd[8] = args[3];
	scsi_cmd[10] = args[4];
	scsi_cmd[12] = args[5];
	scsi_cmd[13] = args[6] & 0x4f;	/* keep LBA + device select bits only */
	scsi_cmd[14] = args[0];

	/* Good values for timeout and retries?  Values below
	   presumably mirror scsi_ioctl_send_command() defaults. */
	cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
				  sensebuf, (10*HZ), 5, 0);

	if (driver_byte(cmd_result) == DRIVER_SENSE) {
		/* sense data available */
		u8 *desc = sensebuf + 8;
		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */

		/* The cc bit set above makes ATA pass-through report a
		 * check condition even on success; filter the benign case. */
		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;
			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			if (sshdr.sense_key == 0 &&
			    sshdr.asc == 0 && sshdr.ascq == 0)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Return the ATA registers to userspace if we got the
		 * descriptor-format sense with an ATA Return descriptor. */
		if (sensebuf[0] == 0x72 &&	/* descriptor format */
		    desc[0] == 0x09) {		/* ATA Return descriptor */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count */
			args[3] = desc[7];	/* lbal */
			args[4] = desc[9];	/* lbam */
			args[5] = desc[11];	/* lbah */
			args[6] = desc[12];	/* device */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}

	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

 error:
	kfree(sensebuf);
	return rc;
}
647
648int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
649{
650 int val = -EINVAL, rc = -EINVAL;
651
652 switch (cmd) {
653 case ATA_IOC_GET_IO32:
654 val = 0;
655 if (copy_to_user(arg, &val, 1))
656 return -EFAULT;
657 return 0;
658
659 case ATA_IOC_SET_IO32:
660 val = (unsigned long) arg;
661 if (val != 0)
662 return -EINVAL;
663 return 0;
664
665 case HDIO_GET_IDENTITY:
666 return ata_get_identity(scsidev, arg);
667
668 case HDIO_DRIVE_CMD:
669 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
670 return -EACCES;
671 return ata_cmd_ioctl(scsidev, arg);
672
673 case HDIO_DRIVE_TASK:
674 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
675 return -EACCES;
676 return ata_task_ioctl(scsidev, arg);
677
678 default:
679 rc = -ENOTTY;
680 break;
681 }
682
683 return rc;
684}
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
707 struct scsi_cmnd *cmd,
708 void (*done)(struct scsi_cmnd *))
709{
710 struct ata_queued_cmd *qc;
711
712 qc = ata_qc_new_init(dev);
713 if (qc) {
714 qc->scsicmd = cmd;
715 qc->scsidone = done;
716
717 qc->sg = scsi_sglist(cmd);
718 qc->n_elem = scsi_sg_count(cmd);
719 } else {
720 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
721 done(cmd);
722 }
723
724 return qc;
725}
726
727static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
728{
729 struct scsi_cmnd *scmd = qc->scsicmd;
730
731 qc->extrabytes = scmd->request->extra_len;
732 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
733}
734
735
736
737
738
739
740
741
742
743
744
745
746
/*
 *	ata_dump_status - user friendly display of error info
 *	@id: id of the port in question
 *	@tf: ptr to filled-out result taskfile
 *
 *	Decode and print the ATA status register (tf->command) and, when
 *	meaningful, the error register (tf->feature) bit by bit.
 */
static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
{
	u8 stat = tf->command, err = tf->feature;

	printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
	if (stat & ATA_BUSY) {
		/* other status bits are not valid while BSY is set */
		printk("Busy }\n");
	} else {
		if (stat & 0x40) printk("DriveReady ");
		if (stat & 0x20) printk("DeviceFault ");
		if (stat & 0x10) printk("SeekComplete ");
		if (stat & 0x08) printk("DataRequest ");
		if (stat & 0x04) printk("CorrectedError ");
		if (stat & 0x02) printk("Index ");
		if (stat & 0x01) printk("Error ");
		printk("}\n");

		if (err) {
			printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
			if (err & 0x04) printk("DriveStatusError ");
			if (err & 0x80) {
				/* bit 7 with bit 2 reads as CRC failure */
				if (err & 0x04) printk("BadCRC ");
				else printk("Sector ");
			}
			if (err & 0x40) printk("UncorrectableError ");
			if (err & 0x10) printk("SectorIdNotFound ");
			if (err & 0x02) printk("TrackZeroNotFound ");
			if (err & 0x01) printk("AddrMarkNotFound ");
			printk("}\n");
		}
	}
}
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
/*
 *	ata_to_sense_error - convert ATA error to SCSI sense data
 *	@id: port id for log messages
 *	@drv_stat: ATA status register value
 *	@drv_err: ATA error register value
 *	@sk: out: SCSI sense key
 *	@asc: out: additional sense code
 *	@ascq: out: additional sense code qualifier
 *	@verbose: non-zero to log the translation
 *
 *	Translate the error register first (most specific), then fall back
 *	to the status register, and finally to a generic ABORTED COMMAND.
 */
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
			       u8 *asc, u8 *ascq, int verbose)
{
	int i;

	/* Keyed on error-register bit masks; first match wins, so more
	 * specific (multi-bit) patterns come first. */
	static const unsigned char sense_table[][4] = {
		/* BBD|ABRT */
		{0xd1, ABORTED_COMMAND, 0x00, 0x00},
		/* TMNF|MCR|ABRT */
		{0xd0, ABORTED_COMMAND, 0x00, 0x00},
		/* MC|IDNF|ABRT */
		{0x61, HARDWARE_ERROR, 0x00, 0x00},
		/* ICRC|ABRT -> data CRC error, reported as parity error */
		{0x84, ABORTED_COMMAND, 0x47, 0x00},
		/* MC|MCR|ABRT|TMNF|AMNF -> unit offline */
		{0x37, NOT_READY, 0x04, 0x00},
		/* MCR|ABRT */
		{0x09, NOT_READY, 0x04, 0x00},
		/* AMNF -> address mark not found */
		{0x01, MEDIUM_ERROR, 0x13, 0x00},
		/* TK0NF -> track 0 not found */
		{0x02, HARDWARE_ERROR, 0x00, 0x00},
		/* ABRT -> aborted command */
		{0x04, ABORTED_COMMAND, 0x00, 0x00},
		/* MCR -> media change request, faked as not ready */
		{0x08, NOT_READY, 0x04, 0x00},
		/* IDNF -> ID not found */
		{0x10, ABORTED_COMMAND, 0x14, 0x00},
		/* duplicate MCR entry kept as-is (unreachable after the
		 * one above -- NOTE(review)) */
		{0x08, NOT_READY, 0x04, 0x00},
		/* UNC -> uncorrectable ECC, unrecovered read error */
		{0x40, MEDIUM_ERROR, 0x11, 0x04},
		/* BBD -> block marked bad */
		{0x80, MEDIUM_ERROR, 0x11, 0x04},
		{0xFF, 0xFF, 0xFF, 0xFF}, /* END mark */
	};
	/* Keyed on status-register bits when the error register is silent. */
	static const unsigned char stat_table[][4] = {
		/* BSY -> fake parity error for now */
		{0x80, ABORTED_COMMAND, 0x47, 0x00},
		/* DF -> device fault */
		{0x20, HARDWARE_ERROR, 0x00, 0x00},
		/* DRQ stuck -> fake parity error for now */
		{0x08, ABORTED_COMMAND, 0x47, 0x00},
		/* corrected data -> recovered read error */
		{0x04, RECOVERED_ERROR, 0x11, 0x00},
		{0xFF, 0xFF, 0xFF, 0xFF}, /* END mark */
	};

	/* The error register is not valid while the device is busy. */
	if (drv_stat & ATA_BUSY) {
		drv_err = 0;
	}

	if (drv_err) {
		/* look for the first full pattern match */
		for (i = 0; sense_table[i][0] != 0xFF; i++) {
			/* all bits of the pattern must be set in drv_err */
			if ((sense_table[i][0] & drv_err) ==
			    sense_table[i][0]) {
				*sk = sense_table[i][1];
				*asc = sense_table[i][2];
				*ascq = sense_table[i][3];
				goto translate_done;
			}
		}

		if (verbose)
			printk(KERN_WARNING "ata%u: no sense translation for "
			       "error 0x%02x\n", id, drv_err);
	}

	/* fall back to the status register (any matching bit suffices) */
	for (i = 0; stat_table[i][0] != 0xFF; i++) {
		if (stat_table[i][0] & drv_stat) {
			*sk = stat_table[i][1];
			*asc = stat_table[i][2];
			*ascq = stat_table[i][3];
			goto translate_done;
		}
	}

	if (verbose)
		printk(KERN_WARNING "ata%u: no sense translation for "
		       "status: 0x%02x\n", id, drv_stat);

	/* nothing matched: report a generic aborted command */
	*sk = ABORTED_COMMAND;
	*asc = 0x00;
	*ascq = 0x00;

 translate_done:
	if (verbose)
		printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
		       "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
		       id, drv_stat, drv_err, *sk, *asc, *ascq);
	return;
}
895
896
897
898
899
900
901
902
903
904
905
906
907
908
/*
 *	ata_gen_passthru_sense - generate sense data for ATA pass-through
 *	@qc: command for which sense is generated
 *
 *	Build descriptor-format sense data containing an ATA Status Return
 *	descriptor (0x09) that exposes the result taskfile registers, and
 *	mark the command CHECK CONDITION.
 */
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	unsigned char *desc = sb + 8;
	/* no EH means old-style driver: log the translation */
	int verbose = qc->ap->ops->error_handler == NULL;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* Map status/error register bits onto sense key, asc and ascq
	 * whenever the command actually failed. */
	if (qc->err_mask ||
	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
				   &sb[1], &sb[2], &sb[3], verbose);
		sb[1] &= 0x0f;	/* sense key only */
	}

	/* descriptor-format sense data */
	sb[0] = 0x72;

	desc[0] = 0x09;	/* ATA Status Return descriptor */

	/* additional sense length / descriptor length */
	sb[7] = 14;
	desc[1] = 12;

	/* copy the low-order result taskfile registers into the descriptor */
	desc[2] = 0x00;
	desc[3] = tf->feature;	/* == error register */
	desc[5] = tf->nsect;
	desc[7] = tf->lbal;
	desc[9] = tf->lbam;
	desc[11] = tf->lbah;
	desc[12] = tf->device;
	desc[13] = tf->command;	/* == status register */

	/* For LBA48 commands set the Extend bit and fill in the
	 * high-order register bytes as well. */
	if (tf->flags & ATA_TFLAG_LBA48) {
		desc[2] |= 0x01;
		desc[4] = tf->hob_nsect;
		desc[6] = tf->hob_lbal;
		desc[8] = tf->hob_lbam;
		desc[10] = tf->hob_lbah;
	}
}
967
968
969
970
971
972
973
974
975
976
977
/*
 *	ata_gen_ata_sense - generate a SCSI fixed sense block
 *	@qc: command for which sense is generated
 *
 *	Build descriptor-format sense data with an Information descriptor
 *	(0x00) carrying the failed LBA, and mark the command CHECK
 *	CONDITION.
 */
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	unsigned char *desc = sb + 8;
	/* no EH means old-style driver: log the translation */
	int verbose = qc->ap->ops->error_handler == NULL;
	u64 block;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* descriptor-format sense data */
	sb[0] = 0x72;

	/* Map status/error register bits onto sense key, asc and ascq
	 * whenever the command actually failed. */
	if (qc->err_mask ||
	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
				   &sb[1], &sb[2], &sb[3], verbose);
		sb[1] &= 0x0f;	/* sense key only */
	}

	/* recover the failing LBA from the result taskfile */
	block = ata_tf_read_block(&qc->result_tf, dev);

	/* Information descriptor: big-endian 48-bit LBA, valid bit set */
	sb[7] = 12;
	desc[0] = 0x00;
	desc[1] = 10;

	desc[2] |= 0x80;	/* valid */
	desc[6] = block >> 40;
	desc[7] = block >> 32;
	desc[8] = block >> 24;
	desc[9] = block >> 16;
	desc[10] = block >> 8;
	desc[11] = block;
}
1020
1021static void ata_scsi_sdev_config(struct scsi_device *sdev)
1022{
1023 sdev->use_10_for_rw = 1;
1024 sdev->use_10_for_ms = 1;
1025
1026
1027
1028
1029
1030
1031 sdev->max_device_blocked = 1;
1032}
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049static int atapi_drain_needed(struct request *rq)
1050{
1051 if (likely(!blk_pc_request(rq)))
1052 return 0;
1053
1054 if (!rq->data_len || (rq->cmd_flags & REQ_RW))
1055 return 0;
1056
1057 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
1058}
1059
/*
 * Configure @sdev to match its ATA device @dev: transfer limits, DMA
 * alignment and drain buffer for ATAPI, sector alignment and start/stop
 * management for disks, media-change events and NCQ queue depth.
 * Returns 0 or -ENOMEM.
 */
static int ata_scsi_dev_config(struct scsi_device *sdev,
			       struct ata_device *dev)
{
	if (!ata_id_has_unload(dev->id))
		dev->flags |= ATA_DFLAG_NO_UNLOAD;

	/* configure max sectors */
	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);

	if (dev->class == ATA_DEV_ATAPI) {
		struct request_queue *q = sdev->request_queue;
		void *buf;

		/* set up DMA alignment and padding */
		blk_queue_update_dma_alignment(sdev->request_queue,
					       ATA_DMA_PAD_SZ - 1);
		blk_queue_update_dma_pad(sdev->request_queue,
					 ATA_DMA_PAD_SZ - 1);

		/* allocate and attach the drain buffer */
		buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
		if (!buf) {
			ata_dev_printk(dev, KERN_ERR,
				       "drain buffer allocation failed\n");
			return -ENOMEM;
		}

		blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
	} else {
		if (ata_id_is_ssd(dev->id))
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
						sdev->request_queue);

		/* ATA devices must be sector-aligned */
		blk_queue_update_dma_alignment(sdev->request_queue,
					       ATA_SECT_SIZE - 1);
		sdev->manage_start_stop = 1;
	}

	if (dev->flags & ATA_DFLAG_AN)
		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);

	if (dev->flags & ATA_DFLAG_NCQ) {
		int depth;

		/* queue depth is bounded by host capacity, the device's
		 * advertised depth and libata's tag space */
		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
		depth = min(ATA_MAX_QUEUE - 1, depth);
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	}

	return 0;
}
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125int ata_scsi_slave_config(struct scsi_device *sdev)
1126{
1127 struct ata_port *ap = ata_shost_to_port(sdev->host);
1128 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
1129 int rc = 0;
1130
1131 ata_scsi_sdev_config(sdev);
1132
1133 if (dev)
1134 rc = ata_scsi_dev_config(sdev, dev);
1135
1136 return rc;
1137}
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
/*
 *	ata_scsi_slave_destroy - SCSI device is about to be destroyed
 *	@sdev: SCSI device to be destroyed
 *
 *	Schedule detach of the corresponding ATA device via EH and free
 *	the DMA drain buffer that ata_scsi_dev_config() may have attached
 *	to the request queue.  No-op for old-style (no EH) drivers.
 */
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	dev = __ata_scsi_find_dev(ap, sdev);
	if (dev && dev->sdev) {
		/* drop our reference and hand the device to EH for detach */
		dev->sdev = NULL;
		dev->flags |= ATA_DFLAG_DETACH;
		ata_port_schedule_eh(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);

	kfree(q->dma_drain_buffer);
	q->dma_drain_buffer = NULL;
	q->dma_drain_size = 0;
}
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
/*
 *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
 *	@sdev: SCSI device to configure queue depth for
 *	@queue_depth: new queue depth
 *
 *	Validate and clamp the requested depth, toggling NCQ off when the
 *	effective depth is 1 or NCQ is unavailable.  Returns the newly
 *	configured depth, the current depth when the request is a no-op or
 *	invalid, or -EINVAL when clamping left the depth unchanged.
 */
int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	unsigned long flags;

	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
		return sdev->queue_depth;

	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev || !ata_dev_enabled(dev))
		return sdev->queue_depth;

	/* NCQ enabled? */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		queue_depth = 1;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* limit and apply queue depth */
	queue_depth = min(queue_depth, sdev->host->can_queue);
	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
	queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);

	if (sdev->queue_depth == queue_depth)
		return -EINVAL;

	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
	return queue_depth;
}
1226
1227
1228static void ata_delayed_done_timerfn(unsigned long arg)
1229{
1230 struct scsi_cmnd *scmd = (void *)arg;
1231
1232 scmd->scsi_done(scmd);
1233}
1234
1235
/*
 * Complete @scmd roughly five seconds from now via a one-shot timer.
 *
 * NOTE(review): a single static timer means only one delayed completion
 * can be pending at a time.  The only caller (the spurious-spindown
 * warning in ata_scsi_start_stop_xlat()) fires at most once per boot,
 * which makes this safe -- confirm before adding callers.
 */
static void ata_delayed_done(struct scsi_cmnd *scmd)
{
	static struct timer_list timer;

	setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
	mod_timer(&timer, jiffies + 5 * HZ);
}
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
/*
 *	ata_scsi_start_stop_xlat - translate SCSI START STOP UNIT
 *	@qc: storage for translated ATA taskfile
 *
 *	START -> ATA READ VERIFY (spins the drive up); STOP -> ATA
 *	STANDBY IMMEDIATE.  LOEJ and power conditions are rejected.
 *	Returns 0 when the taskfile should be issued, non-zero when the
 *	command was completed without touching the device.
 */
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	const u8 *cdb = scmd->cmnd;

	if (scmd->cmd_len < 5)
		goto invalid_fld;

	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf->protocol = ATA_PROT_NODATA;
	if (cdb[1] & 0x1) {
		;	/* IMMED bit: deliberately ignored */
	}
	if (cdb[4] & 0x2)
		goto invalid_fld;	/* LOEJ (load/eject) not supported */
	if (((cdb[4] >> 4) & 0xf) != 0)
		goto invalid_fld;	/* power conditions not supported */

	if (cdb[4] & 0x1) {
		/* START: verify one sector at LBA/CHS 0 to spin up */
		tf->nsect = 1;

		if (qc->dev->flags & ATA_DFLAG_LBA) {
			tf->flags |= ATA_TFLAG_LBA;

			tf->lbah = 0x0;
			tf->lbam = 0x0;
			tf->lbal = 0x0;
			tf->device |= ATA_LBA;
		} else {
			/* CHS addressing */
			tf->lbal = 0x1;	/* sector 1 */
			tf->lbam = 0x0;	/* cylinder low */
			tf->lbah = 0x0;	/* cylinder high */
		}

		tf->command = ATA_CMD_VERIFY;
	} else {
		/*
		 * STOP while ATA_DFLAG_SPUNDOWN is set during halt/poweroff:
		 * presumably the disk was already spun down earlier in the
		 * shutdown path, and an old userland shutdown utility is
		 * issuing a redundant STOP -- skip it and warn once.
		 * (Verify the flag's semantics against the EH code.)
		 */
		if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
		    (system_state == SYSTEM_HALT ||
		     system_state == SYSTEM_POWER_OFF)) {
			static unsigned long warned;

			if (!test_and_set_bit(0, &warned)) {
				ata_dev_printk(qc->dev, KERN_WARNING,
					"DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
					"UPDATE SHUTDOWN UTILITY\n");
				ata_dev_printk(qc->dev, KERN_WARNING,
					"For more info, visit "
					"http://linux-ata.org/shutdown.html\n");

				/* delay completion so the warning can make
				 * it to the console before power is cut */
				scmd->scsi_done = qc->scsidone;
				qc->scsidone = ata_delayed_done;
			}
			scmd->result = SAM_STAT_GOOD;
			return 1;	/* completed, nothing issued */
		}

		/* STOP: issue ATA STANDBY IMMEDIATE */
		tf->command = ATA_CMD_STANDBYNOW1;
	}

	/*
	 * Standby and idle condition timers could be implemented but would
	 * require the Power Condition mode page and MODE SELECT support.
	 */

	return 0;

invalid_fld:
	/* "Invalid field in CDB" */
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	return 1;
}
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
1358{
1359 struct ata_taskfile *tf = &qc->tf;
1360
1361 tf->flags |= ATA_TFLAG_DEVICE;
1362 tf->protocol = ATA_PROT_NODATA;
1363
1364 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
1365 tf->command = ATA_CMD_FLUSH_EXT;
1366 else
1367 tf->command = ATA_CMD_FLUSH;
1368
1369
1370 qc->flags |= ATA_QCFLAG_IO;
1371
1372 return 0;
1373}
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1386{
1387 u64 lba = 0;
1388 u32 len;
1389
1390 VPRINTK("six-byte command\n");
1391
1392 lba |= ((u64)(cdb[1] & 0x1f)) << 16;
1393 lba |= ((u64)cdb[2]) << 8;
1394 lba |= ((u64)cdb[3]);
1395
1396 len = cdb[4];
1397
1398 *plba = lba;
1399 *plen = len;
1400}
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1413{
1414 u64 lba = 0;
1415 u32 len = 0;
1416
1417 VPRINTK("ten-byte command\n");
1418
1419 lba |= ((u64)cdb[2]) << 24;
1420 lba |= ((u64)cdb[3]) << 16;
1421 lba |= ((u64)cdb[4]) << 8;
1422 lba |= ((u64)cdb[5]);
1423
1424 len |= ((u32)cdb[7]) << 8;
1425 len |= ((u32)cdb[8]);
1426
1427 *plba = lba;
1428 *plen = len;
1429}
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1442{
1443 u64 lba = 0;
1444 u32 len = 0;
1445
1446 VPRINTK("sixteen-byte command\n");
1447
1448 lba |= ((u64)cdb[2]) << 56;
1449 lba |= ((u64)cdb[3]) << 48;
1450 lba |= ((u64)cdb[4]) << 40;
1451 lba |= ((u64)cdb[5]) << 32;
1452 lba |= ((u64)cdb[6]) << 24;
1453 lba |= ((u64)cdb[7]) << 16;
1454 lba |= ((u64)cdb[8]) << 8;
1455 lba |= ((u64)cdb[9]);
1456
1457 len |= ((u32)cdb[10]) << 24;
1458 len |= ((u32)cdb[11]) << 16;
1459 len |= ((u32)cdb[12]) << 8;
1460 len |= ((u32)cdb[13]);
1461
1462 *plba = lba;
1463 *plen = len;
1464}
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
/*
 *	ata_scsi_verify_xlat - translate SCSI VERIFY(10)/VERIFY(16)
 *	@qc: storage for translated ATA taskfile
 *
 *	Convert a SCSI VERIFY into an ATA READ VERIFY (LBA28, LBA48 or
 *	CHS, depending on device capabilities and the range addressed).
 *	Returns 0 when the taskfile should be issued, non-zero when the
 *	command was completed (invalid field, out of range, or nothing
 *	to do).
 */
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u64 dev_sectors = qc->dev->n_sectors;
	const u8 *cdb = scmd->cmnd;
	u64 block;	/* first sector to verify */
	u32 n_block;	/* number of sectors */

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	/* extract LBA/length from the CDB */
	if (cdb[0] == VERIFY) {
		if (scmd->cmd_len < 10)
			goto invalid_fld;
		scsi_10_lba_len(cdb, &block, &n_block);
	} else if (cdb[0] == VERIFY_16) {
		if (scmd->cmd_len < 16)
			goto invalid_fld;
		scsi_16_lba_len(cdb, &block, &n_block);
	} else
		goto invalid_fld;

	/* range checks against the device capacity */
	if (!n_block)
		goto nothing_to_do;
	if (block >= dev_sectors)
		goto out_of_range;
	if ((block + n_block) > dev_sectors)
		goto out_of_range;

	if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* LBA28: high nibble of the LBA goes in the
			 * device register */
			tf->command = ATA_CMD_VERIFY;
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				goto out_of_range;

			/* LBA48: use the hob (previous-content) registers */
			tf->flags |= ATA_TFLAG_LBA48;
			tf->command = ATA_CMD_VERIFY_EXT;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			goto out_of_range;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS addressing */
		u32 sect, head, cyl, track;

		if (!lba_28_ok(block, n_block))
			goto out_of_range;

		/* convert LBA to CHS (sectors are 1-based) */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit:
		 * cylinder 16 bits, head 4 bits, sector 8 bits (non-zero). */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			goto out_of_range;

		tf->command = ATA_CMD_VERIFY;
		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;

invalid_fld:
	/* "Invalid field in CDB" */
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	return 1;

out_of_range:
	/* "Logical Block Address out of range" */
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
/**
 *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 *	@qc: command to translate; taskfile is built in @qc->tf
 *
 *	Converts SCSI READ/WRITE (6, 10 or 16 byte) into an ATA read/write
 *	taskfile via ata_build_rw_tf().
 *
 *	RETURNS:
 *	Zero on success; non-zero on failure, with sense data or result
 *	already set on @qc->scsicmd.
 */
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	unsigned int tf_flags = 0;
	u64 block;	/* starting LBA */
	u32 n_block;	/* number of sectors */
	int rc;

	if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
		tf_flags |= ATA_TFLAG_WRITE;

	/* Calculate the SCSI LBA, transfer length and FUA. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
		if (unlikely(scmd->cmd_len < 10))
			goto invalid_fld;
		scsi_10_lba_len(cdb, &block, &n_block);
		if (unlikely(cdb[1] & (1 << 3)))	/* FUA bit */
			tf_flags |= ATA_TFLAG_FUA;
		break;
	case READ_6:
	case WRITE_6:
		if (unlikely(scmd->cmd_len < 6))
			goto invalid_fld;
		scsi_6_lba_len(cdb, &block, &n_block);

		/* for 6-byte r/w commands, transfer length 0
		 * means 256 blocks of data, not 0 block.
		 */
		if (!n_block)
			n_block = 256;
		break;
	case READ_16:
	case WRITE_16:
		if (unlikely(scmd->cmd_len < 16))
			goto invalid_fld;
		scsi_16_lba_len(cdb, &block, &n_block);
		if (unlikely(cdb[1] & (1 << 3)))	/* FUA bit */
			tf_flags |= ATA_TFLAG_FUA;
		break;
	default:
		DPRINTK("no-byte command\n");
		goto invalid_fld;
	}

	/* Check and compose ATA command */
	if (!n_block)
		/* For 10-byte and 16-byte SCSI R/W commands, transfer
		 * length 0 means transfer 0 block of data.
		 * However, for ATA R/W commands, sector count 0 means
		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
		 *
		 * WARNING: one or two older ATA drives treat 0 as 0...
		 */
		goto nothing_to_do;

	qc->flags |= ATA_QCFLAG_IO;
	qc->nbytes = n_block * ATA_SECT_SIZE;

	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
			     qc->tag);
	if (likely(rc == 0))
		return 0;

	if (rc == -ERANGE)
		goto out_of_range;
	/* treat all other errors as -EINVAL, fall through */
invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cbd" */
	return 1;

out_of_range:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
1689
/* Completion callback for translated (non-ATAPI) commands: generate
 * sense data if needed, track spin-down state, finish the SCSI command
 * and free the qc.
 */
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;
	u8 *cdb = cmd->cmnd;
	int need_sense = (qc->err_mask != 0);

	/* For ATA pass thru (SAT) commands, generate a sense block if
	 * user mandated it or if there's an error.  Note that if we
	 * generate because the user forced us to, a check condition
	 * is generated and the ATA register values are returned
	 * whether the command completed successfully or not.
	 * (cdb[2] & 0x20) is presumably the SAT CK_COND bit — verify
	 * against the SAT spec.
	 */
	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
	    ((cdb[2] & 0x20) || need_sense)) {
		ata_gen_passthru_sense(qc);
	} else {
		if (!need_sense) {
			cmd->result = SAM_STAT_GOOD;
		} else {
			/* TODO: decide which descriptor format to use
			 * for 48b LBA devices. Add this conditional.
			 * Translate the ATA error into SCSI sense data.
			 */
			ata_gen_ata_sense(qc);
		}
	}

	/* Remember whether the device was last sent a spin-down command,
	 * so START_STOP translation can act accordingly; any other command
	 * (outside of halt/poweroff) clears the spun-down state.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
		     qc->tf.command == ATA_CMD_STANDBYNOW1))
		qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
	else if (likely(system_state != SYSTEM_HALT &&
			system_state != SYSTEM_POWER_OFF))
		qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;

	/* without new-style EH, dump status on errors for diagnostics */
	if (need_sense && !ap->ops->error_handler)
		ata_dump_status(ap->print_id, &qc->result_tf);

	qc->scsidone(cmd);

	ata_qc_free(qc);
}
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1765 void (*done)(struct scsi_cmnd *),
1766 ata_xlat_func_t xlat_func)
1767{
1768 struct ata_port *ap = dev->link->ap;
1769 struct ata_queued_cmd *qc;
1770 int rc;
1771
1772 VPRINTK("ENTER\n");
1773
1774 qc = ata_scsi_qc_new(dev, cmd, done);
1775 if (!qc)
1776 goto err_mem;
1777
1778
1779 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1780 cmd->sc_data_direction == DMA_TO_DEVICE) {
1781 if (unlikely(scsi_bufflen(cmd) < 1)) {
1782 ata_dev_printk(dev, KERN_WARNING,
1783 "WARNING: zero len r/w req\n");
1784 goto err_did;
1785 }
1786
1787 ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
1788
1789 qc->dma_dir = cmd->sc_data_direction;
1790 }
1791
1792 qc->complete_fn = ata_scsi_qc_complete;
1793
1794 if (xlat_func(qc))
1795 goto early_finish;
1796
1797 if (ap->ops->qc_defer) {
1798 if ((rc = ap->ops->qc_defer(qc)))
1799 goto defer;
1800 }
1801
1802
1803 ata_qc_issue(qc);
1804
1805 VPRINTK("EXIT\n");
1806 return 0;
1807
1808early_finish:
1809 ata_qc_free(qc);
1810 qc->scsidone(cmd);
1811 DPRINTK("EXIT - early finish (good or error)\n");
1812 return 0;
1813
1814err_did:
1815 ata_qc_free(qc);
1816 cmd->result = (DID_ERROR << 16);
1817 qc->scsidone(cmd);
1818err_mem:
1819 DPRINTK("EXIT - internal\n");
1820 return 0;
1821
1822defer:
1823 ata_qc_free(qc);
1824 DPRINTK("EXIT - defer\n");
1825 if (rc == ATA_DEFER_LINK)
1826 return SCSI_MLQUEUE_DEVICE_BUSY;
1827 else
1828 return SCSI_MLQUEUE_HOST_BUSY;
1829}
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
/* Acquire the shared, zeroed response buffer under ata_scsi_rbuf_lock.
 * If @copy_in is true, the command's data is first copied into the
 * buffer.  Must be paired with ata_scsi_rbuf_put(), which releases the
 * lock via the same @flags.
 */
static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
			       unsigned long *flags)
{
	spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);

	memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
	if (copy_in)
		sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				  ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
	return ata_scsi_rbuf;
}
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
/* Release the shared response buffer obtained by ata_scsi_rbuf_get().
 * If @copy_out is true, the buffer contents are copied back into the
 * command's data area before the lock is dropped.
 */
static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
				     unsigned long *flags)
{
	if (copy_out)
		sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				    ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
}
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1894 unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
1895{
1896 u8 *rbuf;
1897 unsigned int rc;
1898 struct scsi_cmnd *cmd = args->cmd;
1899 unsigned long flags;
1900
1901 rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
1902 rc = actor(args, rbuf);
1903 ata_scsi_rbuf_put(cmd, rc == 0, &flags);
1904
1905 if (rc == 0)
1906 cmd->result = SAM_STAT_GOOD;
1907 args->done(cmd);
1908}
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
/* Simulate a standard INQUIRY response for an ATA device.  Fills @rbuf
 * (pre-zeroed by ata_scsi_rbuf_get()) with a direct-access device header,
 * "ATA" vendor id, and product/firmware strings lifted from the IDENTIFY
 * DEVICE data in args->id.  Always returns 0 (success).
 */
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
	/* version descriptors placed at byte 59 of the response;
	 * values presumably encode SAM/SPC/SBC levels — verify against
	 * the SPC version-descriptor tables.
	 */
	const u8 versions[] = {
		0x60,	/* SAM-3 (no version claimed) */

		0x03,
		0x20,	/* SBC-2 (no version claimed) */

		0x02,
		0x60	/* SPC-3 (no version claimed) */
	};
	u8 hdr[] = {
		TYPE_DISK,
		0,
		0x5,	/* claim SPC-3 version compatibility */
		2,	/* response data format = SPC-3 */
		95 - 4	/* additional length */
	};

	VPRINTK("ENTER\n");

	/* set scsi removeable (RMB) bit per ata bit */
	if (ata_id_removeable(args->id))
		hdr[1] |= (1 << 7);

	memcpy(rbuf, hdr, sizeof(hdr));
	memcpy(&rbuf[8], "ATA     ", 8);	/* vendor identification */
	ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
	ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);

	/* fall back when the firmware revision field is blank */
	if (rbuf[32] == 0 || rbuf[32] == ' ')
		memcpy(&rbuf[32], "n/a ", 4);

	memcpy(rbuf + 59, versions, sizeof(versions));

	return 0;
}
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
1970{
1971 const u8 pages[] = {
1972 0x00,
1973 0x80,
1974 0x83,
1975 0x89,
1976 0xb1,
1977 };
1978
1979 rbuf[3] = sizeof(pages);
1980 memcpy(rbuf + 4, pages, sizeof(pages));
1981 return 0;
1982}
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
1995{
1996 const u8 hdr[] = {
1997 0,
1998 0x80,
1999 0,
2000 ATA_ID_SERNO_LEN,
2001 };
2002
2003 memcpy(rbuf, hdr, sizeof(hdr));
2004 ata_id_string(args->id, (unsigned char *) &rbuf[4],
2005 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
2006 return 0;
2007}
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
/* Simulate INQUIRY VPD page 0x83: device identification.  Emits two
 * descriptors: (1) a vendor-specific descriptor carrying the ATA serial
 * number and (2) a T10 vendor-id based descriptor ("ATA" + product +
 * serial).  Returns 0 (success).
 */
static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
{
	const int sat_model_serial_desc_len = 68;	/* 8 + 40 + 20 */
	int num;	/* running offset into rbuf */

	rbuf[1] = 0x83;			/* this page code */
	num = 4;

	/* piv=0, assoc=lun, code_set=ACSII, designator=vendor */
	rbuf[num + 0] = 2;
	rbuf[num + 3] = ATA_ID_SERNO_LEN;
	num += 4;
	ata_id_string(args->id, (unsigned char *) rbuf + num,
		      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	num += ATA_ID_SERNO_LEN;

	/* SAT defined lu model and serial numbers descriptor */
	/* piv=0, assoc=lun, code_set=ACSII, designator=t10 vendor id */
	rbuf[num + 0] = 2;
	rbuf[num + 1] = 1;
	rbuf[num + 3] = sat_model_serial_desc_len;
	num += 4;
	memcpy(rbuf + num, "ATA     ", 8);
	num += 8;
	ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
		      ATA_ID_PROD_LEN);
	num += ATA_ID_PROD_LEN;
	ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
		      ATA_ID_SERNO_LEN);
	num += ATA_ID_SERNO_LEN;

	rbuf[3] = num - 4;	/* page len (assume less than 256 bytes) */
	return 0;
}
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
/* Simulate INQUIRY VPD page 0x89: ATA information.  Contains SAT vendor
 * and product ids, a signature D2H FIS, the command used for
 * identification, and the raw 512-byte IDENTIFY data.  Returns 0.
 */
static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_taskfile tf;

	memset(&tf, 0, sizeof(tf));

	rbuf[1] = 0x89;			/* our page code */
	rbuf[2] = (0x238 >> 8);		/* page size fixed at 238h */
	rbuf[3] = (0x238 & 0xff);

	memcpy(&rbuf[8], "linux   ", 8);	/* SAT vendor */
	memcpy(&rbuf[16], "libata          ", 16);	/* SAT product id */
	/* NOTE(review): DRV_VERSION copied here is immediately overwritten
	 * by the FW_REV string below — confirm whether this is intended. */
	memcpy(&rbuf[32], DRV_VERSION, 4);
	ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);

	/* we don't store the ATA device signature, so we fake it */

	tf.command = ATA_DRDY;		/* really, this is Status reg */
	tf.lbal = 0x1;
	tf.nsect = 0x1;

	ata_tf_to_fis(&tf, 0, 1, &rbuf[36]);	/* TODO: PMP? */
	rbuf[36] = 0x34;		/* force D2H Reg FIS (34h) */

	rbuf[56] = ATA_CMD_ID_ATA;	/* command used to obtain the id data */

	memcpy(&rbuf[60], &args->id[0], 512);	/* raw IDENTIFY data */
	return 0;
}
2096
/* Simulate INQUIRY VPD page 0xb1: block device characteristics.
 * Only filled in for ATA major version > 7; words 217 and 168 of the
 * IDENTIFY data are presumably the nominal media rotation rate and
 * device form factor (per ATA8-ACS) — verify against the spec.
 */
static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
{
	rbuf[1] = 0xb1;		/* this page code */
	rbuf[3] = 0x3c;		/* required VPD size with unmap support */
	if (ata_id_major_version(args->id) > 7) {
		rbuf[4] = args->id[217] >> 8;	/* rotation rate, MSB */
		rbuf[5] = args->id[217];	/* rotation rate, LSB */
		rbuf[7] = args->id[168] & 0xf;	/* form factor nibble */
	}

	return 0;
}
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
/* Simulated-command actor that does nothing: returns success with the
 * pre-zeroed response buffer untouched.
 */
static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
{
	VPRINTK("ENTER\n");
	return 0;
}
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
/* Build the MODE SENSE caching page (0x08) into @buf from the default
 * template, then reflect the drive's current write-cache and read-ahead
 * state from the IDENTIFY data @id.  Returns the page length.
 * buf[2] bit 2 / buf[12] bit 5 are presumably the WCE and DRA bits of
 * the SBC caching page — verify offsets against the spec.
 */
static unsigned int ata_msense_caching(u16 *id, u8 *buf)
{
	memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
	if (ata_id_wcache_enabled(id))
		buf[2] |= (1 << 2);	/* write cache enable */
	if (!ata_id_rahead_enabled(id))
		buf[12] |= (1 << 5);	/* disable read ahead */
	return sizeof(def_cache_mpage);
}
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
/* Copy the default control mode page (0x0a) into @buf and return its
 * length.  The page content is static (def_control_mpage).
 */
static unsigned int ata_msense_ctl_mode(u8 *buf)
{
	memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
	return sizeof(def_control_mpage);
}
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
/* Copy the default read/write error recovery page (0x01) into @buf and
 * return its length.  The page content is static (def_rw_recovery_mpage).
 */
static unsigned int ata_msense_rw_recovery(u8 *buf)
{
	memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
	return sizeof(def_rw_recovery_mpage);
}
2178
2179
2180
2181
2182
/* Decide whether FUA may be advertised for this device.  Requires the
 * libata_fua module parameter and the device's FUA capability bit; in
 * addition, a specific Maxtor model/firmware combination is blacklisted:
 * any device that is NOT "Maxtor" or NOT firmware "BANC1G10" passes,
 * the exact combination is rejected.
 * Returns 1 if FUA is supported, 0 otherwise.
 */
static int ata_dev_supports_fua(u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];

	if (!libata_fua)
		return 0;
	if (!ata_id_has_fua(id))
		return 0;

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));

	/* non-zero strcmp => strings differ => not the blacklisted combo */
	if (strcmp(model, "Maxtor"))
		return 1;
	if (strcmp(fw, "BANC1G10"))
		return 1;

	return 0;	/* blacklisted */
}
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
/**
 *	ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	Simulates MODE SENSE commands.  Assumes the response buffer has
 *	been zeroed by the caller.  Pages supported: r/w error recovery
 *	(0x01), caching (0x08), control (0x0a), and all (0x3f).
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure (sense data set).
 */
static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_device *dev = args->dev;
	u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
	/* SAT block descriptor: 512-byte logical blocks */
	const u8 sat_blk_desc[] = {
		0, 0, 0, 0,	/* number of blocks: sat unspecified */
		0,
		0, 0x2, 0x0	/* block length: 512 bytes */
	};
	u8 pg, spg;
	unsigned int ebd, page_control, six_byte;
	u8 dpofua;

	VPRINTK("ENTER\n");

	six_byte = (scsicmd[0] == MODE_SENSE);
	ebd = !(scsicmd[1] & 0x8);	/* dbd bit inverted == edb */
	/*
	 * LLBA bit in msense(10) ignored (compliant)
	 */

	page_control = scsicmd[2] >> 6;
	switch (page_control) {
	case 0: /* current */
		break;  /* supported */
	case 3: /* saved */
		goto saving_not_supp;
	case 1: /* changeable */
	case 2: /* defaults */
	default:
		goto invalid_fld;
	}

	/* reserve space for the header (and block descriptor, if any) */
	if (six_byte)
		p += 4 + (ebd ? 8 : 0);
	else
		p += 8 + (ebd ? 8 : 0);

	pg = scsicmd[2] & 0x3f;
	spg = scsicmd[3];
	/*
	 * No mode subpages supported (yet) but asking for _all_
	 * subpages may be valid
	 */
	if (spg && (spg != ALL_SUB_MPAGES))
		goto invalid_fld;

	switch(pg) {
	case RW_RECOVERY_MPAGE:
		p += ata_msense_rw_recovery(p);
		break;

	case CACHE_MPAGE:
		p += ata_msense_caching(args->id, p);
		break;

	case CONTROL_MPAGE:
		p += ata_msense_ctl_mode(p);
		break;

	case ALL_MPAGES:
		p += ata_msense_rw_recovery(p);
		p += ata_msense_caching(args->id, p);
		p += ata_msense_ctl_mode(p);
		break;

	default:		/* invalid page code */
		goto invalid_fld;
	}

	/* DPOFUA advertised only for LBA48 devices that can use FUA writes */
	dpofua = 0;
	if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
	    (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
		dpofua = 1 << 4;

	if (six_byte) {
		rbuf[0] = p - rbuf - 1;		/* mode data length */
		rbuf[2] |= dpofua;
		if (ebd) {
			rbuf[3] = sizeof(sat_blk_desc);
			memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
		}
	} else {
		unsigned int output_len = p - rbuf - 2;

		rbuf[0] = output_len >> 8;	/* mode data length, MSB */
		rbuf[1] = output_len;
		rbuf[3] |= dpofua;
		if (ebd) {
			rbuf[7] = sizeof(sat_blk_desc);
			memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
		}
	}
	return 0;

invalid_fld:
	ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cbd" */
	return 1;

saving_not_supp:
	ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
	/* "Saving parameters not supported" */
	return 1;
}
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2332{
2333 u64 last_lba = args->dev->n_sectors - 1;
2334
2335 VPRINTK("ENTER\n");
2336
2337 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2338 if (last_lba >= 0xffffffffULL)
2339 last_lba = 0xffffffff;
2340
2341
2342 rbuf[0] = last_lba >> (8 * 3);
2343 rbuf[1] = last_lba >> (8 * 2);
2344 rbuf[2] = last_lba >> (8 * 1);
2345 rbuf[3] = last_lba;
2346
2347
2348 rbuf[6] = ATA_SECT_SIZE >> 8;
2349 rbuf[7] = ATA_SECT_SIZE & 0xff;
2350 } else {
2351
2352 rbuf[0] = last_lba >> (8 * 7);
2353 rbuf[1] = last_lba >> (8 * 6);
2354 rbuf[2] = last_lba >> (8 * 5);
2355 rbuf[3] = last_lba >> (8 * 4);
2356 rbuf[4] = last_lba >> (8 * 3);
2357 rbuf[5] = last_lba >> (8 * 2);
2358 rbuf[6] = last_lba >> (8 * 1);
2359 rbuf[7] = last_lba;
2360
2361
2362 rbuf[10] = ATA_SECT_SIZE >> 8;
2363 rbuf[11] = ATA_SECT_SIZE & 0xff;
2364 }
2365
2366 return 0;
2367}
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
/* Simulate REPORT LUNS: the buffer is pre-zeroed, so only the LUN list
 * length (8 bytes = one LUN, LUN 0) needs to be set.  Returns 0.
 */
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
{
	VPRINTK("ENTER\n");
	rbuf[3] = 8;	/* just one lun, LUN 0, size 8 bytes */

	return 0;
}
2386
/* Completion handler for the internally-issued ATAPI REQUEST SENSE:
 * finish the original SCSI command and free the qc.  If the REQUEST
 * SENSE itself failed (for a reason other than a device error), fall
 * back to synthesizing sense data from the taskfile registers.
 */
static void atapi_sense_complete(struct ata_queued_cmd *qc)
{
	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * a sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_passthru_sense(qc);
	}

	qc->scsidone(qc->scsicmd);
	ata_qc_free(qc);
}
2401
2402
/* Some controllers must do PIO transfers via DMA engines ("silly" PIO);
 * nonzero when ATA_FLAG_PIO_DMA is set on the port.
 */
static inline int ata_pio_use_silly(struct ata_port *ap)
{
	return (ap->flags & ATA_FLAG_PIO_DMA);
}
2407
/* Reuse a failed ATAPI qc to issue a REQUEST SENSE into the SCSI
 * command's sense buffer.  Completion continues in
 * atapi_sense_complete().
 */
static void atapi_request_sense(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

#ifdef CONFIG_ATA_SFF
	if (ap->ops->sff_tf_read)
		ap->ops->sff_tf_read(ap, &qc->tf);
#endif

	/* fill these in, for the case where they are -not- overwritten */
	cmd->sense_buffer[0] = 0x70;	/* fixed-format sense */
	cmd->sense_buffer[2] = qc->tf.feature >> 4;	/* sense key nibble */

	/* reset the qc so it can carry the new REQUEST SENSE command */
	ata_qc_reinit(qc);

	/* setup sg table and init transfer direction into the sense buffer */
	sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
	ata_sg_init(qc, &qc->sgent, 1);
	qc->dma_dir = DMA_FROM_DEVICE;

	memset(&qc->cdb, 0, qc->dev->cdb_len);
	qc->cdb[0] = REQUEST_SENSE;
	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;	/* allocation length */

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.command = ATA_CMD_PACKET;

	if (ata_pio_use_silly(ap)) {
		qc->tf.protocol = ATAPI_PROT_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;
	} else {
		qc->tf.protocol = ATAPI_PROT_PIO;
		/* byte-count limit in lbam/lbah */
		qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
		qc->tf.lbah = 0;
	}
	qc->nbytes = SCSI_SENSE_BUFFERSIZE;

	qc->complete_fn = atapi_sense_complete;

	ata_qc_issue(qc);

	DPRINTK("EXIT\n");
}
2457
/* Completion callback for translated ATAPI commands.  Handles new-style
 * EH sense reporting, kicks off an internal REQUEST SENSE on device
 * errors (old-style EH), and fixes up INQUIRY responses for ATAPI
 * devices before completing the SCSI command.
 */
static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	unsigned int err_mask = qc->err_mask;

	VPRINTK("ENTER, err_mask 0x%X\n", err_mask);

	/* handle completion from new EH */
	if (unlikely(qc->ap->ops->error_handler &&
		     (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {

		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
			/* FIXME: not quite right; we don't want the
			 * translation of taskfile registers into a
			 * sense descriptors, since that's only
			 * correct for ATA, not ATAPI
			 */
			ata_gen_passthru_sense(qc);
		}

		/* SCSI EH automatically locks door if sdev->locked is
		 * set.  Sometimes door lock request continues to
		 * fail, for example, when no media is present.  This
		 * creates a loop - SCSI EH issues door lock which
		 * fails and gets invoked again to acquire sense data
		 * for the failed command.
		 *
		 * If door lock fails, always clear sdev->locked to
		 * avoid this infinite loop.
		 */
		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
			qc->dev->sdev->locked = 0;

		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
		qc->scsidone(cmd);
		ata_qc_free(qc);
		return;
	}

	/* successful completion or old EH failure path */
	if (unlikely(err_mask & AC_ERR_DEV)) {
		cmd->result = SAM_STAT_CHECK_CONDITION;
		/* qc is reused and completed by atapi_sense_complete() */
		atapi_request_sense(qc);
		return;
	} else if (unlikely(err_mask)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * a sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_passthru_sense(qc);
	} else {
		u8 *scsicmd = cmd->cmnd;

		if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
			unsigned long flags;
			u8 *buf;

			buf = ata_scsi_rbuf_get(cmd, true, &flags);

	/* ATAPI devices typically report zero for their SCSI version,
	 * and sometimes deviate from the spec WRT response data
	 * format.  If SCSI version is reported as zero like normal,
	 * then we make the following fixups:  1) Fake MMC-5 version,
	 * to indicate to the Linux scsi midlayer this is a modern
	 * device.  2) Ensure response data format / ATAPI information
	 * are always correct.
	 */
			if (buf[2] == 0) {
				buf[2] = 0x5;
				buf[3] = 0x32;
			}

			ata_scsi_rbuf_put(cmd, true, &flags);
		}

		cmd->result = SAM_STAT_GOOD;
	}

	qc->scsidone(cmd);
	ata_qc_free(qc);
}
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
/**
 *	atapi_xlat - Initialize PACKET taskfile
 *	@qc: command structure to be initialized
 *
 *	Copies the SCSI CDB into the qc, chooses the transfer protocol
 *	(nodata / PIO / DMA) and fills in the byte-count limit registers.
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */
static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	int nodata = (scmd->sc_data_direction == DMA_NONE);
	int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
	unsigned int nbytes;

	memset(qc->cdb, 0, dev->cdb_len);
	memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);

	qc->complete_fn = atapi_qc_complete;

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		qc->tf.flags |= ATA_TFLAG_WRITE;
		DPRINTK("direction: write\n");
	}

	qc->tf.command = ATA_CMD_PACKET;
	ata_qc_set_pc_nbytes(qc);

	/* check whether ATAPI DMA is safe; fall back to PIO if not */
	if (!nodata && !using_pio && atapi_check_dma(qc))
		using_pio = 1;

	/* Some controller variants snoop this value for Packet
	 * transfers to do state machine and FIFO management.  Thus we
	 * want to set it properly, and for DMA where it is
	 * effectively meaningless.
	 */
	nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);

	/* Most ATAPI devices which honor transfer chunk size don't
	 * behave according to the spec when odd chunk size which
	 * matches the transfer length is specified.  If the number of
	 * bytes to transfer is 2n+1.  According to the spec, what
	 * should happen is to indicate that 2n+1 is going to be
	 * transferred and expect 2n bytes followed by a data residue
	 * of 1; however, in practice many devices get confused.
	 *
	 * Rounding the chunk size up to the next even number avoids
	 * the problem at the cost of a possible extra pad byte.
	 */
	if (nbytes & 0x1)
		nbytes++;

	/* byte-count limit goes into the cylinder registers */
	qc->tf.lbam = (nbytes & 0xFF);
	qc->tf.lbah = (nbytes >> 8);

	if (nodata)
		qc->tf.protocol = ATAPI_PROT_NODATA;
	else if (using_pio)
		qc->tf.protocol = ATAPI_PROT_PIO;
	else {
		/* DMA */
		qc->tf.protocol = ATAPI_PROT_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;

		if ((dev->flags & ATA_DFLAG_DMADIR) &&
		    (scmd->sc_data_direction != DMA_TO_DEVICE))
			/* some SATA bridges need us to indicate data xfer direction */
			qc->tf.feature |= ATAPI_DMADIR;
	}

	/* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
	   as ATAPI tape drives don't get this right otherwise */
	return 0;
}
2633
2634static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
2635{
2636 if (!sata_pmp_attached(ap)) {
2637 if (likely(devno < ata_link_max_devices(&ap->link)))
2638 return &ap->link.device[devno];
2639 } else {
2640 if (likely(devno < ap->nr_pmp_links))
2641 return &ap->pmp_link[devno].device[0];
2642 }
2643
2644 return NULL;
2645}
2646
2647static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
2648 const struct scsi_device *scsidev)
2649{
2650 int devno;
2651
2652
2653 if (!sata_pmp_attached(ap)) {
2654 if (unlikely(scsidev->channel || scsidev->lun))
2655 return NULL;
2656 devno = scsidev->id;
2657 } else {
2658 if (unlikely(scsidev->id || scsidev->lun))
2659 return NULL;
2660 devno = scsidev->channel;
2661 }
2662
2663 return ata_find_dev(ap, devno);
2664}
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682static struct ata_device *
2683ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2684{
2685 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2686
2687 if (unlikely(!dev || !ata_dev_enabled(dev)))
2688 return NULL;
2689
2690 return dev;
2691}
2692
2693
2694
2695
2696
2697
2698
2699
2700static u8
2701ata_scsi_map_proto(u8 byte1)
2702{
2703 switch((byte1 & 0x1e) >> 1) {
2704 case 3:
2705 return ATA_PROT_NODATA;
2706
2707 case 6:
2708 case 10:
2709 case 11:
2710 return ATA_PROT_DMA;
2711
2712 case 4:
2713 case 5:
2714 return ATA_PROT_PIO;
2715
2716 case 0:
2717 case 1:
2718 case 8:
2719 case 9:
2720 case 7:
2721 case 12:
2722 case 15:
2723 default:
2724 break;
2725 }
2726
2727 return ATA_PROT_UNKNOWN;
2728}
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
/**
 *	ata_scsi_pass_thru - convert ATA pass-thru command to taskfile
 *	@qc: command to translate; taskfile is built in @qc->tf
 *
 *	Translates an ATA_12 or ATA_16 (SAT pass-through) CDB into an ATA
 *	taskfile, validating the protocol and a number of restricted
 *	commands on the way.
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure (sense data set).
 */
static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &(qc->tf);
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	const u8 *cdb = scmd->cmnd;

	if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
		goto invalid_fld;

	/*
	 * Filter TPM commands by default. These provide an
	 * essentially uncontrolled encrypted "back door" between
	 * applications and the disk. Set libata.allow_tpm=1 if you
	 * have a real reason for wanting to use them. This ensures
	 * that installed software cannot easily mess stuff up without
	 * user intent. DVR type users will probably ship with this enabled
	 * for movie content management.
	 *
	 * Note that TPM drives will emit spurious errors on attempts to
	 * enumerate them; this must be handled elsewhere.
	 * 0x5C..0x5F are presumably the TRUSTED SEND/RECEIVE command
	 * opcodes — verify against ACS.
	 */
	if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
		goto invalid_fld;

	/* We may not issue DMA commands if no DMA mode is set */
	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
		goto invalid_fld;

	/*
	 * 12 and 16 byte CDBs use different offsets to
	 * provide the various register values.
	 */
	if (cdb[0] == ATA_16) {
		/*
		 * 16-byte CDB - may contain extended commands.
		 *
		 * If that is the case, copy the upper byte register values.
		 */
		if (cdb[1] & 0x01) {	/* extend bit set */
			tf->hob_feature = cdb[3];
			tf->hob_nsect = cdb[5];
			tf->hob_lbal = cdb[7];
			tf->hob_lbam = cdb[9];
			tf->hob_lbah = cdb[11];
			tf->flags |= ATA_TFLAG_LBA48;
		} else
			tf->flags &= ~ATA_TFLAG_LBA48;

		/*
		 * Always copy low byte, device and command registers.
		 */
		tf->feature = cdb[4];
		tf->nsect = cdb[6];
		tf->lbal = cdb[8];
		tf->lbam = cdb[10];
		tf->lbah = cdb[12];
		tf->device = cdb[13];
		tf->command = cdb[14];
	} else {
		/*
		 * 12-byte CDB - incapable of extended commands.
		 */
		tf->flags &= ~ATA_TFLAG_LBA48;

		tf->feature = cdb[3];
		tf->nsect = cdb[4];
		tf->lbal = cdb[5];
		tf->lbam = cdb[6];
		tf->lbah = cdb[7];
		tf->device = cdb[8];
		tf->command = cdb[9];
	}

	/* enforce correct master/slave bit */
	tf->device = dev->devno ?
		tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;

	/* sanity check for pio multi commands */
	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
		goto invalid_fld;

	if (is_multi_taskfile(tf)) {
		unsigned int multi_count = 1 << (cdb[1] >> 5);

		/* compare the passed through multi_count
		 * with the cached multi_count of libata
		 */
		if (multi_count != dev->multi_count)
			ata_dev_printk(dev, KERN_WARNING,
				       "invalid multi_count %u ignored\n",
				       multi_count);
	}

	/* READ/WRITE LONG use a non-standard sect_size */
	qc->sect_size = ATA_SECT_SIZE;
	switch (tf->command) {
	case ATA_CMD_READ_LONG:
	case ATA_CMD_READ_LONG_ONCE:
	case ATA_CMD_WRITE_LONG:
	case ATA_CMD_WRITE_LONG_ONCE:
		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
			goto invalid_fld;
		qc->sect_size = scsi_bufflen(scmd);
	}

	/*
	 * Filter SET_FEATURES - XFER MODE command -- otherwise,
	 * SET_FEATURES - XFER MODE must be preceded/succeeded
	 * by an update to hardware-specific registers for each
	 * mode, or some other mechanism for mode selection.
	 * Force single-mode operation, or reject the request.
	 */
	if ((tf->command == ATA_CMD_SET_FEATURES)
	    && (tf->feature == SETFEATURES_XFER))
		goto invalid_fld;

	/*
	 * Set flags so that all registers will be written, pass on
	 * write indication (used for PIO/DMA setup), result TF is
	 * copied back and we don't whine too much about its failure.
	 */
	tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);

	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		tf->flags |= ATA_TFLAG_WRITE;

	/*
	 * Set transfer length.
	 *
	 * TODO: find out if we need to do more here to
	 *       cover scatter/gather case.
	 */
	ata_qc_set_pc_nbytes(qc);

	/* request result TF and be quiet about device error */
	qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;

	return 0;

 invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
	/* "Invalid field in cdb" */
	return 1;
}
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
/**
 *	ata_get_xlat_func - check if SCSI to ATA translation is possible
 *	@dev: ATA device
 *	@cmd: SCSI command opcode to consider
 *
 *	Look up the SCSI command given, and determine whether the
 *	SCSI command is to be translated or simulated.
 *
 *	RETURNS:
 *	Pointer to translation function if possible, %NULL if not.
 */
static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
	switch (cmd) {
	case READ_6:
	case READ_10:
	case READ_16:

	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		return ata_scsi_rw_xlat;

	case SYNCHRONIZE_CACHE:
		/* only translate if the device can flush at all */
		if (ata_try_flush_cache(dev))
			return ata_scsi_flush_xlat;
		break;

	case VERIFY:
	case VERIFY_16:
		return ata_scsi_verify_xlat;

	case ATA_12:
	case ATA_16:
		return ata_scsi_pass_thru;

	case START_STOP:
		return ata_scsi_start_stop_xlat;
	}

	return NULL;	/* not translated; will be simulated or rejected */
}
2931
2932
2933
2934
2935
2936
2937
2938
2939
/* Debug helper: dump the first 9 CDB bytes of @cmd together with the
 * port/channel/id/lun address.  Compiled out unless ATA_DEBUG is set.
 */
static inline void ata_scsi_dump_cdb(struct ata_port *ap,
				     struct scsi_cmnd *cmd)
{
#ifdef ATA_DEBUG
	struct scsi_device *scsidev = cmd->device;
	u8 *scsicmd = cmd->cmnd;

	DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		ap->print_id,
		scsidev->channel, scsidev->id, scsidev->lun,
		scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
		scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
		scsicmd[8]);
#endif
}
2955
/* Dispatch one SCSI command to an ATA/ATAPI device: validate the CDB
 * length, pick a translation function (or none) and either translate
 * the command or simulate it.  Returns 0, or a SCSI_MLQUEUE_* busy code
 * from ata_scsi_translate().
 */
static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
				      void (*done)(struct scsi_cmnd *),
				      struct ata_device *dev)
{
	u8 scsi_op = scmd->cmnd[0];
	ata_xlat_func_t xlat_func;
	int rc = 0;

	if (dev->class == ATA_DEV_ATA) {
		if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
			goto bad_cdb_len;

		xlat_func = ata_get_xlat_func(dev, scsi_op);
	} else {
		/* ATAPI */
		if (unlikely(!scmd->cmd_len))
			goto bad_cdb_len;

		xlat_func = NULL;
		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
			/* relay SCSI command to ATAPI device */
			int len = COMMAND_SIZE(scsi_op);
			if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
				goto bad_cdb_len;

			xlat_func = atapi_xlat;
		} else {
			/* ATA_16 passthru, treat as an ATA command */
			if (unlikely(scmd->cmd_len > 16))
				goto bad_cdb_len;

			xlat_func = ata_get_xlat_func(dev, scsi_op);
		}
	}

	if (xlat_func)
		rc = ata_scsi_translate(dev, scmd, done, xlat_func);
	else
		ata_scsi_simulate(dev, scmd, done);

	return rc;

 bad_cdb_len:
	DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
		scmd->cmd_len, scsi_op, dev->cdb_len);
	scmd->result = DID_ERROR << 16;
	done(scmd);
	return 0;
}
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
/**
 *	ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
 *	@cmd: SCSI command to be sent
 *	@done: Completion function, called when command is complete
 *
 *	Entered from the SCSI midlayer with the host lock held; the lock
 *	is swapped for the ATA port lock for the duration of processing.
 *
 *	RETURNS:
 *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
 *	0 otherwise.
 */
int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct ata_port *ap;
	struct ata_device *dev;
	struct scsi_device *scsidev = cmd->device;
	struct Scsi_Host *shost = scsidev->host;
	int rc = 0;

	ap = ata_shost_to_port(shost);

	/* swap the midlayer's host lock for the ATA port lock */
	spin_unlock(shost->host_lock);
	spin_lock(ap->lock);

	ata_scsi_dump_cdb(ap, cmd);

	dev = ata_scsi_find_dev(ap, scsidev);
	if (likely(dev))
		rc = __ata_scsi_queuecmd(cmd, done, dev);
	else {
		cmd->result = (DID_BAD_TARGET << 16);
		done(cmd);
	}

	/* restore the lock the midlayer expects to hold on return */
	spin_unlock(ap->lock);
	spin_lock(shost->host_lock);
	return rc;
}
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
/**
 *	ata_scsi_simulate - simulate SCSI command on ATA device
 *	@dev: the target device
 *	@cmd: SCSI command being sent to device.
 *	@done: SCSI command completion function.
 *
 *	Interprets and directly executes a select list of SCSI commands
 *	that can be handled internally, without the device ever seeing
 *	them.  Commands that cannot be simulated get ILLEGAL REQUEST
 *	sense data.  @done is always called before returning.
 */
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
		       void (*done)(struct scsi_cmnd *))
{
	struct ata_scsi_args args;
	const u8 *scsicmd = cmd->cmnd;
	u8 tmp8;

	args.dev = dev;
	args.id = dev->id;
	args.cmd = cmd;
	args.done = done;

	switch(scsicmd[0]) {
	/* TODO: worth improving? */
	case FORMAT_UNIT:
		ata_scsi_invalid_field(cmd, done);
		break;

	case INQUIRY:
		if (scsicmd[1] & 2)		   /* is CmdDt set?  */
			ata_scsi_invalid_field(cmd, done);
		else if ((scsicmd[1] & 1) == 0)	   /* is EVPD clear? */
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
		else switch (scsicmd[2]) {	   /* VPD page dispatch */
		case 0x00:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
			break;
		case 0x80:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
			break;
		case 0x83:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
			break;
		case 0x89:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
			break;
		case 0xb1:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
			break;
		default:
			ata_scsi_invalid_field(cmd, done);
			break;
		}
		break;

	case MODE_SENSE:
	case MODE_SENSE_10:
		ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
		break;

	case MODE_SELECT:	/* unconditionally return */
	case MODE_SELECT_10:	/* bad-field-in-cdb */
		ata_scsi_invalid_field(cmd, done);
		break;

	case READ_CAPACITY:
		ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		break;

	case SERVICE_ACTION_IN:
		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		else
			ata_scsi_invalid_field(cmd, done);
		break;

	case REPORT_LUNS:
		ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
		break;

	case REQUEST_SENSE:
		/* no pending sense data; return empty sense */
		ata_scsi_set_sense(cmd, 0, 0, 0);
		cmd->result = (DRIVER_SENSE << 24);
		done(cmd);
		break;

	/* if we reach this, then writeback caching is disabled,
	 * turning this into a no-op.
	 */
	case SYNCHRONIZE_CACHE:
		/* fall through */

	/* no-op's, complete with success */
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
	case TEST_UNIT_READY:
		ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
		break;

	case SEND_DIAGNOSTIC:
		tmp8 = scsicmd[1] & ~(1 << 3);
		/* only default self-test, no parameter lists */
		if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
		else
			ata_scsi_invalid_field(cmd, done);
		break;

	/* all other commands */
	default:
		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
		/* "Invalid command operation code" */
		done(cmd);
		break;
	}
}
3171
/* Allocate and register one Scsi_Host per ATA port of @host.  On
 * failure, unwinds all hosts registered so far.  Returns 0 or a
 * negative errno.
 */
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct Scsi_Host *shost;

		rc = -ENOMEM;
		shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
		if (!shost)
			goto err_alloc;

		/* stash the ata_port pointer in the host private data */
		*(struct ata_port **)&shost->hostdata[0] = ap;
		ap->scsi_host = shost;

		shost->transportt = &ata_scsi_transport_template;
		shost->unique_id = ap->print_id;
		shost->max_id = 16;
		shost->max_lun = 1;
		shost->max_channel = 1;
		shost->max_cmd_len = 16;

		/* Schedule policy is determined by ->qc_defer()
		 * callback and it needs to see every deferred qc.
		 * Set host_blocked to 1 to prevent SCSI midlayer from
		 * automatically deferring requests.
		 */
		shost->max_host_blocked = 1;

		rc = scsi_add_host(ap->scsi_host, ap->host->dev);
		if (rc)
			goto err_add;
	}

	return 0;

 err_add:
	scsi_host_put(host->ports[i]->scsi_host);
 err_alloc:
	while (--i >= 0) {
		struct Scsi_Host *shost = host->ports[i]->scsi_host;

		scsi_remove_host(shost);
		scsi_host_put(shost);
	}
	return rc;
}
3220
/* Attach SCSI devices for every enabled ATA device on @ap that doesn't
 * have one yet.  In @sync mode the scan is retried (with a bounded
 * number of attempts while making no progress) before falling back to
 * the async hotplug path.
 */
void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
	int tries = 5;
	struct ata_device *last_failed_dev = NULL;
	struct ata_link *link;
	struct ata_device *dev;

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

 repeat:
	/* first pass: try to attach every enabled, not-yet-attached dev */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			struct scsi_device *sdev;
			int channel = 0, id = 0;

			if (!ata_dev_enabled(dev) || dev->sdev)
				continue;

			/* host link: SCSI id is devno; PMP: channel is pmp# */
			if (ata_is_host_link(link))
				id = dev->devno;
			else
				channel = link->pmp;

			sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
						 NULL);
			if (!IS_ERR(sdev)) {
				dev->sdev = sdev;
				scsi_device_put(sdev);
			}
		}
	}

	/* If we scanned while EH was in progress or allocation
	 * failure occurred, scan would have failed silently.  Check
	 * whether all devices are attached.
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			if (ata_dev_enabled(dev) && !dev->sdev)
				goto exit_loop;
		}
	}
 exit_loop:
	if (!link)	/* no un-attached device found */
		return;

	/* we're missing some SCSI devices */
	if (sync) {
		/* If caller requested synchrnous scan && we've made
		 * any progress, sleep briefly and repeat.
		 */
		if (dev != last_failed_dev) {
			msleep(100);
			last_failed_dev = dev;
			goto repeat;
		}

		/* We might be failing to detect boot device, give it
		 * a few more chances.
		 */
		if (--tries) {
			msleep(100);
			goto repeat;
		}

		ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
				"failed without making any progress,\n"
				"                  switching to async\n");
	}

	/* defer the remainder to the hotplug worker */
	queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
			   round_jiffies_relative(HZ));
}
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311int ata_scsi_offline_dev(struct ata_device *dev)
3312{
3313 if (dev->sdev) {
3314 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
3315 return 1;
3316 }
3317 return 0;
3318}
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
/**
 *	ata_scsi_remove_dev - detach and remove an ATA device's SCSI device
 *	@dev: ATA device whose attached SCSI device is to be removed
 *
 *	Dissociates @dev from its SCSI device under the proper locks,
 *	then removes the SCSI device from the midlayer without any lock
 *	held (scsi_remove_device() may sleep).
 */
static void ata_scsi_remove_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct scsi_device *sdev;
	unsigned long flags;

	/* Take scan_mutex before ap->lock: scan_mutex serializes us
	 * against SCSI scanning activity on this host, while ap->lock
	 * protects dev->sdev. */
	mutex_lock(&ap->scsi_host->scan_mutex);
	spin_lock_irqsave(ap->lock, flags);

	/* clear the association first so nobody else uses it */
	sdev = dev->sdev;
	dev->sdev = NULL;

	if (sdev) {
		/* Pin the sdev with a reference so it stays valid after
		 * we drop the locks below. */
		if (scsi_device_get(sdev) == 0) {
			/* Offline it immediately so no new commands are
			 * issued while the actual removal is pending. */
			scsi_device_set_state(sdev, SDEV_OFFLINE);
		} else {
			/* scsi_device_get() failed — the sdev is
			 * presumably already being torn down elsewhere;
			 * unexpected here, so warn and skip removal. */
			WARN_ON(1);
			sdev = NULL;
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);
	mutex_unlock(&ap->scsi_host->scan_mutex);

	/* perform the (possibly sleeping) removal with no locks held */
	if (sdev) {
		ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
			       sdev->sdev_gendev.bus_id);

		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
}
3378
3379static void ata_scsi_handle_link_detach(struct ata_link *link)
3380{
3381 struct ata_port *ap = link->ap;
3382 struct ata_device *dev;
3383
3384 ata_link_for_each_dev(dev, link) {
3385 unsigned long flags;
3386
3387 if (!(dev->flags & ATA_DFLAG_DETACHED))
3388 continue;
3389
3390 spin_lock_irqsave(ap->lock, flags);
3391 dev->flags &= ~ATA_DFLAG_DETACHED;
3392 spin_unlock_irqrestore(ap->lock, flags);
3393
3394 ata_scsi_remove_dev(dev);
3395 }
3396}
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408void ata_scsi_media_change_notify(struct ata_device *dev)
3409{
3410 if (dev->sdev)
3411 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
3412 GFP_ATOMIC);
3413}
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427void ata_scsi_hotplug(struct work_struct *work)
3428{
3429 struct ata_port *ap =
3430 container_of(work, struct ata_port, hotplug_task.work);
3431 int i;
3432
3433 if (ap->pflags & ATA_PFLAG_UNLOADING) {
3434 DPRINTK("ENTER/EXIT - unloading\n");
3435 return;
3436 }
3437
3438 DPRINTK("ENTER\n");
3439
3440
3441
3442
3443
3444 ata_scsi_handle_link_detach(&ap->link);
3445 if (ap->pmp_link)
3446 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
3447 ata_scsi_handle_link_detach(&ap->pmp_link[i]);
3448
3449
3450 ata_scsi_scan_host(ap, 0);
3451
3452 DPRINTK("EXIT\n");
3453}
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3472 unsigned int id, unsigned int lun)
3473{
3474 struct ata_port *ap = ata_shost_to_port(shost);
3475 unsigned long flags;
3476 int devno, rc = 0;
3477
3478 if (!ap->ops->error_handler)
3479 return -EOPNOTSUPP;
3480
3481 if (lun != SCAN_WILD_CARD && lun)
3482 return -EINVAL;
3483
3484 if (!sata_pmp_attached(ap)) {
3485 if (channel != SCAN_WILD_CARD && channel)
3486 return -EINVAL;
3487 devno = id;
3488 } else {
3489 if (id != SCAN_WILD_CARD && id)
3490 return -EINVAL;
3491 devno = channel;
3492 }
3493
3494 spin_lock_irqsave(ap->lock, flags);
3495
3496 if (devno == SCAN_WILD_CARD) {
3497 struct ata_link *link;
3498
3499 ata_port_for_each_link(link, ap) {
3500 struct ata_eh_info *ehi = &link->eh_info;
3501 ehi->probe_mask |= ATA_ALL_DEVICES;
3502 ehi->action |= ATA_EH_RESET;
3503 }
3504 } else {
3505 struct ata_device *dev = ata_find_dev(ap, devno);
3506
3507 if (dev) {
3508 struct ata_eh_info *ehi = &dev->link->eh_info;
3509 ehi->probe_mask |= 1 << dev->devno;
3510 ehi->action |= ATA_EH_RESET;
3511 } else
3512 rc = -EINVAL;
3513 }
3514
3515 if (rc == 0) {
3516 ata_port_schedule_eh(ap);
3517 spin_unlock_irqrestore(ap->lock, flags);
3518 ata_port_wait_eh(ap);
3519 } else
3520 spin_unlock_irqrestore(ap->lock, flags);
3521
3522 return rc;
3523}
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
/**
 *	ata_scsi_dev_rescan - rescan attached SCSI devices
 *	@work: work item embedded in the owning ata_port's scsi_rescan_task
 *
 *	Invokes scsi_rescan_device() on every enabled ATA device of the
 *	port that has a SCSI device attached.  The port lock is dropped
 *	around each rescan call and re-taken afterwards.
 */
void ata_scsi_dev_rescan(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, scsi_rescan_task);
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			struct scsi_device *sdev = dev->sdev;

			/* skip devices without an attached sdev */
			if (!ata_dev_enabled(dev) || !sdev)
				continue;
			/* skip sdevs we can't pin with a reference --
			 * presumably they are being torn down */
			if (scsi_device_get(sdev))
				continue;

			/* scsi_rescan_device() may sleep, so release
			 * ap->lock across the call; the held reference
			 * keeps @sdev valid meanwhile */
			spin_unlock_irqrestore(ap->lock, flags);
			scsi_rescan_device(&(sdev->sdev_gendev));
			scsi_device_put(sdev);
			spin_lock_irqsave(ap->lock, flags);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);
}
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3580 struct ata_port_info *port_info,
3581 struct Scsi_Host *shost)
3582{
3583 struct ata_port *ap;
3584
3585 ap = ata_port_alloc(host);
3586 if (!ap)
3587 return NULL;
3588
3589 ap->port_no = 0;
3590 ap->lock = shost->host_lock;
3591 ap->pio_mask = port_info->pio_mask;
3592 ap->mwdma_mask = port_info->mwdma_mask;
3593 ap->udma_mask = port_info->udma_mask;
3594 ap->flags |= port_info->flags;
3595 ap->ops = port_info->port_ops;
3596 ap->cbl = ATA_CBL_SATA;
3597
3598 return ap;
3599}
3600EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
/**
 *	ata_sas_port_start - port start hook for SAS-attached ATA ports
 *	@ap: port to start
 *
 *	No per-port resources are allocated here; this exists so SAS
 *	LLDDs have a valid ->port_start hook to plug in.
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_sas_port_start(struct ata_port *ap)
{
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
/**
 *	ata_sas_port_stop - port stop hook for SAS-attached ATA ports
 *	@ap: port to stop
 *
 *	Counterpart to ata_sas_port_start(); intentionally a no-op since
 *	no per-port resources were allocated.
 */
void ata_sas_port_stop(struct ata_port *ap)
{
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646int ata_sas_port_init(struct ata_port *ap)
3647{
3648 int rc = ap->ops->port_start(ap);
3649
3650 if (!rc) {
3651 ap->print_id = ata_print_id++;
3652 rc = ata_bus_probe(ap);
3653 }
3654
3655 return rc;
3656}
3657EXPORT_SYMBOL_GPL(ata_sas_port_init);
3658
3659
3660
3661
3662
3663
3664
3665void ata_sas_port_destroy(struct ata_port *ap)
3666{
3667 if (ap->ops->port_stop)
3668 ap->ops->port_stop(ap);
3669 kfree(ap);
3670}
3671EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
/**
 *	ata_sas_slave_configure - configure a SCSI device for a SAS ATA port
 *	@sdev: SCSI device to configure
 *	@ap: ATA port the device sits behind
 *
 *	Applies the generic libata SCSI-device settings, then the
 *	device-specific settings for the port's (single) link device.
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
	ata_scsi_sdev_config(sdev);
	ata_scsi_dev_config(sdev, ap->link.device);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
3702 struct ata_port *ap)
3703{
3704 int rc = 0;
3705
3706 ata_scsi_dump_cdb(ap, cmd);
3707
3708 if (likely(ata_dev_enabled(ap->link.device)))
3709 rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
3710 else {
3711 cmd->result = (DID_BAD_TARGET << 16);
3712 done(cmd);
3713 }
3714 return rc;
3715}
3716EXPORT_SYMBOL_GPL(ata_sas_queuecmd);