Lines matching "+full:tf +full:-a" in drivers/ata/libata-core.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-core.c - helper library for ATA
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
9 * as Documentation/driver-api/libata.rst
12 * http://www.sata-io.org/
16 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
17 * http://www.sata-io.org (SATA)
19 * http://www.qic.org (QIC157 - Tape and DSC)
20 * http://www.ce-ata.org (CE-ATA: not supported)
22 * libata is essentially a library of internal helper functions for
23 * low-level ATA host controller drivers. As such, the API/ABI is
65 #include "libata-transport.h"
114 …ing cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst …
169 * ata_link_next - link iteration helper
192 return ap->pmp_link; in ata_link_next()
195 return &ap->link; in ata_link_next()
199 if (link == &ap->link) in ata_link_next()
203 return ap->pmp_link; in ata_link_next()
206 if (unlikely(ap->slave_link)) in ata_link_next()
207 return ap->slave_link; in ata_link_next()
214 if (unlikely(link == ap->slave_link)) in ata_link_next()
217 /* we were over a PMP link */ in ata_link_next()
218 if (++link < ap->pmp_link + ap->nr_pmp_links) in ata_link_next()
222 return &ap->link; in ata_link_next()
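Editor's note: ata_link_next() walks the host link, an optional slave link, and any PMP fan-out links in order. Callers normally drive it through the ata_for_each_link() wrapper in include/linux/libata.h; a rough sketch of that usage pattern, assuming kernel context:

    /* Iterate every edge link on a port, roughly what
     * ata_for_each_link(link, ap, EDGE) expands to. */
    struct ata_link *link;

    for (link = ata_link_next(NULL, ap, ATA_LITER_EDGE); link;
         link = ata_link_next(link, ap, ATA_LITER_EDGE))
            ata_link_info(link, "pmp %d online %d\n",
                          link->pmp, ata_link_online(link));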
229 * ata_dev_next - device iteration helper
251 dev = link->device; in ata_dev_next()
255 dev = link->device + ata_link_max_devices(link) - 1; in ata_dev_next()
264 if (++dev < link->device + ata_link_max_devices(link)) in ata_dev_next()
269 if (--dev >= link->device) in ata_dev_next()
283 * ata_dev_phys_link - find physical link for a device
287 * this is different from @dev->link only when @dev is on slave
288 * link. For all other cases, it's the same as @dev->link.
298 struct ata_port *ap = dev->link->ap; in ata_dev_phys_link()
300 if (!ap->slave_link) in ata_dev_phys_link()
301 return dev->link; in ata_dev_phys_link()
302 if (!dev->devno) in ata_dev_phys_link()
303 return &ap->link; in ata_dev_phys_link()
304 return ap->slave_link; in ata_dev_phys_link()
309 * ata_force_cbl - force cable type according to libata.force
315 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
325 for (i = ata_force_tbl_size - 1; i >= 0; i--) { in ata_force_cbl()
328 if (fe->port != -1 && fe->port != ap->print_id) in ata_force_cbl()
331 if (fe->param.cbl == ATA_CBL_NONE) in ata_force_cbl()
334 ap->cbl = fe->param.cbl; in ata_force_cbl()
335 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name); in ata_force_cbl()
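Editor's note: the force table entries come from the libata.force= kernel parameter, and the right-most matching entry wins, which is why the loop scans the table backwards. An illustrative boot-time example using the syntax shown in the comment above (port 1, device 0):

    libata.force=1:40c,1.00:udma4
    # 1:40c      force a 40-wire cable on port 1
    # 1.00:udma4 cap device 0 on port 1 at UDMA/66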
341 * ata_force_link_limits - force link limits according to libata.force
347 * the host link and all fan-out ports connected via PMP. If the
349 * first fan-out link not the host link. Device number 15 always
359 int linkno = link->pmp; in ata_force_link_limits()
365 for (i = ata_force_tbl_size - 1; i >= 0; i--) { in ata_force_link_limits()
368 if (fe->port != -1 && fe->port != link->ap->print_id) in ata_force_link_limits()
371 if (fe->device != -1 && fe->device != linkno) in ata_force_link_limits()
375 if (!did_spd && fe->param.spd_limit) { in ata_force_link_limits()
376 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; in ata_force_link_limits()
378 fe->param.name); in ata_force_link_limits()
383 if (fe->param.lflags) { in ata_force_link_limits()
384 link->flags |= fe->param.lflags; in ata_force_link_limits()
386 "FORCE: link flag 0x%x forced -> 0x%x\n", in ata_force_link_limits()
387 fe->param.lflags, link->flags); in ata_force_link_limits()
393 * ata_force_xfermask - force xfermask according to libata.force
405 int devno = dev->link->pmp + dev->devno; in ata_force_xfermask()
410 if (ata_is_host_link(dev->link)) in ata_force_xfermask()
413 for (i = ata_force_tbl_size - 1; i >= 0; i--) { in ata_force_xfermask()
417 if (fe->port != -1 && fe->port != dev->link->ap->print_id) in ata_force_xfermask()
420 if (fe->device != -1 && fe->device != devno && in ata_force_xfermask()
421 fe->device != alt_devno) in ata_force_xfermask()
424 if (!fe->param.xfer_mask) in ata_force_xfermask()
427 ata_unpack_xfermask(fe->param.xfer_mask, in ata_force_xfermask()
430 dev->udma_mask = udma_mask; in ata_force_xfermask()
432 dev->udma_mask = 0; in ata_force_xfermask()
433 dev->mwdma_mask = mwdma_mask; in ata_force_xfermask()
435 dev->udma_mask = 0; in ata_force_xfermask()
436 dev->mwdma_mask = 0; in ata_force_xfermask()
437 dev->pio_mask = pio_mask; in ata_force_xfermask()
441 fe->param.name); in ata_force_xfermask()
447 * ata_force_horkage - force horkage according to libata.force
459 int devno = dev->link->pmp + dev->devno; in ata_force_horkage()
464 if (ata_is_host_link(dev->link)) in ata_force_horkage()
470 if (fe->port != -1 && fe->port != dev->link->ap->print_id) in ata_force_horkage()
473 if (fe->device != -1 && fe->device != devno && in ata_force_horkage()
474 fe->device != alt_devno) in ata_force_horkage()
477 if (!(~dev->horkage & fe->param.horkage_on) && in ata_force_horkage()
478 !(dev->horkage & fe->param.horkage_off)) in ata_force_horkage()
481 dev->horkage |= fe->param.horkage_on; in ata_force_horkage()
482 dev->horkage &= ~fe->param.horkage_off; in ata_force_horkage()
485 fe->param.name); in ata_force_horkage()
495 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
564 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
565 * @tf: command to examine and configure
566 * @dev: device tf belongs to
568 * Examine the device configuration and tf->flags to calculate
574 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) in ata_rwcmd_protocol() argument
580 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; in ata_rwcmd_protocol()
581 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; in ata_rwcmd_protocol()
582 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; in ata_rwcmd_protocol()
584 if (dev->flags & ATA_DFLAG_PIO) { in ata_rwcmd_protocol()
585 tf->protocol = ATA_PROT_PIO; in ata_rwcmd_protocol()
586 index = dev->multi_count ? 0 : 8; in ata_rwcmd_protocol()
587 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { in ata_rwcmd_protocol()
589 tf->protocol = ATA_PROT_PIO; in ata_rwcmd_protocol()
590 index = dev->multi_count ? 0 : 8; in ata_rwcmd_protocol()
592 tf->protocol = ATA_PROT_DMA; in ata_rwcmd_protocol()
598 tf->command = cmd; in ata_rwcmd_protocol()
601 return -1; in ata_rwcmd_protocol()
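Editor's note: the fua/lba48/write values above sum to an index that selects an opcode from the ata_rw_cmds[] table (defined earlier in the file, not shown in this listing). A comment-only worked example, assuming that table layout:

    /*
     * Example: LBA48 DMA write with FUA set.
     *   fua = 4, lba48 = 2, write = 1  ->  offset 7 within the DMA group,
     *   which selects ATA_CMD_WRITE_FUA_EXT; ata_rwcmd_protocol() returns 0.
     *   A zero table entry means "no such command" and yields the -1 above.
     */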
605 * ata_tf_read_block - Read block address from ATA taskfile
606 * @tf: ATA taskfile of interest
607 * @dev: ATA device @tf belongs to
612 * Read block address from @tf. This function can handle all
613 * three address formats - LBA, LBA48 and CHS. tf->protocol and
617 * Block address read from @tf.
619 u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev) in ata_tf_read_block() argument
623 if (tf->flags & ATA_TFLAG_LBA) { in ata_tf_read_block()
624 if (tf->flags & ATA_TFLAG_LBA48) { in ata_tf_read_block()
625 block |= (u64)tf->hob_lbah << 40; in ata_tf_read_block()
626 block |= (u64)tf->hob_lbam << 32; in ata_tf_read_block()
627 block |= (u64)tf->hob_lbal << 24; in ata_tf_read_block()
629 block |= (tf->device & 0xf) << 24; in ata_tf_read_block()
631 block |= tf->lbah << 16; in ata_tf_read_block()
632 block |= tf->lbam << 8; in ata_tf_read_block()
633 block |= tf->lbal; in ata_tf_read_block()
637 cyl = tf->lbam | (tf->lbah << 8); in ata_tf_read_block()
638 head = tf->device & 0xf; in ata_tf_read_block()
639 sect = tf->lbal; in ata_tf_read_block()
647 block = (cyl * dev->heads + head) * dev->sectors + sect - 1; in ata_tf_read_block()
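Editor's note: the CHS branch converts cylinder/head/sector back to a linear block number with the usual (cyl * heads + head) * sectors + sect - 1 formula. A small worked example with illustrative geometry:

    /*
     * Example: dev->heads = 16, dev->sectors = 63,
     *          cyl = 2, head = 3, sect = 10
     * block = (2 * 16 + 3) * 63 + 10 - 1
     *       = 35 * 63 + 9 = 2214
     */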
654 * ata_build_rw_tf - Build ATA taskfile for given read/write request
655 * @tf: Target ATA taskfile
656 * @dev: ATA device @tf belongs to
666 * Build ATA taskfile @tf for read/write request described by
671 * 0 on success, -ERANGE if the request is too large for @dev,
672 * -EINVAL if the request is invalid.
674 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, in ata_build_rw_tf() argument
678 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_build_rw_tf()
679 tf->flags |= tf_flags; in ata_build_rw_tf()
684 return -ERANGE; in ata_build_rw_tf()
686 tf->protocol = ATA_PROT_NCQ; in ata_build_rw_tf()
687 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; in ata_build_rw_tf()
689 if (tf->flags & ATA_TFLAG_WRITE) in ata_build_rw_tf()
690 tf->command = ATA_CMD_FPDMA_WRITE; in ata_build_rw_tf()
692 tf->command = ATA_CMD_FPDMA_READ; in ata_build_rw_tf()
694 tf->nsect = tag << 3; in ata_build_rw_tf()
695 tf->hob_feature = (n_block >> 8) & 0xff; in ata_build_rw_tf()
696 tf->feature = n_block & 0xff; in ata_build_rw_tf()
698 tf->hob_lbah = (block >> 40) & 0xff; in ata_build_rw_tf()
699 tf->hob_lbam = (block >> 32) & 0xff; in ata_build_rw_tf()
700 tf->hob_lbal = (block >> 24) & 0xff; in ata_build_rw_tf()
701 tf->lbah = (block >> 16) & 0xff; in ata_build_rw_tf()
702 tf->lbam = (block >> 8) & 0xff; in ata_build_rw_tf()
703 tf->lbal = block & 0xff; in ata_build_rw_tf()
705 tf->device = ATA_LBA; in ata_build_rw_tf()
706 if (tf->flags & ATA_TFLAG_FUA) in ata_build_rw_tf()
707 tf->device |= 1 << 7; in ata_build_rw_tf()
709 if (dev->flags & ATA_DFLAG_NCQ_PRIO) { in ata_build_rw_tf()
711 tf->hob_nsect |= ATA_PRIO_HIGH << in ata_build_rw_tf()
714 } else if (dev->flags & ATA_DFLAG_LBA) { in ata_build_rw_tf()
715 tf->flags |= ATA_TFLAG_LBA; in ata_build_rw_tf()
719 tf->device |= (block >> 24) & 0xf; in ata_build_rw_tf()
721 if (!(dev->flags & ATA_DFLAG_LBA48)) in ata_build_rw_tf()
722 return -ERANGE; in ata_build_rw_tf()
725 tf->flags |= ATA_TFLAG_LBA48; in ata_build_rw_tf()
727 tf->hob_nsect = (n_block >> 8) & 0xff; in ata_build_rw_tf()
729 tf->hob_lbah = (block >> 40) & 0xff; in ata_build_rw_tf()
730 tf->hob_lbam = (block >> 32) & 0xff; in ata_build_rw_tf()
731 tf->hob_lbal = (block >> 24) & 0xff; in ata_build_rw_tf()
734 return -ERANGE; in ata_build_rw_tf()
736 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) in ata_build_rw_tf()
737 return -EINVAL; in ata_build_rw_tf()
739 tf->nsect = n_block & 0xff; in ata_build_rw_tf()
741 tf->lbah = (block >> 16) & 0xff; in ata_build_rw_tf()
742 tf->lbam = (block >> 8) & 0xff; in ata_build_rw_tf()
743 tf->lbal = block & 0xff; in ata_build_rw_tf()
745 tf->device |= ATA_LBA; in ata_build_rw_tf()
750 /* The request -may- be too large for CHS addressing. */ in ata_build_rw_tf()
752 return -ERANGE; in ata_build_rw_tf()
754 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) in ata_build_rw_tf()
755 return -EINVAL; in ata_build_rw_tf()
758 track = (u32)block / dev->sectors; in ata_build_rw_tf()
759 cyl = track / dev->heads; in ata_build_rw_tf()
760 head = track % dev->heads; in ata_build_rw_tf()
761 sect = (u32)block % dev->sectors + 1; in ata_build_rw_tf()
767 Cylinder: 0-65535 in ata_build_rw_tf()
768 Head: 0-15 in ata_build_rw_tf()
769 Sector: 1-255*/ in ata_build_rw_tf()
771 return -ERANGE; in ata_build_rw_tf()
773 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ in ata_build_rw_tf()
774 tf->lbal = sect; in ata_build_rw_tf()
775 tf->lbam = cyl; in ata_build_rw_tf()
776 tf->lbah = cyl >> 8; in ata_build_rw_tf()
777 tf->device |= head; in ata_build_rw_tf()
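Editor's note: for the NCQ path above, the field placement is the non-obvious part: the sector count goes into feature/hob_feature and the queue tag into bits 7:3 of nsect, matching the FPDMA READ/WRITE layout. A comment-only sketch with illustrative values:

    /*
     * Example: 8-sector FPDMA WRITE at LBA 0x12345678, tag 5.
     *   tf->command     = ATA_CMD_FPDMA_WRITE
     *   tf->feature     = 8        (n_block & 0xff)
     *   tf->hob_feature = 0        (n_block >> 8)
     *   tf->nsect       = 5 << 3   (tag in bits 7:3)
     *   tf->lbal..hob_lbah carry the 48-bit LBA, tf->device = ATA_LBA
     */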
784 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
789 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
809 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
836 { -1, },
840 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
854 int highbit = fls(xfer_mask) - 1; in ata_xfer_mask2mode()
857 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) in ata_xfer_mask2mode()
858 if (highbit >= ent->shift && highbit < ent->shift + ent->bits) in ata_xfer_mask2mode()
859 return ent->base + highbit - ent->shift; in ata_xfer_mask2mode()
865 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
880 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) in ata_xfer_mode2mask()
881 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) in ata_xfer_mode2mask()
882 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1) in ata_xfer_mode2mask()
883 & ~((1 << ent->shift) - 1); in ata_xfer_mode2mask()
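Editor's note: ata_pack_xfermask()/ata_unpack_xfermask() simply shift the three per-type masks into one bitmap at ATA_SHIFT_PIO/MWDMA/UDMA, so the XFER_* helpers above can treat transfer modes as a single ordered bit space. A rough round-trip sketch (kernel context assumed, exact mask types abbreviated):

    unsigned long xfer_mask, pio, mwdma, udma;

    xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
    ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
    /* pio == ATA_PIO4, mwdma == ATA_MWDMA2, udma == ATA_UDMA5 */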
889 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
898 * Matching xfer_shift, -1 if no match found.
904 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) in ata_xfer_mode2shift()
905 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) in ata_xfer_mode2shift()
906 return ent->shift; in ata_xfer_mode2shift()
907 return -1; in ata_xfer_mode2shift()
912 * ata_mode_string - convert xfer_mask to string
923 * @mode_mask, or the constant C string "<n/a>".
951 highbit = fls(xfer_mask) - 1; in ata_mode_string()
954 return "<n/a>"; in ata_mode_string()
966 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) in sata_spd_string()
968 return spd_str[spd - 1]; in sata_spd_string()
972 * ata_dev_classify - determine device type based on ATA-spec signature
973 * @tf: ATA taskfile register set for device to be identified
975 * Determine from taskfile register contents whether a device is
986 unsigned int ata_dev_classify(const struct ata_taskfile *tf) in ata_dev_classify() argument
989 * put a proper signature into the LBA mid/high registers, in ata_dev_classify()
992 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate in ata_dev_classify()
999 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and in ata_dev_classify()
1004 * identifies a port multiplier and 0x3c/0xc3 a SEMB device. in ata_dev_classify()
1005 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports in ata_dev_classify()
1009 if ((tf->lbam == 0) && (tf->lbah == 0)) { in ata_dev_classify()
1014 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { in ata_dev_classify()
1019 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { in ata_dev_classify()
1024 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { in ata_dev_classify()
1029 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) { in ata_dev_classify()
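Editor's note: classification keys purely on the LBA mid/high signature the device leaves after reset. Restating the checks above with the classes the surrounding code returns (return statements not shown in this listing):

    /*
     *   lbam 0x00, lbah 0x00  ->  ATA_DEV_ATA
     *   lbam 0x14, lbah 0xeb  ->  ATA_DEV_ATAPI
     *   lbam 0x69, lbah 0x96  ->  ATA_DEV_PMP
     *   lbam 0x3c, lbah 0xc3  ->  ATA_DEV_SEMB
     *   lbam 0xcd, lbah 0xab  ->  ATA_DEV_ZAC
     */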
1040 * ata_id_string - Convert IDENTIFY DEVICE page into string
1047 * 16-bit chunks. Run through the string, and output each
1048 * 8-bit chunk linearly, regardless of platform.
1071 len -= 2; in ata_id_string()
1077 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1095 ata_id_string(id, s, ofs, len - 1); in ata_id_c_string()
1097 p = s + strnlen(s, len - 1); in ata_id_c_string()
1098 while (p > s && p[-1] == ' ') in ata_id_c_string()
1099 p--; in ata_id_c_string()
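Editor's note: IDENTIFY DEVICE strings are stored as 16-bit words with the first character of each pair in the high byte, which is why ata_id_string() emits the high byte before the low byte. A small illustration with a hypothetical model string:

    /*
     * Example: a model field at id[ATA_ID_PROD] beginning "WD...":
     *   id[27] = ('W' << 8) | 'D';   high byte printed first, then low byte
     * ata_id_c_string() additionally NUL-terminates and strips trailing spaces.
     */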
1121 u64 ata_tf_to_lba48(const struct ata_taskfile *tf) in ata_tf_to_lba48() argument
1125 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; in ata_tf_to_lba48()
1126 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; in ata_tf_to_lba48()
1127 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; in ata_tf_to_lba48()
1128 sectors |= (tf->lbah & 0xff) << 16; in ata_tf_to_lba48()
1129 sectors |= (tf->lbam & 0xff) << 8; in ata_tf_to_lba48()
1130 sectors |= (tf->lbal & 0xff); in ata_tf_to_lba48()
1135 u64 ata_tf_to_lba(const struct ata_taskfile *tf) in ata_tf_to_lba() argument
1139 sectors |= (tf->device & 0x0f) << 24; in ata_tf_to_lba()
1140 sectors |= (tf->lbah & 0xff) << 16; in ata_tf_to_lba()
1141 sectors |= (tf->lbam & 0xff) << 8; in ata_tf_to_lba()
1142 sectors |= (tf->lbal & 0xff); in ata_tf_to_lba()
1148 * ata_read_native_max_address - Read native max address
1156 * 0 on success, -EACCES if command is aborted by the drive.
1157 * -EIO on other errors.
1162 struct ata_taskfile tf; in ata_read_native_max_address() local
1163 int lba48 = ata_id_has_lba48(dev->id); in ata_read_native_max_address()
1165 ata_tf_init(dev, &tf); in ata_read_native_max_address()
1168 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_read_native_max_address()
1171 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; in ata_read_native_max_address()
1172 tf.flags |= ATA_TFLAG_LBA48; in ata_read_native_max_address()
1174 tf.command = ATA_CMD_READ_NATIVE_MAX; in ata_read_native_max_address()
1176 tf.protocol = ATA_PROT_NODATA; in ata_read_native_max_address()
1177 tf.device |= ATA_LBA; in ata_read_native_max_address()
1179 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); in ata_read_native_max_address()
1184 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) in ata_read_native_max_address()
1185 return -EACCES; in ata_read_native_max_address()
1186 return -EIO; in ata_read_native_max_address()
1190 *max_sectors = ata_tf_to_lba48(&tf) + 1; in ata_read_native_max_address()
1192 *max_sectors = ata_tf_to_lba(&tf) + 1; in ata_read_native_max_address()
1193 if (dev->horkage & ATA_HORKAGE_HPA_SIZE) in ata_read_native_max_address()
1194 (*max_sectors)--; in ata_read_native_max_address()
1199 * ata_set_max_sectors - Set max sectors
1206 * 0 on success, -EACCES if command is aborted or denied (due to
1207 * previous non-volatile SET_MAX) by the drive. -EIO on other
1213 struct ata_taskfile tf; in ata_set_max_sectors() local
1214 int lba48 = ata_id_has_lba48(dev->id); in ata_set_max_sectors()
1216 new_sectors--; in ata_set_max_sectors()
1218 ata_tf_init(dev, &tf); in ata_set_max_sectors()
1220 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_set_max_sectors()
1223 tf.command = ATA_CMD_SET_MAX_EXT; in ata_set_max_sectors()
1224 tf.flags |= ATA_TFLAG_LBA48; in ata_set_max_sectors()
1226 tf.hob_lbal = (new_sectors >> 24) & 0xff; in ata_set_max_sectors()
1227 tf.hob_lbam = (new_sectors >> 32) & 0xff; in ata_set_max_sectors()
1228 tf.hob_lbah = (new_sectors >> 40) & 0xff; in ata_set_max_sectors()
1230 tf.command = ATA_CMD_SET_MAX; in ata_set_max_sectors()
1232 tf.device |= (new_sectors >> 24) & 0xf; in ata_set_max_sectors()
1235 tf.protocol = ATA_PROT_NODATA; in ata_set_max_sectors()
1236 tf.device |= ATA_LBA; in ata_set_max_sectors()
1238 tf.lbal = (new_sectors >> 0) & 0xff; in ata_set_max_sectors()
1239 tf.lbam = (new_sectors >> 8) & 0xff; in ata_set_max_sectors()
1240 tf.lbah = (new_sectors >> 16) & 0xff; in ata_set_max_sectors()
1242 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); in ata_set_max_sectors()
1248 (tf.feature & (ATA_ABORTED | ATA_IDNF))) in ata_set_max_sectors()
1249 return -EACCES; in ata_set_max_sectors()
1250 return -EIO; in ata_set_max_sectors()
1257 * ata_hpa_resize - Resize a device with an HPA set
1265 * 0 on success, -errno on failure.
1269 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_hpa_resize()
1270 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; in ata_hpa_resize()
1271 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA; in ata_hpa_resize()
1272 u64 sectors = ata_id_n_sectors(dev->id); in ata_hpa_resize()
1277 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) || in ata_hpa_resize()
1278 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || in ata_hpa_resize()
1279 (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) in ata_hpa_resize()
1288 if (rc == -EACCES || !unlock_hpa) { in ata_hpa_resize()
1291 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; in ata_hpa_resize()
1294 if (rc == -EACCES) in ata_hpa_resize()
1300 dev->n_native_sectors = native_sectors; in ata_hpa_resize()
1322 if (rc == -EACCES) { in ata_hpa_resize()
1325 "device aborted resize (%llu -> %llu), skipping HPA handling\n", in ata_hpa_resize()
1328 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; in ata_hpa_resize()
1333 /* re-read IDENTIFY data */ in ata_hpa_resize()
1337 "failed to re-read IDENTIFY data after HPA resizing\n"); in ata_hpa_resize()
1342 u64 new_sectors = ata_id_n_sectors(dev->id); in ata_hpa_resize()
1344 "HPA unlocked: %llu -> %llu, native %llu\n", in ata_hpa_resize()
1354 * ata_dump_id - IDENTIFY DEVICE info debugging output
1357 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1393 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1419 * a mask. in ata_id_xfermask()
1423 pio_mask = (2 << mode) - 1; in ata_id_xfermask()
1428 * committee and you too can get a free iordy field to in ata_id_xfermask()
1464 struct completion *waiting = qc->private_data; in ata_qc_complete_internal()
1470 * ata_exec_internal_sg - execute libata internal command
1472 * @tf: Taskfile registers for the command and the result
1479 * Executes libata internal command with timeout. @tf contains
1482 * is taken after a command times out. It's caller's duty to
1492 struct ata_taskfile *tf, const u8 *cdb, in ata_exec_internal_sg() argument
1496 struct ata_link *link = dev->link; in ata_exec_internal_sg()
1497 struct ata_port *ap = link->ap; in ata_exec_internal_sg()
1498 u8 command = tf->command; in ata_exec_internal_sg()
1510 spin_lock_irqsave(ap->lock, flags); in ata_exec_internal_sg()
1513 if (ap->pflags & ATA_PFLAG_FROZEN) { in ata_exec_internal_sg()
1514 spin_unlock_irqrestore(ap->lock, flags); in ata_exec_internal_sg()
1521 qc->tag = ATA_TAG_INTERNAL; in ata_exec_internal_sg()
1522 qc->hw_tag = 0; in ata_exec_internal_sg()
1523 qc->scsicmd = NULL; in ata_exec_internal_sg()
1524 qc->ap = ap; in ata_exec_internal_sg()
1525 qc->dev = dev; in ata_exec_internal_sg()
1528 preempted_tag = link->active_tag; in ata_exec_internal_sg()
1529 preempted_sactive = link->sactive; in ata_exec_internal_sg()
1530 preempted_qc_active = ap->qc_active; in ata_exec_internal_sg()
1531 preempted_nr_active_links = ap->nr_active_links; in ata_exec_internal_sg()
1532 link->active_tag = ATA_TAG_POISON; in ata_exec_internal_sg()
1533 link->sactive = 0; in ata_exec_internal_sg()
1534 ap->qc_active = 0; in ata_exec_internal_sg()
1535 ap->nr_active_links = 0; in ata_exec_internal_sg()
1538 qc->tf = *tf; in ata_exec_internal_sg()
1540 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); in ata_exec_internal_sg()
1543 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && in ata_exec_internal_sg()
1545 qc->tf.feature |= ATAPI_DMADIR; in ata_exec_internal_sg()
1547 qc->flags |= ATA_QCFLAG_RESULT_TF; in ata_exec_internal_sg()
1548 qc->dma_dir = dma_dir; in ata_exec_internal_sg()
1554 buflen += sg->length; in ata_exec_internal_sg()
1557 qc->nbytes = buflen; in ata_exec_internal_sg()
1560 qc->private_data = &wait; in ata_exec_internal_sg()
1561 qc->complete_fn = ata_qc_complete_internal; in ata_exec_internal_sg()
1565 spin_unlock_irqrestore(ap->lock, flags); in ata_exec_internal_sg()
1576 if (ap->ops->error_handler) in ata_exec_internal_sg()
1581 if (ap->ops->error_handler) in ata_exec_internal_sg()
1587 spin_lock_irqsave(ap->lock, flags); in ata_exec_internal_sg()
1592 * cleaned up by ->post_internal_cmd(). in ata_exec_internal_sg()
1594 if (qc->flags & ATA_QCFLAG_ACTIVE) { in ata_exec_internal_sg()
1595 qc->err_mask |= AC_ERR_TIMEOUT; in ata_exec_internal_sg()
1597 if (ap->ops->error_handler) in ata_exec_internal_sg()
1607 spin_unlock_irqrestore(ap->lock, flags); in ata_exec_internal_sg()
1611 if (ap->ops->post_internal_cmd) in ata_exec_internal_sg()
1612 ap->ops->post_internal_cmd(qc); in ata_exec_internal_sg()
1615 if (qc->flags & ATA_QCFLAG_FAILED) { in ata_exec_internal_sg()
1616 if (qc->result_tf.command & (ATA_ERR | ATA_DF)) in ata_exec_internal_sg()
1617 qc->err_mask |= AC_ERR_DEV; in ata_exec_internal_sg()
1619 if (!qc->err_mask) in ata_exec_internal_sg()
1620 qc->err_mask |= AC_ERR_OTHER; in ata_exec_internal_sg()
1622 if (qc->err_mask & ~AC_ERR_OTHER) in ata_exec_internal_sg()
1623 qc->err_mask &= ~AC_ERR_OTHER; in ata_exec_internal_sg()
1624 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) { in ata_exec_internal_sg()
1625 qc->result_tf.command |= ATA_SENSE; in ata_exec_internal_sg()
1629 spin_lock_irqsave(ap->lock, flags); in ata_exec_internal_sg()
1631 *tf = qc->result_tf; in ata_exec_internal_sg()
1632 err_mask = qc->err_mask; in ata_exec_internal_sg()
1635 link->active_tag = preempted_tag; in ata_exec_internal_sg()
1636 link->sactive = preempted_sactive; in ata_exec_internal_sg()
1637 ap->qc_active = preempted_qc_active; in ata_exec_internal_sg()
1638 ap->nr_active_links = preempted_nr_active_links; in ata_exec_internal_sg()
1640 spin_unlock_irqrestore(ap->lock, flags); in ata_exec_internal_sg()
1649 * ata_exec_internal - execute libata internal command
1651 * @tf: Taskfile registers for the command and the result
1668 struct ata_taskfile *tf, const u8 *cdb, in ata_exec_internal() argument
1682 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, in ata_exec_internal()
1687 * ata_pio_need_iordy - check if iordy needed
1699 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) in ata_pio_need_iordy()
1701 /* Controller doesn't support IORDY. Probably a pointless in ata_pio_need_iordy()
1704 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) in ata_pio_need_iordy()
1707 if (ata_id_is_cfa(adev->id) in ata_pio_need_iordy()
1708 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6)) in ata_pio_need_iordy()
1711 if (adev->pio_mode > XFER_PIO_2) in ata_pio_need_iordy()
1714 if (ata_id_has_iordy(adev->id)) in ata_pio_need_iordy()
1721 * ata_pio_mask_no_iordy - Return the non IORDY mask
1725 * -1 if no iordy mode is available.
1730 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ in ata_pio_mask_no_iordy()
1731 u16 pio = adev->id[ATA_ID_EIDE_PIO]; in ata_pio_mask_no_iordy()
1734 /* This is cycle times not frequency - watch the logic! */ in ata_pio_mask_no_iordy()
1744 * ata_do_dev_read_id - default ID read method
1746 * @tf: proposed taskfile
1754 struct ata_taskfile *tf, u16 *id) in ata_do_dev_read_id() argument
1756 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, in ata_do_dev_read_id()
1762 * ata_dev_read_id - Read ID data from the specified device
1771 * for pre-ATA4 drives.
1780 * 0 on success, -errno otherwise.
1785 struct ata_port *ap = dev->link->ap; in ata_dev_read_id()
1787 struct ata_taskfile tf; in ata_dev_read_id() local
1798 ata_tf_init(dev, &tf); in ata_dev_read_id()
1806 tf.command = ATA_CMD_ID_ATA; in ata_dev_read_id()
1809 tf.command = ATA_CMD_ID_ATAPI; in ata_dev_read_id()
1812 rc = -ENODEV; in ata_dev_read_id()
1817 tf.protocol = ATA_PROT_PIO; in ata_dev_read_id()
1819 /* Some devices choke if TF registers contain garbage. Make in ata_dev_read_id()
1822 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_dev_read_id()
1827 tf.flags |= ATA_TFLAG_POLLING; in ata_dev_read_id()
1829 if (ap->ops->read_id) in ata_dev_read_id()
1830 err_mask = ap->ops->read_id(dev, &tf, id); in ata_dev_read_id()
1832 err_mask = ata_do_dev_read_id(dev, &tf, id); in ata_dev_read_id()
1837 return -ENOENT; in ata_dev_read_id()
1848 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { in ata_dev_read_id()
1850 * the wrong device class. Give a shot at the in ata_dev_read_id()
1870 return -ENOENT; in ata_dev_read_id()
1873 rc = -EIO; in ata_dev_read_id()
1878 if (dev->horkage & ATA_HORKAGE_DUMP_ID) { in ata_dev_read_id()
1894 rc = -EINVAL; in ata_dev_read_id()
1900 if (ap->host->flags & ATA_HOST_IGNORE_ATA && in ata_dev_read_id()
1904 return -ENOENT; in ata_dev_read_id()
1914 * Drive powered-up in standby mode, and requires a specific in ata_dev_read_id()
1915 * SET_FEATURES spin-up subcommand before it will accept in ata_dev_read_id()
1920 rc = -EIO; in ata_dev_read_id()
1935 * The exact sequence expected by certain pre-ATA4 drives is: in ata_dev_read_id()
1948 rc = -EIO; in ata_dev_read_id()
1953 /* current CHS translation info (id[53-58]) might be in ata_dev_read_id()
1973 * ata_read_log_page - read a specific log page
1991 unsigned long ap_flags = dev->link->ap->flags; in ata_read_log_page()
1992 struct ata_taskfile tf; in ata_read_log_page() local
1996 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); in ata_read_log_page()
2000 * which e.g. lockup on a read log page. in ata_read_log_page()
2006 ata_tf_init(dev, &tf); in ata_read_log_page()
2007 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && in ata_read_log_page()
2008 !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) { in ata_read_log_page()
2009 tf.command = ATA_CMD_READ_LOG_DMA_EXT; in ata_read_log_page()
2010 tf.protocol = ATA_PROT_DMA; in ata_read_log_page()
2013 tf.command = ATA_CMD_READ_LOG_EXT; in ata_read_log_page()
2014 tf.protocol = ATA_PROT_PIO; in ata_read_log_page()
2017 tf.lbal = log; in ata_read_log_page()
2018 tf.lbam = page; in ata_read_log_page()
2019 tf.nsect = sectors; in ata_read_log_page()
2020 tf.hob_nsect = sectors >> 8; in ata_read_log_page()
2021 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; in ata_read_log_page()
2023 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, in ata_read_log_page()
2027 dev->horkage |= ATA_HORKAGE_NO_DMA_LOG; in ata_read_log_page()
2038 struct ata_port *ap = dev->link->ap; in ata_log_supported()
2040 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) in ata_log_supported()
2042 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false; in ata_log_supported()
2047 struct ata_port *ap = dev->link->ap; in ata_identify_page_supported()
2059 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf, in ata_identify_page_supported()
2068 for (i = 0; i < ap->sector_buf[8]; i++) { in ata_identify_page_supported()
2069 if (ap->sector_buf[9 + i] == page) in ata_identify_page_supported()
2084 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) in ata_do_link_spd_horkage()
2089 target_limit = (1 << target) - 1; in ata_do_link_spd_horkage()
2092 if (plink->sata_spd_limit <= target_limit) in ata_do_link_spd_horkage()
2095 plink->sata_spd_limit = target_limit; in ata_do_link_spd_horkage()
2097 /* Request another EH round by returning -EAGAIN if link is in ata_do_link_spd_horkage()
2101 if (plink->sata_spd > target) { in ata_do_link_spd_horkage()
2104 return -EAGAIN; in ata_do_link_spd_horkage()
2111 struct ata_port *ap = dev->link->ap; in ata_dev_knobble()
2116 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); in ata_dev_knobble()
2121 struct ata_port *ap = dev->link->ap; in ata_dev_config_ncq_send_recv()
2129 0, ap->sector_buf, 1); in ata_dev_config_ncq_send_recv()
2135 u8 *cmds = dev->ncq_send_recv_cmds; in ata_dev_config_ncq_send_recv()
2137 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; in ata_dev_config_ncq_send_recv()
2138 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); in ata_dev_config_ncq_send_recv()
2140 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { in ata_dev_config_ncq_send_recv()
2150 struct ata_port *ap = dev->link->ap; in ata_dev_config_ncq_non_data()
2159 0, ap->sector_buf, 1); in ata_dev_config_ncq_non_data()
2162 "failed to get NCQ Non-Data Log Emask 0x%x\n", in ata_dev_config_ncq_non_data()
2165 u8 *cmds = dev->ncq_non_data_cmds; in ata_dev_config_ncq_non_data()
2167 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE); in ata_dev_config_ncq_non_data()
2173 struct ata_port *ap = dev->link->ap; in ata_dev_config_ncq_prio()
2176 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) { in ata_dev_config_ncq_prio()
2177 dev->flags &= ~ATA_DFLAG_NCQ_PRIO; in ata_dev_config_ncq_prio()
2184 ap->sector_buf, in ata_dev_config_ncq_prio()
2193 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) { in ata_dev_config_ncq_prio()
2194 dev->flags |= ATA_DFLAG_NCQ_PRIO; in ata_dev_config_ncq_prio()
2196 dev->flags &= ~ATA_DFLAG_NCQ_PRIO; in ata_dev_config_ncq_prio()
2205 struct ata_port *ap = dev->link->ap; in ata_dev_config_ncq()
2206 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); in ata_dev_config_ncq()
2210 if (!ata_id_has_ncq(dev->id)) { in ata_dev_config_ncq()
2216 if (dev->horkage & ATA_HORKAGE_NONCQ) { in ata_dev_config_ncq()
2220 if (ap->flags & ATA_FLAG_NCQ) { in ata_dev_config_ncq()
2221 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE); in ata_dev_config_ncq()
2222 dev->flags |= ATA_DFLAG_NCQ; in ata_dev_config_ncq()
2225 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && in ata_dev_config_ncq()
2226 (ap->flags & ATA_FLAG_FPDMA_AA) && in ata_dev_config_ncq()
2227 ata_id_has_fpdma_aa(dev->id)) { in ata_dev_config_ncq()
2235 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; in ata_dev_config_ncq()
2236 return -EIO; in ata_dev_config_ncq()
2248 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) { in ata_dev_config_ncq()
2249 if (ata_id_has_ncq_send_and_recv(dev->id)) in ata_dev_config_ncq()
2251 if (ata_id_has_ncq_non_data(dev->id)) in ata_dev_config_ncq()
2253 if (ata_id_has_ncq_prio(dev->id)) in ata_dev_config_ncq()
2264 if (!ata_id_has_sense_reporting(dev->id)) in ata_dev_config_sense_reporting()
2267 if (ata_id_sense_reporting_enabled(dev->id)) in ata_dev_config_sense_reporting()
2280 struct ata_port *ap = dev->link->ap; in ata_dev_config_zac()
2282 u8 *identify_buf = ap->sector_buf; in ata_dev_config_zac()
2284 dev->zac_zones_optimal_open = U32_MAX; in ata_dev_config_zac()
2285 dev->zac_zones_optimal_nonseq = U32_MAX; in ata_dev_config_zac()
2286 dev->zac_zones_max_open = U32_MAX; in ata_dev_config_zac()
2289 * Always set the 'ZAC' flag for Host-managed devices. in ata_dev_config_zac()
2291 if (dev->class == ATA_DEV_ZAC) in ata_dev_config_zac()
2292 dev->flags |= ATA_DFLAG_ZAC; in ata_dev_config_zac()
2293 else if (ata_id_zoned_cap(dev->id) == 0x01) in ata_dev_config_zac()
2295 * Check for host-aware devices. in ata_dev_config_zac()
2297 dev->flags |= ATA_DFLAG_ZAC; in ata_dev_config_zac()
2299 if (!(dev->flags & ATA_DFLAG_ZAC)) in ata_dev_config_zac()
2309 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information) in ata_dev_config_zac()
2319 dev->zac_zoned_cap = (zoned_cap & 1); in ata_dev_config_zac()
2322 dev->zac_zones_optimal_open = (u32)opt_open; in ata_dev_config_zac()
2325 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq; in ata_dev_config_zac()
2328 dev->zac_zones_max_open = (u32)max_open; in ata_dev_config_zac()
2334 struct ata_port *ap = dev->link->ap; in ata_dev_config_trusted()
2338 if (!ata_id_has_trusted(dev->id)) in ata_dev_config_trusted()
2348 ap->sector_buf, 1); in ata_dev_config_trusted()
2355 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]); in ata_dev_config_trusted()
2363 dev->flags |= ATA_DFLAG_TRUSTED; in ata_dev_config_trusted()
2367 * ata_dev_configure - Configure the specified ATA/ATAPI device
2370 * Configure @dev according to @dev->id. Generic and low-level
2377 * 0 on success, -errno otherwise
2381 struct ata_port *ap = dev->link->ap; in ata_dev_configure()
2382 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_dev_configure()
2383 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; in ata_dev_configure()
2384 const u16 *id = dev->id; in ata_dev_configure()
2387 char revbuf[7]; /* XYZ-99\0 */ in ata_dev_configure()
2393 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); in ata_dev_configure()
2401 dev->horkage |= ata_dev_blacklisted(dev); in ata_dev_configure()
2404 if (dev->horkage & ATA_HORKAGE_DISABLE) { in ata_dev_configure()
2410 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && in ata_dev_configure()
2411 dev->class == ATA_DEV_ATAPI) { in ata_dev_configure()
2423 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ in ata_dev_configure()
2424 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && in ata_dev_configure()
2426 dev->horkage |= ATA_HORKAGE_NOLPM; in ata_dev_configure()
2428 if (ap->flags & ATA_FLAG_NO_LPM) in ata_dev_configure()
2429 dev->horkage |= ATA_HORKAGE_NOLPM; in ata_dev_configure()
2431 if (dev->horkage & ATA_HORKAGE_NOLPM) { in ata_dev_configure()
2433 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; in ata_dev_configure()
2455 /* initialize to-be-configured parameters */ in ata_dev_configure()
2456 dev->flags &= ~ATA_DFLAG_CFG_MASK; in ata_dev_configure()
2457 dev->max_sectors = 0; in ata_dev_configure()
2458 dev->cdb_len = 0; in ata_dev_configure()
2459 dev->n_sectors = 0; in ata_dev_configure()
2460 dev->cylinders = 0; in ata_dev_configure()
2461 dev->heads = 0; in ata_dev_configure()
2462 dev->sectors = 0; in ata_dev_configure()
2463 dev->multi_count = 0; in ata_dev_configure()
2475 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ in ata_dev_configure()
2476 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, in ata_dev_configure()
2479 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, in ata_dev_configure()
2482 /* ATA-specific feature tests */ in ata_dev_configure()
2483 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { in ata_dev_configure()
2491 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); in ata_dev_configure()
2498 dev->n_sectors = ata_id_n_sectors(id); in ata_dev_configure()
2501 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { in ata_dev_configure()
2502 unsigned int max = dev->id[47] & 0xff; in ata_dev_configure()
2503 unsigned int cnt = dev->id[59] & 0xff; in ata_dev_configure()
2507 dev->multi_count = cnt; in ata_dev_configure()
2515 dev->flags |= ATA_DFLAG_LBA; in ata_dev_configure()
2517 dev->flags |= ATA_DFLAG_LBA48; in ata_dev_configure()
2520 if (dev->n_sectors >= (1UL << 28) && in ata_dev_configure()
2522 dev->flags |= ATA_DFLAG_FLUSH_EXT; in ata_dev_configure()
2537 (unsigned long long)dev->n_sectors, in ata_dev_configure()
2538 dev->multi_count, lba_desc, ncq_desc); in ata_dev_configure()
2544 dev->cylinders = id[1]; in ata_dev_configure()
2545 dev->heads = id[3]; in ata_dev_configure()
2546 dev->sectors = id[6]; in ata_dev_configure()
2550 dev->cylinders = id[54]; in ata_dev_configure()
2551 dev->heads = id[55]; in ata_dev_configure()
2552 dev->sectors = id[56]; in ata_dev_configure()
2562 (unsigned long long)dev->n_sectors, in ata_dev_configure()
2563 dev->multi_count, dev->cylinders, in ata_dev_configure()
2564 dev->heads, dev->sectors); in ata_dev_configure()
2571 if (ata_id_has_devslp(dev->id)) { in ata_dev_configure()
2572 u8 *sata_setting = ap->sector_buf; in ata_dev_configure()
2575 dev->flags |= ATA_DFLAG_DEVSLP; in ata_dev_configure()
2588 dev->devslp_timing[i] = sata_setting[j]; in ata_dev_configure()
2594 dev->cdb_len = 32; in ata_dev_configure()
2597 /* ATAPI-specific feature tests */ in ata_dev_configure()
2598 else if (dev->class == ATA_DEV_ATAPI) { in ata_dev_configure()
2608 rc = -EINVAL; in ata_dev_configure()
2611 dev->cdb_len = (unsigned int) rc; in ata_dev_configure()
2619 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && in ata_dev_configure()
2621 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { in ata_dev_configure()
2630 dev->flags |= ATA_DFLAG_AN; in ata_dev_configure()
2635 if (ata_id_cdb_intr(dev->id)) { in ata_dev_configure()
2636 dev->flags |= ATA_DFLAG_CDB_INTR; in ata_dev_configure()
2640 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { in ata_dev_configure()
2641 dev->flags |= ATA_DFLAG_DMADIR; in ata_dev_configure()
2645 if (ata_id_has_da(dev->id)) { in ata_dev_configure()
2646 dev->flags |= ATA_DFLAG_DA; in ata_dev_configure()
2661 dev->max_sectors = ATA_MAX_SECTORS; in ata_dev_configure()
2662 if (dev->flags & ATA_DFLAG_LBA48) in ata_dev_configure()
2663 dev->max_sectors = ATA_MAX_SECTORS_LBA48; in ata_dev_configure()
2670 dev->udma_mask &= ATA_UDMA5; in ata_dev_configure()
2671 dev->max_sectors = ATA_MAX_SECTORS; in ata_dev_configure()
2674 if ((dev->class == ATA_DEV_ATAPI) && in ata_dev_configure()
2676 dev->max_sectors = ATA_MAX_SECTORS_TAPE; in ata_dev_configure()
2677 dev->horkage |= ATA_HORKAGE_STUCK_ERR; in ata_dev_configure()
2680 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) in ata_dev_configure()
2681 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, in ata_dev_configure()
2682 dev->max_sectors); in ata_dev_configure()
2684 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) in ata_dev_configure()
2685 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, in ata_dev_configure()
2686 dev->max_sectors); in ata_dev_configure()
2688 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) in ata_dev_configure()
2689 dev->max_sectors = ATA_MAX_SECTORS_LBA48; in ata_dev_configure()
2691 if (ap->ops->dev_config) in ata_dev_configure()
2692 ap->ops->dev_config(dev); in ata_dev_configure()
2694 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { in ata_dev_configure()
2696 rescue purposes, or in case the vendor is just a blithering in ata_dev_configure()
2703 "Drive reports diagnostics failure. This may indicate a drive\n"); in ata_dev_configure()
2709 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { in ata_dev_configure()
2723 * ata_cable_40wire - return 40 wire cable type
2737 * ata_cable_80wire - return 80 wire cable type
2751 * ata_cable_unknown - return unknown PATA cable.
2764 * ata_cable_ignore - return ignored PATA cable.
2777 * ata_cable_sata - return SATA cable type
2790 * ata_bus_probe - Reset and probe ATA bus
2793 * Master ATA bus probing function. Initiates a hardware-dependent
2811 ata_for_each_dev(dev, &ap->link, ALL) in ata_bus_probe()
2812 tries[dev->devno] = ATA_PROBE_MAX_TRIES; in ata_bus_probe()
2815 ata_for_each_dev(dev, &ap->link, ALL) { in ata_bus_probe()
2818 * we do a hard reset (or are coming from power on) in ata_bus_probe()
2819 * this is true for ATA or ATAPI. Until we've set a in ata_bus_probe()
2823 dev->pio_mode = XFER_PIO_0; in ata_bus_probe()
2824 dev->dma_mode = 0xff; in ata_bus_probe()
2826 /* If the controller has a pio mode setup function in ata_bus_probe()
2831 if (ap->ops->set_piomode) in ata_bus_probe()
2832 ap->ops->set_piomode(ap, dev); in ata_bus_probe()
2836 ap->ops->phy_reset(ap); in ata_bus_probe()
2838 ata_for_each_dev(dev, &ap->link, ALL) { in ata_bus_probe()
2839 if (dev->class != ATA_DEV_UNKNOWN) in ata_bus_probe()
2840 classes[dev->devno] = dev->class; in ata_bus_probe()
2842 classes[dev->devno] = ATA_DEV_NONE; in ata_bus_probe()
2844 dev->class = ATA_DEV_UNKNOWN; in ata_bus_probe()
2848 specific sequence bass-ackwards so that PDIAG- is released by in ata_bus_probe()
2851 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { in ata_bus_probe()
2852 if (tries[dev->devno]) in ata_bus_probe()
2853 dev->class = classes[dev->devno]; in ata_bus_probe()
2858 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, in ata_bus_probe()
2859 dev->id); in ata_bus_probe()
2864 /* Now ask for the cable type as PDIAG- should have been released */ in ata_bus_probe()
2865 if (ap->ops->cable_detect) in ata_bus_probe()
2866 ap->cbl = ap->ops->cable_detect(ap); in ata_bus_probe()
2870 * drives indicate we have a bridge, we don't know which end in ata_bus_probe()
2871 * of the link the bridge is which is a problem. in ata_bus_probe()
2873 ata_for_each_dev(dev, &ap->link, ENABLED) in ata_bus_probe()
2874 if (ata_id_is_sata(dev->id)) in ata_bus_probe()
2875 ap->cbl = ATA_CBL_SATA; in ata_bus_probe()
2880 ata_for_each_dev(dev, &ap->link, ENABLED) { in ata_bus_probe()
2881 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; in ata_bus_probe()
2883 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; in ata_bus_probe()
2889 rc = ata_set_mode(&ap->link, &dev); in ata_bus_probe()
2893 ata_for_each_dev(dev, &ap->link, ENABLED) in ata_bus_probe()
2896 return -ENODEV; in ata_bus_probe()
2899 tries[dev->devno]--; in ata_bus_probe()
2902 case -EINVAL: in ata_bus_probe()
2904 tries[dev->devno] = 0; in ata_bus_probe()
2907 case -ENODEV: in ata_bus_probe()
2909 tries[dev->devno] = min(tries[dev->devno], 1); in ata_bus_probe()
2911 case -EIO: in ata_bus_probe()
2912 if (tries[dev->devno] == 1) { in ata_bus_probe()
2916 sata_down_spd_limit(&ap->link, 0); in ata_bus_probe()
2921 if (!tries[dev->devno]) in ata_bus_probe()
2928 * sata_print_link_status - Print SATA link status
2931 * This function prints link speed and status of a SATA link.
2955 * ata_dev_pair - return other device on cable
2964 struct ata_link *link = adev->link; in ata_dev_pair()
2965 struct ata_device *pair = &link->device[1 - adev->devno]; in ata_dev_pair()
2973 * sata_down_spd_limit - adjust SATA spd limit downward
2981 * If @spd_limit is non-zero, the speed is limited to equal to or
2998 return -EOPNOTSUPP; in sata_down_spd_limit()
3001 * If not, use cached value in link->sata_spd. in sata_down_spd_limit()
3007 spd = link->sata_spd; in sata_down_spd_limit()
3009 mask = link->sata_spd_limit; in sata_down_spd_limit()
3011 return -EINVAL; in sata_down_spd_limit()
3014 bit = fls(mask) - 1; in sata_down_spd_limit()
3022 * Otherwise, we should not force 1.5Gbps on a link where we have in sata_down_spd_limit()
3027 mask &= (1 << (spd - 1)) - 1; in sata_down_spd_limit()
3029 return -EINVAL; in sata_down_spd_limit()
3033 return -EINVAL; in sata_down_spd_limit()
3036 if (mask & ((1 << spd_limit) - 1)) in sata_down_spd_limit()
3037 mask &= (1 << spd_limit) - 1; in sata_down_spd_limit()
3039 bit = ffs(mask) - 1; in sata_down_spd_limit()
3044 link->sata_spd_limit = mask; in sata_down_spd_limit()
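Editor's note: the speed-limit bookkeeping is plain bitmask arithmetic: bit 0 is 1.5 Gbps, bit 1 is 3.0 Gbps, bit 2 is 6.0 Gbps. A worked example of one downgrade step:

    /*
     * Example: link->sata_spd_limit = 0x7 (1.5/3.0/6.0 allowed),
     *          current link->sata_spd = 3 (6.0 Gbps).
     *   mask &= (1 << (spd - 1)) - 1;   ->  mask = 0x3
     * Highest remaining bit is bit 1, so the new limit caps the link
     * at 3.0 Gbps on the next speed negotiation.
     */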
3054 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3075 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) in ata_timing_cycle2mode()
3076 if (ent->shift == xfer_shift) in ata_timing_cycle2mode()
3077 base_mode = ent->base; in ata_timing_cycle2mode()
3080 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { in ata_timing_cycle2mode()
3086 this_cycle = t->cycle; in ata_timing_cycle2mode()
3089 this_cycle = t->udma; in ata_timing_cycle2mode()
3098 last_mode = t->mode; in ata_timing_cycle2mode()
3106 * ata_down_xfermask_limit - adjust dev xfer masks downward
3130 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, in ata_down_xfermask_limit()
3131 dev->mwdma_mask, in ata_down_xfermask_limit()
3132 dev->udma_mask); in ata_down_xfermask_limit()
3137 highbit = fls(pio_mask) - 1; in ata_down_xfermask_limit()
3143 highbit = fls(udma_mask) - 1; in ata_down_xfermask_limit()
3146 return -ENOENT; in ata_down_xfermask_limit()
3148 highbit = fls(mwdma_mask) - 1; in ata_down_xfermask_limit()
3151 return -ENOENT; in ata_down_xfermask_limit()
3174 return -ENOENT; in ata_down_xfermask_limit()
3188 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, in ata_down_xfermask_limit()
3189 &dev->udma_mask); in ata_down_xfermask_limit()
3196 struct ata_port *ap = dev->link->ap; in ata_dev_set_mode()
3197 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_dev_set_mode()
3198 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; in ata_dev_set_mode()
3204 dev->flags &= ~ATA_DFLAG_PIO; in ata_dev_set_mode()
3205 if (dev->xfer_shift == ATA_SHIFT_PIO) in ata_dev_set_mode()
3206 dev->flags |= ATA_DFLAG_PIO; in ata_dev_set_mode()
3208 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) in ata_dev_set_mode()
3213 "NOSETXFER but PATA detected - can't " in ata_dev_set_mode()
3222 ehc->i.flags |= ATA_EHI_POST_SETMODE; in ata_dev_set_mode()
3224 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; in ata_dev_set_mode()
3228 if (dev->xfer_shift == ATA_SHIFT_PIO) { in ata_dev_set_mode()
3230 if (ata_id_is_cfa(dev->id)) in ata_dev_set_mode()
3234 if (ata_id_major_version(dev->id) == 0 && in ata_dev_set_mode()
3235 dev->pio_mode <= XFER_PIO_2) in ata_dev_set_mode()
3238 any kind of SET_XFERMODE request but support PIO0-2 in ata_dev_set_mode()
3240 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) in ata_dev_set_mode()
3245 if (dev->xfer_shift == ATA_SHIFT_MWDMA && in ata_dev_set_mode()
3246 dev->dma_mode == XFER_MW_DMA_0 && in ata_dev_set_mode()
3247 (dev->id[63] >> 8) & 1) in ata_dev_set_mode()
3251 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) in ata_dev_set_mode()
3262 dev->xfer_shift, (int)dev->xfer_mode); in ata_dev_set_mode()
3264 if (!(ehc->i.flags & ATA_EHI_QUIET) || in ata_dev_set_mode()
3265 ehc->i.flags & ATA_EHI_DID_HARDRESET) in ata_dev_set_mode()
3267 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), in ata_dev_set_mode()
3274 return -EIO; in ata_dev_set_mode()
3278 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3296 struct ata_port *ap = link->ap; in ata_do_set_mode()
3306 if (dev->class == ATA_DEV_ATAPI) in ata_do_set_mode()
3308 else if (ata_id_is_cfa(dev->id)) in ata_do_set_mode()
3314 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); in ata_do_set_mode()
3317 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, in ata_do_set_mode()
3318 dev->udma_mask); in ata_do_set_mode()
3322 dev->pio_mode = ata_xfer_mask2mode(pio_mask); in ata_do_set_mode()
3323 dev->dma_mode = ata_xfer_mask2mode(dma_mask); in ata_do_set_mode()
3334 if (dev->pio_mode == 0xff) { in ata_do_set_mode()
3336 rc = -EINVAL; in ata_do_set_mode()
3340 dev->xfer_mode = dev->pio_mode; in ata_do_set_mode()
3341 dev->xfer_shift = ATA_SHIFT_PIO; in ata_do_set_mode()
3342 if (ap->ops->set_piomode) in ata_do_set_mode()
3343 ap->ops->set_piomode(ap, dev); in ata_do_set_mode()
3351 dev->xfer_mode = dev->dma_mode; in ata_do_set_mode()
3352 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); in ata_do_set_mode()
3353 if (ap->ops->set_dmamode) in ata_do_set_mode()
3354 ap->ops->set_dmamode(ap, dev); in ata_do_set_mode()
3367 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) in ata_do_set_mode()
3368 ap->host->simplex_claimed = ap; in ata_do_set_mode()
3378 * ata_wait_ready - wait for link to become ready
3384 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3388 * Transient -ENODEV conditions are allowed for
3395 * 0 if @link is ready before @deadline; otherwise, -errno.
3405 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) in ata_wait_ready()
3414 WARN_ON(link == link->ap->slave_link); in ata_wait_ready()
3428 * -ENODEV could be transient. Ignore -ENODEV if link in ata_wait_ready()
3429 * is online. Also, some SATA devices take a long in ata_wait_ready()
3431 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't in ata_wait_ready()
3438 if (ready == -ENODEV) { in ata_wait_ready()
3441 else if ((link->ap->flags & ATA_FLAG_SATA) && in ata_wait_ready()
3450 return -EBUSY; in ata_wait_ready()
3453 (deadline - now > 3 * HZ)) { in ata_wait_ready()
3460 ata_msleep(link->ap, 50); in ata_wait_ready()
3465 * ata_wait_after_reset - wait for link to become ready after reset
3476 * 0 if @link is ready before @deadline; otherwise, -errno.
3481 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); in ata_wait_after_reset()
3488 * ata_std_prereset - prepare for reset
3494 * that port, so prereset should be best-effort. It does its
3502 * 0 on success, -errno otherwise.
3506 struct ata_port *ap = link->ap; in ata_std_prereset()
3507 struct ata_eh_context *ehc = &link->eh_context; in ata_std_prereset()
3512 if (ehc->i.action & ATA_EH_HARDRESET) in ata_std_prereset()
3516 if (ap->flags & ATA_FLAG_SATA) { in ata_std_prereset()
3519 if (rc && rc != -EOPNOTSUPP) in ata_std_prereset()
3527 ehc->i.action &= ~ATA_EH_SOFTRESET; in ata_std_prereset()
3534 * sata_std_hardreset - COMRESET w/o waiting or classification
3545 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3550 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); in sata_std_hardreset()
3556 return online ? -EAGAIN : rc; in sata_std_hardreset()
3561 * ata_std_postreset - standard postreset callback
3565 * This function is invoked after a successful reset. Note that
3590 * ata_dev_same_device - Determine whether new ID matches configured device
3608 const u16 *old_id = dev->id; in ata_dev_same_device()
3612 if (dev->class != new_class) { in ata_dev_same_device()
3614 dev->class, new_class); in ata_dev_same_device()
3639 * ata_dev_reread_id - Re-read IDENTIFY data
3643 * Re-read IDENTIFY page and make sure @dev is still attached to
3654 unsigned int class = dev->class; in ata_dev_reread_id()
3655 u16 *id = (void *)dev->link->ap->sector_buf; in ata_dev_reread_id()
3665 return -ENODEV; in ata_dev_reread_id()
3667 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); in ata_dev_reread_id()
3672 * ata_dev_revalidate - Revalidate ATA device
3677 * Re-read IDENTIFY page, make sure @dev is still attached to the
3689 u64 n_sectors = dev->n_sectors; in ata_dev_revalidate()
3690 u64 n_native_sectors = dev->n_native_sectors; in ata_dev_revalidate()
3694 return -ENODEV; in ata_dev_revalidate()
3703 dev->class, new_class); in ata_dev_revalidate()
3704 rc = -ENODEV; in ata_dev_revalidate()
3708 /* re-read ID */ in ata_dev_revalidate()
3719 if (dev->class != ATA_DEV_ATA || !n_sectors || in ata_dev_revalidate()
3720 dev->n_sectors == n_sectors) in ata_dev_revalidate()
3726 (unsigned long long)dev->n_sectors); in ata_dev_revalidate()
3733 if (dev->n_native_sectors == n_native_sectors && in ata_dev_revalidate()
3734 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { in ata_dev_revalidate()
3748 if (dev->n_native_sectors == n_native_sectors && in ata_dev_revalidate()
3749 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && in ata_dev_revalidate()
3750 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { in ata_dev_revalidate()
3755 dev->flags |= ATA_DFLAG_UNLOCK_HPA; in ata_dev_revalidate()
3756 rc = -EIO; in ata_dev_revalidate()
3758 rc = -ENODEV; in ata_dev_revalidate()
3761 dev->n_native_sectors = n_native_sectors; in ata_dev_revalidate()
3762 dev->n_sectors = n_sectors; in ata_dev_revalidate()
3783 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3784 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3785 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
3786 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3788 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3789 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3790 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3791 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
3792 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3793 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3794 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3795 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3796 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3797 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3798 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3799 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3800 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3802 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
3805 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
3810 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3811 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
3812 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
3813 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
3825 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
3826 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
3832 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3833 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3844 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3847 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3850 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3853 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3858 { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
3863 Windows driver .inf file - also several Linux problem reports */
3869 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
3876 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3877 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3881 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
3891 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
3894 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
3895 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
3905 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
3906 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
3907 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
3908 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
3909 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
3930 /* These specific Samsung models/firmware-revs do not handle LPM well */
3931 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
3933 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
3934 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
3987 * Some WD SATA-I drives spin up and down erratically when the link
3990 * known prefixes and is SATA-1. As a side effect LPM partial is
3995 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
3996 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
3997 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
3998 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
3999 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4000 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4001 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4013 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); in ata_dev_blacklisted()
4014 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); in ata_dev_blacklisted()
4016 while (ad->model_num) { in ata_dev_blacklisted()
4017 if (glob_match(ad->model_num, model_num)) { in ata_dev_blacklisted()
4018 if (ad->model_rev == NULL) in ata_dev_blacklisted()
4019 return ad->horkage; in ata_dev_blacklisted()
4020 if (glob_match(ad->model_rev, model_rev)) in ata_dev_blacklisted()
4021 return ad->horkage; in ata_dev_blacklisted()
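Editor's note: the blacklist lookup glob-matches both the model number and, when present, the firmware revision, so one entry covers a family of drives. A rough illustration using glob_match() from lib/glob.c and a pattern from the table above:

    /* "ST3640[36]23AS" covers both ST3640323AS and ST3640623AS: */
    glob_match("ST3640[36]23AS", "ST3640323AS");   /* true  */
    glob_match("ST3640[36]23AS", "ST3640423AS");   /* false */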
4031 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) in ata_dma_blacklisted()
4034 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && in ata_dma_blacklisted()
4035 (dev->flags & ATA_DFLAG_CDB_INTR)) in ata_dma_blacklisted()
4037 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; in ata_dma_blacklisted()
4041 * ata_is_40wire - check drive side detection
4050 if (dev->horkage & ATA_HORKAGE_IVB) in ata_is_40wire()
4051 return ata_drive_40wire_relaxed(dev->id); in ata_is_40wire()
4052 return ata_drive_40wire(dev->id); in ata_is_40wire()
4056 * cable_is_40wire - 40/80/SATA decider
4061 * there is a good case for setting ap->cbl to the result when
4074 if (ap->cbl == ATA_CBL_PATA40) in cable_is_40wire()
4078 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) in cable_is_40wire()
4085 if (ap->cbl == ATA_CBL_PATA40_SHORT) in cable_is_40wire()
4092 * - in many setups only the one drive (slave if present) will in cable_is_40wire()
4093 * give a valid detect in cable_is_40wire()
4094 * - if you have a non detect capable drive you don't want it in cable_is_40wire()
4107 * ata_dev_xfermask - Compute supported xfermask of the given device
4111 * dev->*_mask. This function is responsible for applying all
4120 struct ata_link *link = dev->link; in ata_dev_xfermask()
4121 struct ata_port *ap = link->ap; in ata_dev_xfermask()
4122 struct ata_host *host = ap->host; in ata_dev_xfermask()
4126 xfer_mask = ata_pack_xfermask(ap->pio_mask, in ata_dev_xfermask()
4127 ap->mwdma_mask, ap->udma_mask); in ata_dev_xfermask()
4130 xfer_mask &= ata_pack_xfermask(dev->pio_mask, in ata_dev_xfermask()
4131 dev->mwdma_mask, dev->udma_mask); in ata_dev_xfermask()
4132 xfer_mask &= ata_id_xfermask(dev->id); in ata_dev_xfermask()
4135 * CFA Advanced TrueIDE timings are not allowed on a shared in ata_dev_xfermask()
4151 if ((host->flags & ATA_HOST_SIMPLEX) && in ata_dev_xfermask()
4152 host->simplex_claimed && host->simplex_claimed != ap) { in ata_dev_xfermask()
4158 if (ap->flags & ATA_FLAG_NO_IORDY) in ata_dev_xfermask()
4161 if (ap->ops->mode_filter) in ata_dev_xfermask()
4162 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); in ata_dev_xfermask()
4169 * drive side as well. Cases where we know a 40wire cable in ata_dev_xfermask()
4176 "limited to UDMA/33 due to 40-wire cable\n"); in ata_dev_xfermask()
4180 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, in ata_dev_xfermask()
4181 &dev->mwdma_mask, &dev->udma_mask); in ata_dev_xfermask()
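/*
 * Sketch of the mask packing used above (assumes the ATA_SHIFT_xxx and
 * ATA_MASK_xxx layout from <linux/ata.h>; see ata_pack_xfermask() for
 * the authoritative version): the three per-class masks are folded into
 * a single word so the filters can operate on one value, roughly
 *
 *	packed = ((pio   << ATA_SHIFT_PIO)   & ATA_MASK_PIO)   |
 *		 ((mwdma << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
 *		 ((udma  << ATA_SHIFT_UDMA)  & ATA_MASK_UDMA);
 *
 * and ata_unpack_xfermask() splits such a word back into the three
 * per-device masks.
 */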
4185 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4188 * Issue SET FEATURES - XFER MODE command to device @dev
4200 struct ata_taskfile tf; in ata_dev_set_xfermode() local
4203 /* set up set-features taskfile */ in ata_dev_set_xfermode()
4204 DPRINTK("set features - xfer mode\n"); in ata_dev_set_xfermode()
4209 ata_tf_init(dev, &tf); in ata_dev_set_xfermode()
4210 tf.command = ATA_CMD_SET_FEATURES; in ata_dev_set_xfermode()
4211 tf.feature = SETFEATURES_XFER; in ata_dev_set_xfermode()
4212 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; in ata_dev_set_xfermode()
4213 tf.protocol = ATA_PROT_NODATA; in ata_dev_set_xfermode()
4216 tf.nsect = dev->xfer_mode; in ata_dev_set_xfermode()
4217 /* If the device has IORDY and the controller does not - turn it off */ in ata_dev_set_xfermode()
4218 else if (ata_id_has_iordy(dev->id)) in ata_dev_set_xfermode()
4219 tf.nsect = 0x01; in ata_dev_set_xfermode()
4220 else /* In the ancient relic department - skip all of this */ in ata_dev_set_xfermode()
4223 /* On some disks, this command causes spin-up, so we need a longer timeout */ in ata_dev_set_xfermode()
4224 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); in ata_dev_set_xfermode()
4231 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4236 * Issue SET FEATURES - SATA FEATURES command to device @dev
4247 struct ata_taskfile tf; in ata_dev_set_feature() local
4251 /* set up set-features taskfile */ in ata_dev_set_feature()
4252 DPRINTK("set features - SATA features\n"); in ata_dev_set_feature()
4254 ata_tf_init(dev, &tf); in ata_dev_set_feature()
4255 tf.command = ATA_CMD_SET_FEATURES; in ata_dev_set_feature()
4256 tf.feature = enable; in ata_dev_set_feature()
4257 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_dev_set_feature()
4258 tf.protocol = ATA_PROT_NODATA; in ata_dev_set_feature()
4259 tf.nsect = feature; in ata_dev_set_feature()
4264 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout); in ata_dev_set_feature()
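/*
 * Illustrative caller (a sketch based on how libata's LPM code uses
 * this helper; not a verbatim quote from this file): enabling DIPM on a
 * device looks roughly like
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *	if (err_mask)
 *		ata_dev_warn(dev, "failed to enable DIPM, Emask 0x%x\n",
 *			     err_mask);
 */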
4272 * ata_dev_init_params - Issue INIT DEV PARAMS command
4286 struct ata_taskfile tf; in ata_dev_init_params() local
4289 /* Number of sectors per track 1-255. Number of heads 1-16 */ in ata_dev_init_params()
4296 ata_tf_init(dev, &tf); in ata_dev_init_params()
4297 tf.command = ATA_CMD_INIT_DEV_PARAMS; in ata_dev_init_params()
4298 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_dev_init_params()
4299 tf.protocol = ATA_PROT_NODATA; in ata_dev_init_params()
4300 tf.nsect = sectors; in ata_dev_init_params()
4301 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ in ata_dev_init_params()
4303 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); in ata_dev_init_params()
4304 /* A clean abort indicates an original or just out of spec drive in ata_dev_init_params()
4307 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) in ata_dev_init_params()
4315 * atapi_check_dma - Check whether ATAPI DMA can be supported
4318 * Allow low-level driver to filter ATA PACKET commands, returning
4319 * a status indicating whether or not it is OK to use DMA for the
4330 struct ata_port *ap = qc->ap; in atapi_check_dma()
4332 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a in atapi_check_dma()
4335 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && in atapi_check_dma()
4336 unlikely(qc->nbytes & 15)) in atapi_check_dma()
4339 if (ap->ops->check_atapi_dma) in atapi_check_dma()
4340 return ap->ops->check_atapi_dma(qc); in atapi_check_dma()
4346 * ata_std_qc_defer - Check whether a qc needs to be deferred
4349 * Non-NCQ commands cannot run with any other command, NCQ or
4352 * whether a new command @qc can be issued.
4362 struct ata_link *link = qc->dev->link; in ata_std_qc_defer()
4364 if (ata_is_ncq(qc->tf.protocol)) { in ata_std_qc_defer()
4365 if (!ata_tag_valid(link->active_tag)) in ata_std_qc_defer()
4368 if (!ata_tag_valid(link->active_tag) && !link->sactive) in ata_std_qc_defer()
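/*
 * Note (summary of the policy, hedged): the checks above allow
 * immediate issue when the link is idle for the command's class; in the
 * remaining cases, i.e. an NCQ command arriving while a non-NCQ command
 * owns the link or vice versa, the stock helper defers the new command
 * (ATA_DEFER_LINK) until the link goes idle again.
 */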
4383 * ata_sg_init - Associate command with scatter-gather table.
4385 * @sg: Scatter-gather table.
4388 * Initialize the data-related elements of queued_cmd @qc
4389 * to point to a scatter-gather table @sg, containing @n_elem
4398 qc->sg = sg; in ata_sg_init()
4399 qc->n_elem = n_elem; in ata_sg_init()
4400 qc->cursg = qc->sg; in ata_sg_init()
4406 * ata_sg_clean - Unmap DMA memory associated with command
4416 struct ata_port *ap = qc->ap; in ata_sg_clean()
4417 struct scatterlist *sg = qc->sg; in ata_sg_clean()
4418 int dir = qc->dma_dir; in ata_sg_clean()
4422 VPRINTK("unmapping %u sg elements\n", qc->n_elem); in ata_sg_clean()
4424 if (qc->n_elem) in ata_sg_clean()
4425 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); in ata_sg_clean()
4427 qc->flags &= ~ATA_QCFLAG_DMAMAP; in ata_sg_clean()
4428 qc->sg = NULL; in ata_sg_clean()
4432 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4433 * @qc: Command with scatter-gather table to be mapped.
4435 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4446 struct ata_port *ap = qc->ap; in ata_sg_setup()
4449 VPRINTK("ENTER, ata%u\n", ap->print_id); in ata_sg_setup()
4451 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); in ata_sg_setup()
4453 return -1; in ata_sg_setup()
4456 qc->orig_n_elem = qc->n_elem; in ata_sg_setup()
4457 qc->n_elem = n_elem; in ata_sg_setup()
4458 qc->flags |= ATA_QCFLAG_DMAMAP; in ata_sg_setup()
4466 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; } in ata_sg_setup()
4471 * swap_buf_le16 - swap halves of 16-bit words in place
4473 * @buf_words: Number of 16-bit words in buffer.
4475 * Swap halves of 16-bit words if needed to convert from
4476 * little-endian byte order to native cpu byte order, or
4477 * vice-versa.
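/*
 * Minimal sketch of what such a helper does (assuming a __BIG_ENDIAN
 * build; on little-endian CPUs it is a no-op):
 *
 *	#ifdef __BIG_ENDIAN
 *		unsigned int i;
 *
 *		for (i = 0; i < buf_words; i++)
 *			buf[i] = le16_to_cpu(buf[i]);
 *	#endif
 */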
4493 * ata_qc_new_init - Request an available ATA command, and initialize it
4503 struct ata_port *ap = dev->link->ap; in ata_qc_new_init()
4507 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) in ata_qc_new_init()
4511 if (ap->flags & ATA_FLAG_SAS_HOST) { in ata_qc_new_init()
4518 qc->tag = qc->hw_tag = tag; in ata_qc_new_init()
4519 qc->scsicmd = NULL; in ata_qc_new_init()
4520 qc->ap = ap; in ata_qc_new_init()
4521 qc->dev = dev; in ata_qc_new_init()
4529 * ata_qc_free - free unused ata_queued_cmd
4544 ap = qc->ap; in ata_qc_free()
4546 qc->flags = 0; in ata_qc_free()
4547 tag = qc->tag; in ata_qc_free()
4549 qc->tag = ATA_TAG_POISON; in ata_qc_free()
4550 if (ap->flags & ATA_FLAG_SAS_HOST) in ata_qc_free()
4561 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); in __ata_qc_complete()
4562 ap = qc->ap; in __ata_qc_complete()
4563 link = qc->dev->link; in __ata_qc_complete()
4565 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) in __ata_qc_complete()
4569 if (ata_is_ncq(qc->tf.protocol)) { in __ata_qc_complete()
4570 link->sactive &= ~(1 << qc->hw_tag); in __ata_qc_complete()
4571 if (!link->sactive) in __ata_qc_complete()
4572 ap->nr_active_links--; in __ata_qc_complete()
4574 link->active_tag = ATA_TAG_POISON; in __ata_qc_complete()
4575 ap->nr_active_links--; in __ata_qc_complete()
4579 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && in __ata_qc_complete()
4580 ap->excl_link == link)) in __ata_qc_complete()
4581 ap->excl_link = NULL; in __ata_qc_complete()
4587 qc->flags &= ~ATA_QCFLAG_ACTIVE; in __ata_qc_complete()
4588 ap->qc_active &= ~(1ULL << qc->tag); in __ata_qc_complete()
4591 qc->complete_fn(qc); in __ata_qc_complete()
4596 struct ata_port *ap = qc->ap; in fill_result_tf()
4598 qc->result_tf.flags = qc->tf.flags; in fill_result_tf()
4599 ap->ops->qc_fill_rtf(qc); in fill_result_tf()
4604 struct ata_device *dev = qc->dev; in ata_verify_xfer()
4606 if (!ata_is_data(qc->tf.protocol)) in ata_verify_xfer()
4609 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) in ata_verify_xfer()
4612 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; in ata_verify_xfer()
4616 * ata_qc_complete - Complete an active ATA command
4620 * completed, with either an ok or not-ok status.
4632 struct ata_port *ap = qc->ap; in ata_qc_complete()
4635 ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE)); in ata_qc_complete()
4640 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. in ata_qc_complete()
4641 * Normal execution path is responsible for not accessing a in ata_qc_complete()
4650 if (ap->ops->error_handler) { in ata_qc_complete()
4651 struct ata_device *dev = qc->dev; in ata_qc_complete()
4652 struct ata_eh_info *ehi = &dev->link->eh_info; in ata_qc_complete()
4654 if (unlikely(qc->err_mask)) in ata_qc_complete()
4655 qc->flags |= ATA_QCFLAG_FAILED; in ata_qc_complete()
4659 * and always with the result TF filled. in ata_qc_complete()
4661 if (unlikely(ata_tag_internal(qc->tag))) { in ata_qc_complete()
4669 * Non-internal qc has failed. Fill the result TF and in ata_qc_complete()
4672 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { in ata_qc_complete()
4679 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); in ata_qc_complete()
4681 /* read result TF if requested */ in ata_qc_complete()
4682 if (qc->flags & ATA_QCFLAG_RESULT_TF) in ata_qc_complete()
4686 /* Some commands need post-processing after successful in ata_qc_complete()
4689 switch (qc->tf.command) { in ata_qc_complete()
4691 if (qc->tf.feature != SETFEATURES_WC_ON && in ata_qc_complete()
4692 qc->tf.feature != SETFEATURES_WC_OFF && in ata_qc_complete()
4693 qc->tf.feature != SETFEATURES_RA_ON && in ata_qc_complete()
4694 qc->tf.feature != SETFEATURES_RA_OFF) in ata_qc_complete()
4700 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; in ata_qc_complete()
4705 dev->flags |= ATA_DFLAG_SLEEPING; in ata_qc_complete()
4709 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) in ata_qc_complete()
4714 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) in ata_qc_complete()
4717 /* read result TF if failed or requested */ in ata_qc_complete()
4718 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) in ata_qc_complete()
4727 * ata_qc_get_active - get bitmask of active qcs
4738 u64 qc_active = ap->qc_active; in ata_qc_get_active()
4751 * ata_qc_issue - issue taskfile to device
4755 * This includes mapping the data into a DMA-able
4764 struct ata_port *ap = qc->ap; in ata_qc_issue()
4765 struct ata_link *link = qc->dev->link; in ata_qc_issue()
4766 u8 prot = qc->tf.protocol; in ata_qc_issue()
4768 /* Make sure only one non-NCQ command is outstanding. The in ata_qc_issue()
4772 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); in ata_qc_issue()
4775 WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag)); in ata_qc_issue()
4777 if (!link->sactive) in ata_qc_issue()
4778 ap->nr_active_links++; in ata_qc_issue()
4779 link->sactive |= 1 << qc->hw_tag; in ata_qc_issue()
4781 WARN_ON_ONCE(link->sactive); in ata_qc_issue()
4783 ap->nr_active_links++; in ata_qc_issue()
4784 link->active_tag = qc->tag; in ata_qc_issue()
4787 qc->flags |= ATA_QCFLAG_ACTIVE; in ata_qc_issue()
4788 ap->qc_active |= 1ULL << qc->tag; in ata_qc_issue()
4792 * non-zero sg if the command is a data command. in ata_qc_issue()
4794 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) in ata_qc_issue()
4798 (ap->flags & ATA_FLAG_PIO_DMA))) in ata_qc_issue()
4803 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { in ata_qc_issue()
4804 link->eh_info.action |= ATA_EH_RESET; in ata_qc_issue()
4805 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); in ata_qc_issue()
4810 qc->err_mask |= ap->ops->qc_prep(qc); in ata_qc_issue()
4811 if (unlikely(qc->err_mask)) in ata_qc_issue()
4814 qc->err_mask |= ap->ops->qc_issue(qc); in ata_qc_issue()
4815 if (unlikely(qc->err_mask)) in ata_qc_issue()
4820 qc->err_mask |= AC_ERR_SYSTEM; in ata_qc_issue()
4826 * ata_phys_link_online - test whether the given link is online
4850 * ata_phys_link_offline - test whether the given link is offline
4874 * ata_link_online - test whether the given link is online
4879 * there's a slave link, this function should only be called on
4891 struct ata_link *slave = link->ap->slave_link; in ata_link_online()
4901 * ata_link_offline - test whether the given link is offline
4906 * there's a slave link, this function should only be called on
4918 struct ata_link *slave = link->ap->slave_link; in ata_link_offline()
4938 if (ap->pflags & ATA_PFLAG_PM_PENDING) { in ata_port_request_pm()
4940 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); in ata_port_request_pm()
4944 spin_lock_irqsave(ap->lock, flags); in ata_port_request_pm()
4946 ap->pm_mesg = mesg; in ata_port_request_pm()
4947 ap->pflags |= ATA_PFLAG_PM_PENDING; in ata_port_request_pm()
4949 link->eh_info.action |= action; in ata_port_request_pm()
4950 link->eh_info.flags |= ehi_flags; in ata_port_request_pm()
4955 spin_unlock_irqrestore(ap->lock, flags); in ata_port_request_pm()
4959 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); in ata_port_request_pm()
5037 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
5039 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5050 if (adev->class == ATA_DEV_ATAPI && in ata_port_runtime_idle()
5052 return -EBUSY; in ata_port_runtime_idle()
5084 * and need to resume ata devices at the domain level, not the per-port
5101 * ata_host_suspend - suspend host
5109 host->dev->power.power_state = mesg; in ata_host_suspend()
5115 * ata_host_resume - resume host
5122 host->dev->power.power_state = PMSG_ON; in ata_host_resume()
5135 * ata_dev_init - Initialize an ata_device structure
5146 struct ata_port *ap = link->ap; in ata_dev_init()
5150 link->sata_spd_limit = link->hw_sata_spd_limit; in ata_dev_init()
5151 link->sata_spd = 0; in ata_dev_init()
5153 /* High bits of dev->flags are used to record warm plug in ata_dev_init()
5157 spin_lock_irqsave(ap->lock, flags); in ata_dev_init()
5158 dev->flags &= ~ATA_DFLAG_INIT_MASK; in ata_dev_init()
5159 dev->horkage = 0; in ata_dev_init()
5160 spin_unlock_irqrestore(ap->lock, flags); in ata_dev_init()
5163 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); in ata_dev_init()
5164 dev->pio_mask = UINT_MAX; in ata_dev_init()
5165 dev->mwdma_mask = UINT_MAX; in ata_dev_init()
5166 dev->udma_mask = UINT_MAX; in ata_dev_init()
5170 * ata_link_init - Initialize an ata_link structure
5186 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); in ata_link_init()
5188 link->ap = ap; in ata_link_init()
5189 link->pmp = pmp; in ata_link_init()
5190 link->active_tag = ATA_TAG_POISON; in ata_link_init()
5191 link->hw_sata_spd_limit = UINT_MAX; in ata_link_init()
5195 struct ata_device *dev = &link->device[i]; in ata_link_init()
5197 dev->link = link; in ata_link_init()
5198 dev->devno = dev - link->device; in ata_link_init()
5200 dev->gtf_filter = ata_acpi_gtf_filter; in ata_link_init()
5207 * sata_link_init_spd - Initialize link->sata_spd_limit
5210 * Initialize ``link->[hw_]sata_spd_limit`` to the currently
5217 * 0 on success, -errno on failure.
5224 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); in sata_link_init_spd()
5228 spd = (link->saved_scontrol >> 4) & 0xf; in sata_link_init_spd()
5230 link->hw_sata_spd_limit &= (1 << spd) - 1; in sata_link_init_spd()
5234 link->sata_spd_limit = link->hw_sata_spd_limit; in sata_link_init_spd()
5240 * ata_port_alloc - allocate and initialize basic ATA port resources
5261 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; in ata_port_alloc()
5262 ap->lock = &host->lock; in ata_port_alloc()
5263 ap->print_id = -1; in ata_port_alloc()
5264 ap->local_port_no = -1; in ata_port_alloc()
5265 ap->host = host; in ata_port_alloc()
5266 ap->dev = host->dev; in ata_port_alloc()
5270 ap->msg_enable = 0x00FF; in ata_port_alloc()
5272 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; in ata_port_alloc()
5274 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; in ata_port_alloc()
5277 mutex_init(&ap->scsi_scan_mutex); in ata_port_alloc()
5278 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); in ata_port_alloc()
5279 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); in ata_port_alloc()
5280 INIT_LIST_HEAD(&ap->eh_done_q); in ata_port_alloc()
5281 init_waitqueue_head(&ap->eh_wait_q); in ata_port_alloc()
5282 init_completion(&ap->park_req_pending); in ata_port_alloc()
5283 timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn, in ata_port_alloc()
5286 ap->cbl = ATA_CBL_NONE; in ata_port_alloc()
5288 ata_link_init(ap, &ap->link, 0); in ata_port_alloc()
5291 ap->stats.unhandled_irq = 1; in ata_port_alloc()
5292 ap->stats.idle_irq = 1; in ata_port_alloc()
5304 for (i = 0; i < host->n_ports; i++) { in ata_devres_release()
5305 struct ata_port *ap = host->ports[i]; in ata_devres_release()
5310 if (ap->scsi_host) in ata_devres_release()
5311 scsi_host_put(ap->scsi_host); in ata_devres_release()
5324 for (i = 0; i < host->n_ports; i++) { in ata_host_release()
5325 struct ata_port *ap = host->ports[i]; in ata_host_release()
5327 kfree(ap->pmp_link); in ata_host_release()
5328 kfree(ap->slave_link); in ata_host_release()
5330 host->ports[i] = NULL; in ata_host_release()
5337 kref_get(&host->kref); in ata_host_get()
5342 kref_put(&host->kref, ata_host_release); in ata_host_put()
5347 * ata_host_alloc - allocate and init basic ATA host resources
5352 * this function to allocate a host, initializes it fully and
5355 * @max_ports ports are allocated and host->n_ports is
5357 * host->n_ports before calling ata_host_register(). The unused
5375 /* alloc a container for our list of ATA ports (buses) */ in ata_host_alloc()
5391 spin_lock_init(&host->lock); in ata_host_alloc()
5392 mutex_init(&host->eh_mutex); in ata_host_alloc()
5393 host->dev = dev; in ata_host_alloc()
5394 host->n_ports = max_ports; in ata_host_alloc()
5395 kref_init(&host->kref); in ata_host_alloc()
5405 ap->port_no = i; in ata_host_alloc()
5406 host->ports[i] = ap; in ata_host_alloc()
5421 * ata_host_alloc_pinfo - alloc host and init with port_info array
5448 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { in ata_host_alloc_pinfo()
5449 struct ata_port *ap = host->ports[i]; in ata_host_alloc_pinfo()
5454 ap->pio_mask = pi->pio_mask; in ata_host_alloc_pinfo()
5455 ap->mwdma_mask = pi->mwdma_mask; in ata_host_alloc_pinfo()
5456 ap->udma_mask = pi->udma_mask; in ata_host_alloc_pinfo()
5457 ap->flags |= pi->flags; in ata_host_alloc_pinfo()
5458 ap->link.flags |= pi->link_flags; in ata_host_alloc_pinfo()
5459 ap->ops = pi->port_ops; in ata_host_alloc_pinfo()
5461 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) in ata_host_alloc_pinfo()
5462 host->ops = pi->port_ops; in ata_host_alloc_pinfo()
5474 WARN_ON(!(host->flags & ATA_HOST_STARTED)); in ata_host_stop()
5476 for (i = 0; i < host->n_ports; i++) { in ata_host_stop()
5477 struct ata_port *ap = host->ports[i]; in ata_host_stop()
5479 if (ap->ops->port_stop) in ata_host_stop()
5480 ap->ops->port_stop(ap); in ata_host_stop()
5483 if (host->ops->host_stop) in ata_host_stop()
5484 host->ops->host_stop(host); in ata_host_stop()
5488 * ata_finalize_port_ops - finalize ata_port_operations
5500 * methods and ->inherits is no longer necessary and cleared.
5502 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5512 void **end = (void **)&ops->inherits; in ata_finalize_port_ops()
5515 if (!ops || !ops->inherits) in ata_finalize_port_ops()
5520 for (cur = ops->inherits; cur; cur = cur->inherits) { in ata_finalize_port_ops()
5532 ops->inherits = NULL; in ata_finalize_port_ops()
5538 * ata_host_start - start and freeze ports of an ATA host
5542 * recorded in host->flags, so this function can be called
5544 once. If host->ops isn't initialized yet, it's set to the
5545 * first non-dummy port ops.
5551 * 0 if all ports are started successfully, -errno otherwise.
5559 if (host->flags & ATA_HOST_STARTED) in ata_host_start()
5562 ata_finalize_port_ops(host->ops); in ata_host_start()
5564 for (i = 0; i < host->n_ports; i++) { in ata_host_start()
5565 struct ata_port *ap = host->ports[i]; in ata_host_start()
5567 ata_finalize_port_ops(ap->ops); in ata_host_start()
5569 if (!host->ops && !ata_port_is_dummy(ap)) in ata_host_start()
5570 host->ops = ap->ops; in ata_host_start()
5572 if (ap->ops->port_stop) in ata_host_start()
5576 if (host->ops->host_stop) in ata_host_start()
5582 return -ENOMEM; in ata_host_start()
5585 for (i = 0; i < host->n_ports; i++) { in ata_host_start()
5586 struct ata_port *ap = host->ports[i]; in ata_host_start()
5588 if (ap->ops->port_start) { in ata_host_start()
5589 rc = ap->ops->port_start(ap); in ata_host_start()
5591 if (rc != -ENODEV) in ata_host_start()
5592 dev_err(host->dev, in ata_host_start()
5602 devres_add(host->dev, start_dr); in ata_host_start()
5603 host->flags |= ATA_HOST_STARTED; in ata_host_start()
5607 while (--i >= 0) { in ata_host_start()
5608 struct ata_port *ap = host->ports[i]; in ata_host_start()
5610 if (ap->ops->port_stop) in ata_host_start()
5611 ap->ops->port_stop(ap); in ata_host_start()
5619 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
5628 spin_lock_init(&host->lock); in ata_host_init()
5629 mutex_init(&host->eh_mutex); in ata_host_init()
5630 host->n_tags = ATA_MAX_QUEUE; in ata_host_init()
5631 host->dev = dev; in ata_host_init()
5632 host->ops = ops; in ata_host_init()
5633 kref_init(&host->kref); in ata_host_init()
5639 struct ata_eh_info *ehi = &ap->link.eh_info; in __ata_port_probe()
5643 spin_lock_irqsave(ap->lock, flags); in __ata_port_probe()
5645 ehi->probe_mask |= ATA_ALL_DEVICES; in __ata_port_probe()
5646 ehi->action |= ATA_EH_RESET; in __ata_port_probe()
5647 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; in __ata_port_probe()
5649 ap->pflags &= ~ATA_PFLAG_INITIALIZING; in __ata_port_probe()
5650 ap->pflags |= ATA_PFLAG_LOADING; in __ata_port_probe()
5653 spin_unlock_irqrestore(ap->lock, flags); in __ata_port_probe()
5660 if (ap->ops->error_handler) { in ata_port_probe()
5664 DPRINTK("ata%u: bus probe begin\n", ap->print_id); in ata_port_probe()
5666 DPRINTK("ata%u: bus probe end\n", ap->print_id); in ata_port_probe()
5680 * Jeff Garzik says this is only within a controller, so we in async_port_probe()
5683 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) in async_port_probe()
5695 * ata_host_register - register initialized ATA host
5708 * 0 on success, -errno otherwise.
5714 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE); in ata_host_register()
5717 if (!(host->flags & ATA_HOST_STARTED)) { in ata_host_register()
5718 dev_err(host->dev, "BUG: trying to register unstarted host\n"); in ata_host_register()
5720 return -EINVAL; in ata_host_register()
5727 for (i = host->n_ports; host->ports[i]; i++) in ata_host_register()
5728 kfree(host->ports[i]); in ata_host_register()
5731 for (i = 0; i < host->n_ports; i++) { in ata_host_register()
5732 host->ports[i]->print_id = atomic_inc_return(&ata_print_id); in ata_host_register()
5733 host->ports[i]->local_port_no = i + 1; in ata_host_register()
5737 for (i = 0; i < host->n_ports; i++) { in ata_host_register()
5738 rc = ata_tport_add(host->dev, host->ports[i]); in ata_host_register()
5749 for (i = 0; i < host->n_ports; i++) { in ata_host_register()
5750 struct ata_port *ap = host->ports[i]; in ata_host_register()
5754 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) in ata_host_register()
5755 ap->cbl = ATA_CBL_SATA; in ata_host_register()
5758 sata_link_init_spd(&ap->link); in ata_host_register()
5759 if (ap->slave_link) in ata_host_register()
5760 sata_link_init_spd(ap->slave_link); in ata_host_register()
5762 /* print per-port info to dmesg */ in ata_host_register()
5763 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, in ata_host_register()
5764 ap->udma_mask); in ata_host_register()
5768 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', in ata_host_register()
5770 ap->link.eh_info.desc); in ata_host_register()
5771 ata_ehi_clear_desc(&ap->link.eh_info); in ata_host_register()
5777 for (i = 0; i < host->n_ports; i++) { in ata_host_register()
5778 struct ata_port *ap = host->ports[i]; in ata_host_register()
5779 ap->cookie = async_schedule(async_port_probe, ap); in ata_host_register()
5785 while (--i >= 0) { in ata_host_register()
5786 ata_tport_delete(host->ports[i]); in ata_host_register()
5794 * ata_host_activate - start host, request IRQ and register it
5802 * LLDs perform three steps to activate the host - start host,
5814 * 0 on success, -errno otherwise.
5833 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]", in ata_host_activate()
5834 dev_driver_string(host->dev), in ata_host_activate()
5835 dev_name(host->dev)); in ata_host_activate()
5837 return -ENOMEM; in ata_host_activate()
5839 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, in ata_host_activate()
5844 for (i = 0; i < host->n_ports; i++) in ata_host_activate()
5845 ata_port_desc(host->ports[i], "irq %d", irq); in ata_host_activate()
5850 devm_free_irq(host->dev, irq, host); in ata_host_activate()
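/*
 * Illustrative LLD probe flow (a sketch, not code from this file;
 * resource mapping elided): a typical PCI driver allocates the host
 * from its port_info array and then activates it in one call:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	(map BARs, fill each ap->ioaddr, set port descriptions)
 *	return ata_host_activate(host, pdev->irq, irq_handler,
 *				 IRQF_SHARED, &drv_sht);
 *
 * irq_handler and drv_sht here stand for the driver's own interrupt
 * handler and scsi_host_template.
 */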
5857 * ata_port_detach - Detach ATA port in preparation of device removal
5873 if (!ap->ops->error_handler) in ata_port_detach()
5877 spin_lock_irqsave(ap->lock, flags); in ata_port_detach()
5878 ap->pflags |= ATA_PFLAG_UNLOADING; in ata_port_detach()
5880 spin_unlock_irqrestore(ap->lock, flags); in ata_port_detach()
5886 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); in ata_port_detach()
5888 cancel_delayed_work_sync(&ap->hotplug_task); in ata_port_detach()
5898 if (ap->pmp_link) { in ata_port_detach()
5901 ata_tlink_delete(&ap->pmp_link[i]); in ata_port_detach()
5904 scsi_remove_host(ap->scsi_host); in ata_port_detach()
5909 * ata_host_detach - Detach all ports of an ATA host
5921 for (i = 0; i < host->n_ports; i++) { in ata_host_detach()
5923 async_synchronize_cookie(host->ports[i]->cookie + 1); in ata_host_detach()
5924 ata_port_detach(host->ports[i]); in ata_host_detach()
5935 * ata_pci_remove_one - PCI layer callback for device removal
5938 * PCI layer indicates to libata via this hook that hot-unplug or
5958 for (i = 0; i < host->n_ports; i++) { in ata_pci_shutdown_one()
5959 struct ata_port *ap = host->ports[i]; in ata_pci_shutdown_one()
5961 ap->pflags |= ATA_PFLAG_FROZEN; in ata_pci_shutdown_one()
5964 if (ap->ops->freeze) in ata_pci_shutdown_one()
5965 ap->ops->freeze(ap); in ata_pci_shutdown_one()
5968 if (ap->ops->port_stop) in ata_pci_shutdown_one()
5969 ap->ops->port_stop(ap); in ata_pci_shutdown_one()
5979 switch (bits->width) { in pci_test_config_bits()
5982 pci_read_config_byte(pdev, bits->reg, &tmp8); in pci_test_config_bits()
5988 pci_read_config_word(pdev, bits->reg, &tmp16); in pci_test_config_bits()
5994 pci_read_config_dword(pdev, bits->reg, &tmp32); in pci_test_config_bits()
6000 return -EINVAL; in pci_test_config_bits()
6003 tmp &= bits->mask; in pci_test_config_bits()
6005 return (tmp == bits->val) ? 1 : 0; in pci_test_config_bits()
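/*
 * Illustrative usage (a sketch modelled on how PATA drivers gate their
 * ports; the register offset and values are hypothetical examples, not
 * taken from this file):
 *
 *	static const struct pci_bits port_enable_bits = {
 *		.reg = 0x41, .width = 1, .mask = 0x80, .val = 0x80,
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &port_enable_bits))
 *		return -ENOENT;
 *
 * A zero result means the port is disabled in PCI config space.
 */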
6029 dev_err(&pdev->dev, in ata_pci_device_do_resume()
6069 * ata_platform_remove_one - Platform layer callback for device removal
6072 * Platform layer indicates to libata via this hook that hot-unplug or
6180 force_ent->device = simple_strtoul(p, &endp, 10); in ata_parse_force_one()
6183 return -EINVAL; in ata_parse_force_one()
6187 force_ent->port = simple_strtoul(id, &endp, 10); in ata_parse_force_one()
6190 return -EINVAL; in ata_parse_force_one()
6198 if (strncasecmp(val, fp->name, strlen(val))) in ata_parse_force_one()
6204 if (strcasecmp(val, fp->name) == 0) { in ata_parse_force_one()
6212 return -EINVAL; in ata_parse_force_one()
6216 return -EINVAL; in ata_parse_force_one()
6219 force_ent->param = *match_fp; in ata_parse_force_one()
6227 int last_port = -1, last_device = -1; in ata_parse_force_param()
6245 struct ata_force_ent te = { .port = -1, .device = -1 }; in ata_parse_force_param()
6255 if (te.port == -1) { in ata_parse_force_param()
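/*
 * Example parameter strings accepted by this parser (illustrative; the
 * full list of recognised names is documented in
 * Documentation/admin-guide/kernel-parameters.rst):
 *
 *	libata.force=noncq           disable NCQ on all devices
 *	libata.force=2:1.5Gbps       limit port 2 to 1.5 Gbps
 *	libata.force=1.00:dump_id    dump IDENTIFY data of device 0, port 1
 */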
6294 rc = -ENOMEM; in ata_init()
6325 * ata_msleep - ATA EH owner aware msleep
6332 * @ap->host will be allowed to own the EH while this task is
6340 bool owns_eh = ap && ap->host->eh_owner == current; in ata_msleep()
6358 * ata_wait_register - wait until register value changes
6360 * @reg: IO-mapped register
6366 * Waiting for some bits of register to change is a common
6368 * IO-mapped register @reg and tests for the following condition.
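/*
 * Sketch of the polling pattern implemented here (hedged; see the
 * function body for the exact ordering): the register is read, a
 * deadline is computed, and the read is repeated every @interval ms for
 * as long as (value & mask) == val still holds:
 *
 *	tmp = ioread32(reg);
 *	deadline = ata_deadline(jiffies, timeout);
 *	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
 *		ata_msleep(ap, interval);
 *		tmp = ioread32(reg);
 *	}
 *	return tmp;
 */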
6445 printk("%sata%u: %pV", level, ap->print_id, &vaf); in ata_port_printk()
6462 if (sata_pmp_attached(link->ap) || link->ap->slave_link) in ata_link_printk()
6464 level, link->ap->print_id, link->pmp, &vaf); in ata_link_printk()
6467 level, link->ap->print_id, &vaf); in ata_link_printk()
6485 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, in ata_dev_printk()