Commit 23eb3b64 authored by Linus Torvalds

Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev: (54 commits)
  Revert "pata_sis: Implement MWDMA for the UDMA 133 capable chips"
  libata: Clarify ata_set_lba_range_entries function
  libata: Report zeroed read after TRIM and max discard size
  pata_hpt3x2n: fix overclocked MWDMA0 timing
  pata_it8213: MWDMA0 is unsupported
  [libata] MWDMA0 is unsupported on PIIX-like PATA controllers
  pata_via: clear UDMA transfer mode bit for PIO and MWDMA
  pata_sis: Power Management fix
  pata_rz1000: Power Management fix
  pata_radisys: fix UDMA handling
  pata_ns87415: Power Management fix
  pata_marvell: fix marvell_pre_reset() documentation
  pata_legacy: add pointers to QDI65x0 documentation
  pata_legacy: fix access to control register for QDI6580
  pata_legacy: fix QDI6580DP support
  pata_it8213: fix it8213_pre_reset() documentation
  pata_it8213: fix wrong MWDMA timings being programmed
  pata_it8213: fix PIO2 underclocking
  pata_it8213: fix wrong PIO timings being programmed
  pata_it8213: fix UDMA handling
  ...
parents 1c496784 1b52f2a4
drivers/ata/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig ATA
tristate "Serial ATA (prod) and Parallel ATA (experimental) drivers"
tristate "Serial ATA and Parallel ATA drivers"
depends on HAS_IOMEM
depends on BLOCK
depends on !(M32R || M68K) || BROKEN
@@ -374,8 +374,8 @@ config PATA_HPT366
If unsure, say N.
config PATA_HPT37X
tristate "HPT 370/370A/371/372/374/302 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "HPT 370/370A/371/372/374/302 PATA support"
depends on PCI
help
This option enables support for the majority of the later HPT
PATA controllers via the new ATA layer.
@@ -383,8 +383,8 @@ config PATA_HPT37X
If unsure, say N.
config PATA_HPT3X2N
tristate "HPT 372N/302N PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "HPT 372N/302N PATA support"
depends on PCI
help
This option enables support for the N variant HPT PATA
controllers via the new ATA layer
@@ -401,7 +401,7 @@ config PATA_HPT3X3
If unsure, say N.
config PATA_HPT3X3_DMA
bool "HPT 343/363 DMA support (Experimental)"
bool "HPT 343/363 DMA support"
depends on PATA_HPT3X3
help
This option enables DMA support for the HPT343/363
@@ -510,8 +510,8 @@ config PATA_NETCELL
If unsure, say N.
config PATA_NINJA32
tristate "Ninja32/Delkin Cardbus ATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "Ninja32/Delkin Cardbus ATA support"
depends on PCI
help
This option enables support for the Ninja32, Delkin and
possibly other brands of Cardbus ATA adapter
@@ -573,6 +573,14 @@ config PATA_PCMCIA
If unsure, say N.
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
depends on PCI
help
This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
If unsure, say N.
config PATA_PDC_OLD
tristate "Older Promise PATA controller support"
depends on PCI
@@ -643,14 +651,6 @@ config PATA_SERVERWORKS
If unsure, say N.
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
depends on PCI
help
This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
If unsure, say N.
config PATA_SIL680
tristate "CMD / Silicon Image 680 PATA support"
depends on PCI
@@ -667,6 +667,15 @@ config PATA_SIS
If unsure, say N.
config PATA_TOSHIBA
tristate "Toshiba Piccolo support (Experimental)"
depends on PCI && EXPERIMENTAL
help
Support for the Toshiba Piccolo controllers. Currently only the
primary channel is supported by this driver.
If unsure, say N.
config PATA_VIA
tristate "VIA PATA support"
depends on PCI
drivers/ata/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o
obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
drivers/ata/ahci.c
@@ -113,6 +113,7 @@ enum {
board_ahci_mcp65 = 6,
board_ahci_nopmp = 7,
board_ahci_yesncq = 8,
board_ahci_nosntf = 9,
/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
@@ -235,6 +236,7 @@ enum {
AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
link offline */
AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
/* ap->flags bits */
@@ -508,7 +510,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
/* board_ahci_yesncq */
[board_ahci_yesncq] =
{
AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
@@ -516,6 +518,14 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_nosntf] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
};
static const struct pci_device_id ahci_pci_tbl[] = {
@@ -531,7 +541,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
@@ -849,6 +859,12 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
cap &= ~HOST_CAP_PMP;
}
if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
dev_printk(KERN_INFO, &pdev->dev,
"controller can't do SNTF, turning off CAP_SNTF\n");
cap &= ~HOST_CAP_SNTF;
}
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
port_map != 1) {
dev_printk(KERN_INFO, &pdev->dev,
@@ -2988,6 +3004,14 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
return -ENODEV;
/* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
* At the moment, we can only use the AHCI mode. Let the users know
* that for SAS drives they're out of luck.
*/
if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
"can only drive SATA devices with this driver\n");
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
drivers/ata/ata_generic.c
@@ -168,9 +168,12 @@ static struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
{ PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE)
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
#endif
/* Must come last. If you add entries adjust this table appropriately */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
{ 0, },
drivers/ata/ata_piix.c
@@ -869,10 +869,10 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
(timings[pio][1] << 8);
}
if (ap->udma_mask) {
if (ap->udma_mask)
udma_enable &= ~(1 << devid);
pci_write_config_word(dev, master_port, master_data);
}
pci_write_config_word(dev, master_port, master_data);
}
/* Don't scribble on 0x48 if the controller does not support UDMA */
if (ap->udma_mask)
drivers/ata/libata-acpi.c
@@ -807,12 +807,11 @@ static int ata_acpi_exec_tfs(struct ata_device *dev, int *nr_executed)
* EH context.
*
* RETURNS:
* 0 on success, -errno on failure.
* 0 on success, -ENOENT if _SDD doesn't exist, -errno on failure.
*/
static int ata_acpi_push_id(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
int err;
acpi_status status;
struct acpi_object_list input;
union acpi_object in_params[1];
@@ -835,12 +834,16 @@ static int ata_acpi_push_id(struct ata_device *dev)
status = acpi_evaluate_object(dev->acpi_handle, "_SDD", &input, NULL);
swap_buf_le16(dev->id, ATA_ID_WORDS);
err = ACPI_FAILURE(status) ? -EIO : 0;
if (err < 0)
if (status == AE_NOT_FOUND)
return -ENOENT;
if (ACPI_FAILURE(status)) {
ata_dev_printk(dev, KERN_WARNING,
"ACPI _SDD failed (AE 0x%x)\n", status);
return -EIO;
}
return err;
return 0;
}
/**
@@ -971,7 +974,7 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
/* do _SDD if SATA */
if (acpi_sata) {
rc = ata_acpi_push_id(dev);
if (rc)
if (rc && rc != -ENOENT)
goto acpi_err;
}
drivers/ata/libata-core.c
@@ -6616,6 +6616,13 @@ static int __init ata_init(void)
{
ata_parse_force_param();
/*
* FIXME: In UP case, there is only one workqueue thread and if you
* have more than one PIO device, latency is bloody awful, with
* occasional multi-second "hiccups" as one PIO device waits for
* another. It's an ugly wart that users DO occasionally complain
* about; luckily most users have at most one PIO polled device.
*/
ata_wq = create_workqueue("ata");
if (!ata_wq)
goto free_force_tbl;
drivers/ata/libata-eh.c
@@ -110,6 +110,13 @@ static const unsigned long ata_eh_identify_timeouts[] = {
ULONG_MAX,
};
static const unsigned long ata_eh_flush_timeouts[] = {
15000, /* be generous with flush */
15000, /* ditto */
30000, /* and even more generous */
ULONG_MAX,
};
static const unsigned long ata_eh_other_timeouts[] = {
5000, /* same rationale as identify timeout */
10000, /* ditto */
@@ -147,6 +154,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
.timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
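The new table plugs into libata's escalating-timeout scheme: each time one of the listed commands times out in EH, the next (larger) value from its timeout array is used, with ULONG_MAX terminating the list. A simplified, standalone illustration of that pattern follows; it is not the libata implementation, just the idea the table encodes (pick_timeout and the millisecond naming are my own for the sketch).

/* Simplified illustration of an escalating timeout table like
 * ata_eh_flush_timeouts above: retry N uses the Nth entry, and the
 * terminating ULONG_MAX means "stop escalating, keep the last value".
 * Not the libata code - just the pattern.
 */
#include <limits.h>
#include <stdio.h>

static const unsigned long flush_timeouts_ms[] = {
	15000,		/* be generous with flush */
	15000,		/* ditto */
	30000,		/* and even more generous */
	ULONG_MAX,
};

static unsigned long pick_timeout(const unsigned long *table, unsigned int attempt)
{
	unsigned int i = 0;

	/* walk forward with each attempt, never past the ULONG_MAX terminator */
	while (i < attempt && table[i + 1] != ULONG_MAX)
		i++;
	return table[i];
}

int main(void)
{
	for (unsigned int attempt = 0; attempt < 5; attempt++)
		printf("attempt %u -> %lu ms\n", attempt,
		       pick_timeout(flush_timeouts_ms, attempt));
	return 0;
}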
@@ -3112,6 +3121,82 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
return 0;
}
/**
* ata_eh_maybe_retry_flush - Retry FLUSH if necessary
* @dev: ATA device which may need FLUSH retry
*
* If @dev failed FLUSH, it needs to be reported to the upper layer
* immediately as it means that @dev failed to remap and has already
* lost at least a sector, and further FLUSH retries won't make any
* difference to the lost sector.  However, if FLUSH failed for
* other reasons, for example a transmission error, FLUSH needs to
* be retried.
*
* This function determines whether FLUSH failure retry is
* necessary and performs it if so.
*
* RETURNS:
* 0 if EH can continue, -errno if EH needs to be repeated.
*/
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ata_queued_cmd *qc;
struct ata_taskfile tf;
unsigned int err_mask;
int rc = 0;
/* did flush fail for this device? */
if (!ata_tag_valid(link->active_tag))
return 0;
qc = __ata_qc_from_tag(ap, link->active_tag);
if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
qc->tf.command != ATA_CMD_FLUSH))
return 0;
/* if the device failed it, it should be reported to upper layers */
if (qc->err_mask & AC_ERR_DEV)
return 0;
/* flush failed for some other reason, give it another shot */
ata_tf_init(dev, &tf);
tf.command = qc->tf.command;
tf.flags |= ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
tf.command, qc->err_mask);
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (!err_mask) {
/*
* FLUSH is complete but there's no way to
* successfully complete a failed command from EH.
* Making sure retry is allowed at least once and
* retrying it should do the trick - whatever was in
* the cache is already on the platter and this won't
* cause an infinite loop.
*/
qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
} else {
ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
err_mask);
rc = -EIO;
/* if device failed it, report it to upper layers */
if (err_mask & AC_ERR_DEV) {
qc->err_mask |= AC_ERR_DEV;
qc->result_tf = tf;
if (!(ap->pflags & ATA_PFLAG_FROZEN))
rc = 0;
}
}
return rc;
}
static int ata_link_nr_enabled(struct ata_link *link)
{
struct ata_device *dev;
@@ -3455,6 +3540,15 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
}
}
/* retry flush if necessary */
ata_for_each_dev(dev, link, ALL) {
if (dev->class != ATA_DEV_ATA)
continue;
rc = ata_eh_maybe_retry_flush(dev);
if (rc)
goto dev_fail;
}
/* configure link power saving */
if (ehc->i.action & ATA_EH_LPM)
ata_for_each_dev(dev, link, ALL)
drivers/ata/libata-scsi.c
@@ -47,6 +47,7 @@
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <asm/unaligned.h>
#include "libata.h"
@@ -154,8 +155,7 @@ static ssize_t ata_scsi_lpm_put(struct device *dev,
*/
for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
const int len = strlen(link_pm_policy[i].name);
if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
buf[len] == '\n') {
if (strncmp(link_pm_policy[i].name, buf, len) == 0) {
policy = link_pm_policy[i].value;
break;
}
@@ -1964,6 +1964,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
0x80, /* page 0x80, unit serial no page */
0x83, /* page 0x83, device ident page */
0x89, /* page 0x89, ata info page */
0xb0, /* page 0xb0, block limits page */
0xb1, /* page 0xb1, block device characteristics page */
};
@@ -2085,6 +2086,43 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
return 0;
}
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
u32 min_io_sectors;
rbuf[1] = 0xb0;
rbuf[3] = 0x3c; /* required VPD size with unmap support */
/*
* Optimal transfer length granularity.
*
* This is always one physical block, but for disks whose logical
* sector size is smaller than the physical sector size we need to
* figure out how many logical sectors that physical block spans.
*/
if (ata_id_has_large_logical_sectors(args->id))
min_io_sectors = ata_id_logical_per_physical_sectors(args->id);
else
min_io_sectors = 1;
put_unaligned_be16(min_io_sectors, &rbuf[6]);
/*
* Optimal unmap granularity.
*
* The ATA spec doesn't even know about a granularity or alignment
* for the TRIM command.  We can leave out most of the unmap-related
* VPD page entries, but we have to specify a granularity to signal
* that we support some form of unmap - in this case via WRITE SAME
* with the unmap bit set.
*/
if (ata_id_has_trim(args->id)) {
put_unaligned_be32(65535 * 512 / 8, &rbuf[20]);
put_unaligned_be32(1, &rbuf[28]);
}
return 0;
}
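The 65535 * 512 / 8 constant written at offset 20 (the maximum unmap LBA count field of the Block Limits VPD page, as I read SBC) falls out of the TRIM payload format used further down in ata_scsi_write_same_xlat(): a single 512-byte DSM data block holds 64 eight-byte range entries of at most 0xffff sectors each. A standalone sketch of that arithmetic, not part of the patch:

/* Why ata_scsiop_inq_b0() advertises 65535 * 512 / 8 sectors:
 * one 512-byte TRIM payload holds 64 range entries, each covering
 * up to 0xffff sectors.  Compile with: cc -o b0_calc b0_calc.c
 */
#include <stdio.h>

int main(void)
{
	const unsigned int entry_bytes   = 8;      /* one DSM/TRIM range entry */
	const unsigned int payload_bytes = 512;    /* TRIM payload is one sector here */
	const unsigned int max_per_entry = 0xffff; /* 16-bit sector count per entry */

	unsigned int entries     = payload_bytes / entry_bytes;	/* 64 */
	unsigned int max_sectors = entries * max_per_entry;	/* 4194240 */

	printf("entries per payload:  %u\n", entries);
	printf("max sectors per TRIM: %u (= 65535 * 512 / 8)\n", max_sectors);
	return 0;
}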
static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
{
int form_factor = ata_id_form_factor(args->id);
@@ -2374,6 +2412,13 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[13] = log_per_phys;
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id)) {
rbuf[14] |= 0x80; /* TPE */
if (ata_id_has_zero_after_trim(args->id))
rbuf[14] |= 0x40; /* TPRZ */
}
}
return 0;
@@ -2896,6 +2941,58 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
return 1;
}
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
u32 size;
void *buf;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!dev->dma_mode))
goto invalid_fld;
if (unlikely(scmd->cmd_len < 16))
goto invalid_fld;
scsi_16_lba_len(cdb, &block, &n_block);
/* for now we only support WRITE SAME with the unmap bit set */
if (unlikely(!(cdb[1] & 0x8)))
goto invalid_fld;
/*
* WRITE SAME always has a sector sized buffer as payload, this
* should never be a multiple entry S/G list.
*/
if (!scsi_sg_count(scmd))
goto invalid_fld;
buf = page_address(sg_page(scsi_sglist(scmd)));
size = ata_set_lba_range_entries(buf, 512, block, n_block);
tf->protocol = ATA_PROT_DMA;
tf->hob_feature = 0;
tf->feature = ATA_DSM_TRIM;
tf->hob_nsect = (size / 512) >> 8;
tf->nsect = size / 512;
tf->command = ATA_CMD_DSM;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
ATA_TFLAG_WRITE;
ata_qc_set_pc_nbytes(qc);
return 0;
invalid_fld:
ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
/* "Invalid field in cdb" */
return 1;
}
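The packing of the (block, n_block) range into the TRIM payload is done by ata_set_lba_range_entries(), which another patch in this series clarifies. The sketch below illustrates the range-entry format as described in ACS - 64-bit words with the LBA in bits 0-47 and a sector count in bits 48-63, split across entries when the count exceeds 0xffff; the kernel helper additionally byte-swaps each word to little-endian for the device. pack_trim_range is a made-up name for illustration, not the kernel function.

/* Hedged sketch of DSM/TRIM range packing.  Each 8-byte entry carries
 * a 48-bit starting LBA and a 16-bit sector count; host byte order is
 * kept here for simplicity.  Not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int pack_trim_range(uint64_t *buf, unsigned int buf_entries,
				    uint64_t lba, uint64_t count)
{
	unsigned int used = 0;

	while (count && used < buf_entries) {
		uint64_t chunk = count > 0xffff ? 0xffff : count;

		/* bits 0-47: LBA, bits 48-63: sector count for this entry */
		buf[used++] = (chunk << 48) | (lba & 0xffffffffffffULL);
		lba += chunk;
		count -= chunk;
	}
	return used;	/* number of 8-byte entries filled */
}

int main(void)
{
	uint64_t payload[64] = { 0 };	/* one 512-byte TRIM data block */
	unsigned int n = pack_trim_range(payload, 64, 0x1000, 0x20000);

	printf("entries used: %u, first entry 0x%016llx\n",
	       n, (unsigned long long)payload[0]);
	return 0;
}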
/**
* ata_get_xlat_func - check if SCSI to ATA translation is possible
* @dev: ATA device
@@ -2920,6 +3017,9 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case WRITE_16:
return ata_scsi_rw_xlat;
case 0x93 /*WRITE_SAME_16*/:
return ata_scsi_write_same_xlat;
case SYNCHRONIZE_CACHE:
if (ata_try_flush_cache(dev))
return ata_scsi_flush_xlat;
@@ -3109,6 +3209,9 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
case 0x89:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
break;
case 0xb0:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
break;
case 0xb1:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
break;
drivers/ata/libata-sff.c
@@ -2384,7 +2384,7 @@ void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
ap->hsm_task_state = HSM_ST_IDLE;
if (ap->ioaddr.bmdma_addr)
ata_bmdma_stop(qc);
ap->ops->bmdma_stop(qc);
spin_unlock_irqrestore(ap->lock, flags);
}
drivers/ata/pata_ali.c
@@ -453,7 +453,9 @@ static void ali_init_chipset(struct pci_dev *pdev)
/* Clear CD-ROM DMA write bit */
tmp &= 0x7F;
/* Cable and UDMA */
pci_write_config_byte(pdev, 0x4B, tmp | 0x09);
if (pdev->revision >= 0xc2)
tmp |= 0x01;
pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
/*
* CD_ROM DMA on (0x53 bit 0). Enable this even if we want
* to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_cmd64x"
#define DRV_VERSION "0.2.5"
#define DRV_VERSION "0.3.1"
/*
* CMD64x specific registers definition.
@@ -254,17 +254,109 @@ static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
}
/**
* cmd646r1_dma_stop - DMA stop callback
* cmd64x_bmdma_stop - DMA stop callback
* @qc: Command in progress
*
* Stub for now while investigating the r1 quirk in the old driver.
* Track the completion of live DMA commands and clear the
* host->private_data DMA tracking flag as we do.
*/
static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
static void cmd64x_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
ata_bmdma_stop(qc);
WARN_ON(ap->host->private_data != ap);
ap->host->private_data = NULL;
}
/**
* cmd64x_qc_defer - Defer logic for chip limits
* @qc: queued command
*
* Decide whether we can issue the command. Called under the host lock.
*/
static int cmd64x_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_host *host = qc->ap->host;
struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
int rc;
int dma = 0;
/* Apply the ATA rules first */
rc = ata_std_qc_defer(qc);
if (rc)
return rc;
if (qc->tf.protocol == ATAPI_PROT_DMA ||
qc->tf.protocol == ATA_PROT_DMA)
dma = 1;
/* If the other port is not live then issue the command */
if (alt == NULL || !alt->qc_active) {
if (dma)
host->private_data = qc->ap;
return 0;
}
/* If there is a live DMA command then wait */
if (host->private_data != NULL)
return ATA_DEFER_PORT;
if (dma)
/* Cannot overlap our DMA command */