[PATCH] libata: Add ->data_xfer method
We need to pass the device so that per-device checks, such as 32-bit I/O enables, can be made. With dev->ap now available we don't have to add extra parameters; we just clean up the existing ones. Also add data_xfer methods to the existing drivers, except ata_piix (which is in the other block of patches). If you reject the piix one, just add a data_xfer to it...

Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 8190bdb929
commit a6b2c5d475
13 changed files with 37 additions and 45 deletions
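To show why the hook takes the device rather than the port: a driver can now key the transfer width off per-device state. A minimal sketch, assuming a made-up driver with a private per-device "use_32bit" flag (the hypo_* names and the flag are hypothetical, not part of this patch):

    #include <linux/libata.h>
    #include <asm/io.h>

    /* Hypothetical driver-private data_xfer: honour a per-device 32-bit
     * I/O enable, otherwise fall back to the stock 16-bit PIO helper.
     * Trailing bytes when buflen is not a multiple of 4 are ignored here
     * for brevity; a real method would handle them. */
    struct hypo_host_priv {
            int use_32bit[ATA_MAX_DEVICES];
    };

    static void hypo_data_xfer(struct ata_device *adev, unsigned char *buf,
                               unsigned int buflen, int write_data)
    {
            struct ata_port *ap = adev->ap;   /* port is reachable from the device */
            struct hypo_host_priv *hp = ap->private_data;

            if (hp->use_32bit[adev->devno]) {
                    unsigned int dwords = buflen >> 2;
                    if (write_data)
                            outsl(ap->ioaddr.data_addr, buf, dwords);
                    else
                            insl(ap->ioaddr.data_addr, buf, dwords);
            } else {
                    ata_pio_data_xfer(adev, buf, buflen, write_data);
            }
    }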
@@ -3527,7 +3527,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 
 /**
  * ata_mmio_data_xfer - Transfer data by MMIO
- * @ap: port to read/write
+ * @dev: device for this I/O
  * @buf: data buffer
  * @buflen: buffer length
  * @write_data: read/write
@@ -3538,9 +3538,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  * Inherited from caller.
  */
 
-static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
-			       unsigned int buflen, int write_data)
+void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+			unsigned int buflen, int write_data)
 {
+	struct ata_port *ap = adev->ap;
 	unsigned int i;
 	unsigned int words = buflen >> 1;
 	u16 *buf16 = (u16 *) buf;
@@ -3572,7 +3573,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
 
 /**
  * ata_pio_data_xfer - Transfer data by PIO
- * @ap: port to read/write
+ * @adev: device to target
  * @buf: data buffer
  * @buflen: buffer length
  * @write_data: read/write
@@ -3583,9 +3584,10 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
  * Inherited from caller.
  */
 
-static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
-			      unsigned int buflen, int write_data)
+void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
+		       unsigned int buflen, int write_data)
 {
+	struct ata_port *ap = adev->ap;
 	unsigned int words = buflen >> 1;
 
 	/* Transfer multiple of 2 bytes */
@@ -3609,39 +3611,6 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
 	}
 }
 
-/**
- * ata_data_xfer - Transfer data from/to the data register.
- * @ap: port to read/write
- * @buf: data buffer
- * @buflen: buffer length
- * @do_write: read/write
- *
- * Transfer data from/to the device data register.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
-			  unsigned int buflen, int do_write)
-{
-	/* Make the crap hardware pay the costs not the good stuff */
-	if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
-		unsigned long flags;
-		local_irq_save(flags);
-		if (ap->flags & ATA_FLAG_MMIO)
-			ata_mmio_data_xfer(ap, buf, buflen, do_write);
-		else
-			ata_pio_data_xfer(ap, buf, buflen, do_write);
-		local_irq_restore(flags);
-	} else {
-		if (ap->flags & ATA_FLAG_MMIO)
-			ata_mmio_data_xfer(ap, buf, buflen, do_write);
-		else
-			ata_pio_data_xfer(ap, buf, buflen, do_write);
-	}
-}
-
 /**
  * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
  * @qc: Command on going
@@ -3676,17 +3645,18 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	if (PageHighMem(page)) {
 		unsigned long flags;
 
 		/* FIXME: use a bounce buffer */
 		local_irq_save(flags);
 		buf = kmap_atomic(page, KM_IRQ0);
 
 		/* do the actual data transfer */
-		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
 
 		kunmap_atomic(buf, KM_IRQ0);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
-		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
 	}
 
 	qc->cursect++;
@@ -3742,7 +3712,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 	DPRINTK("send cdb\n");
 	WARN_ON(qc->dev->cdb_len < 12);
 
-	ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
 	ata_altstatus(ap); /* flush */
 
 	switch (qc->tf.protocol) {
@@ -3802,7 +3772,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 		       "%u bytes trailing data\n", bytes);
 
 		for (i = 0; i < words; i++)
-			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
+			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
 
 		ap->hsm_task_state = HSM_ST_LAST;
 		return;
@@ -3828,17 +3798,18 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 	if (PageHighMem(page)) {
 		unsigned long flags;
 
 		/* FIXME: use bounce buffer */
 		local_irq_save(flags);
 		buf = kmap_atomic(page, KM_IRQ0);
 
 		/* do the actual data transfer */
-		ata_data_xfer(ap, buf + offset, count, do_write);
+		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
 
 		kunmap_atomic(buf, KM_IRQ0);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
-		ata_data_xfer(ap, buf + offset, count, do_write);
+		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
 	}
 
 	bytes -= count;
@@ -5702,6 +5673,8 @@ EXPORT_SYMBOL_GPL(ata_port_start);
 EXPORT_SYMBOL_GPL(ata_port_stop);
 EXPORT_SYMBOL_GPL(ata_host_stop);
 EXPORT_SYMBOL_GPL(ata_interrupt);
+EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
+EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
 EXPORT_SYMBOL_GPL(ata_qc_prep);
 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
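The ata_data_xfer() wrapper removed above was also where ATA_FLAG_IRQ_MASK hardware got interrupts masked around the transfer; a driver that still wants that behaviour can fold it into its own method. A rough sketch mirroring the removed wrapper, assuming a made-up "fussy" driver prefix and that <linux/libata.h> is already included (not part of this patch):

    /* Hypothetical: keep the old IRQ-masked behaviour for controllers that
     * cannot tolerate being interrupted mid-transfer. */
    static void fussy_data_xfer(struct ata_device *adev, unsigned char *buf,
                                unsigned int buflen, int write_data)
    {
            unsigned long flags;

            /* mirror what the removed ata_data_xfer() did for
             * ATA_FLAG_IRQ_MASK ports */
            local_irq_save(flags);
            ata_pio_data_xfer(adev, buf, buflen, write_data);
            local_irq_restore(flags);
    }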
@@ -406,6 +406,7 @@ static const struct ata_port_operations mv5_ops = {
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
+	.data_xfer		= ata_mmio_data_xfer,
 
 	.eng_timeout		= mv_eng_timeout,
 
@@ -433,6 +434,7 @@ static const struct ata_port_operations mv6_ops = {
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
+	.data_xfer		= ata_mmio_data_xfer,
 
 	.eng_timeout		= mv_eng_timeout,
 
@@ -234,6 +234,7 @@ static const struct ata_port_operations nv_ops = {
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
 	.eng_timeout		= ata_eng_timeout,
+	.data_xfer		= ata_pio_data_xfer,
 	.irq_handler		= nv_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.scr_read		= nv_scr_read,
@@ -137,6 +137,7 @@ static const struct ata_port_operations pdc_sata_ops = {
 	.qc_prep		= pdc_qc_prep,
 	.qc_issue		= pdc_qc_issue_prot,
 	.eng_timeout		= pdc_eng_timeout,
+	.data_xfer		= ata_mmio_data_xfer,
 	.irq_handler		= pdc_interrupt,
 	.irq_clear		= pdc_irq_clear,
 
@@ -159,6 +160,7 @@ static const struct ata_port_operations pdc_pata_ops = {
 
 	.qc_prep		= pdc_qc_prep,
 	.qc_issue		= pdc_qc_issue_prot,
+	.data_xfer		= ata_mmio_data_xfer,
 	.eng_timeout		= pdc_eng_timeout,
 	.irq_handler		= pdc_interrupt,
 	.irq_clear		= pdc_irq_clear,
@@ -156,6 +156,7 @@ static const struct ata_port_operations qs_ata_ops = {
 	.phy_reset		= qs_phy_reset,
 	.qc_prep		= qs_qc_prep,
 	.qc_issue		= qs_qc_issue,
+	.data_xfer		= ata_mmio_data_xfer,
 	.eng_timeout		= qs_eng_timeout,
 	.irq_handler		= qs_intr,
 	.irq_clear		= qs_irq_clear,
@@ -176,6 +176,7 @@ static const struct ata_port_operations sil_ops = {
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_mmio_data_xfer,
 	.freeze			= sil_freeze,
 	.thaw			= sil_thaw,
 	.error_handler		= ata_bmdma_error_handler,
@@ -113,6 +113,7 @@ static const struct ata_port_operations sis_ops = {
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 	.eng_timeout		= ata_eng_timeout,
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -320,6 +320,7 @@ static const struct ata_port_operations k2_sata_ops = {
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_mmio_data_xfer,
 	.eng_timeout		= ata_eng_timeout,
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -204,6 +204,7 @@ static const struct ata_port_operations pdc_20621_ops = {
 	.phy_reset		= pdc_20621_phy_reset,
 	.qc_prep		= pdc20621_qc_prep,
 	.qc_issue		= pdc20621_qc_issue_prot,
+	.data_xfer		= ata_mmio_data_xfer,
 	.eng_timeout		= pdc_eng_timeout,
 	.irq_handler		= pdc20621_interrupt,
 	.irq_clear		= pdc20621_irq_clear,
@@ -110,6 +110,7 @@ static const struct ata_port_operations uli_ops = {
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 
 	.eng_timeout		= ata_eng_timeout,
 
@@ -124,6 +124,7 @@ static const struct ata_port_operations svia_sata_ops = {
 
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 
 	.eng_timeout		= ata_eng_timeout,
 
@@ -297,6 +297,7 @@ static const struct ata_port_operations vsc_sata_ops = {
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 	.eng_timeout		= ata_eng_timeout,
 	.irq_handler		= vsc_sata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -525,6 +525,8 @@ struct ata_port_operations {
 	void (*bmdma_setup) (struct ata_queued_cmd *qc);
 	void (*bmdma_start) (struct ata_queued_cmd *qc);
 
+	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
+
 	void (*qc_prep) (struct ata_queued_cmd *qc);
 	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
 
@@ -646,6 +648,10 @@ extern int ata_port_start (struct ata_port *ap);
 extern void ata_port_stop (struct ata_port *ap);
 extern void ata_host_stop (struct ata_host_set *host_set);
 extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
+extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+			       unsigned int buflen, int write_data);
+extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
+			      unsigned int buflen, int write_data);
 extern void ata_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
 extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
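For driver writers, wiring the new method up is a single line in the port operations table: pick ata_mmio_data_xfer or ata_pio_data_xfer to match how the data register is reached, or point at a driver-private routine with the prototype above; the core then dispatches through ap->ops->data_xfer(qc->dev, buf, len, rw) on the PIO paths. A sketch with a made-up driver name (hypo_*), not taken from any in-tree driver:

    static const struct ata_port_operations hypo_ops = {
            /* ... the driver's other methods ... */
            .qc_prep	= ata_qc_prep,
            .qc_issue	= ata_qc_issue_prot,
            .data_xfer	= ata_pio_data_xfer,	/* or ata_mmio_data_xfer, or hypo_data_xfer */
            .irq_handler	= ata_interrupt,
            .irq_clear	= ata_bmdma_irq_clear,
    };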