Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
commit 92d15c2ccb

142 changed files with 1186 additions and 1115 deletions
@@ -514,7 +514,7 @@ With scatterlists, you map a region gathered from several regions by:
         int i, count = pci_map_sg(dev, sglist, nents, direction);
         struct scatterlist *sg;
 
-        for (i = 0, sg = sglist; i < count; i++, sg++) {
+        for_each_sg(sglist, sg, count, i) {
                 hw_address[i] = sg_dma_address(sg);
                 hw_len[i] = sg_dma_len(sg);
         }
@@ -782,5 +782,5 @@ following people:
         Jay Estabrook <Jay.Estabrook@compaq.com>
         Thomas Sailer <sailer@ife.ee.ethz.ch>
         Andrea Arcangeli <andrea@suse.de>
-        Jens Axboe <axboe@suse.de>
+        Jens Axboe <jens.axboe@oracle.com>
         David Mosberger-Tang <davidm@hpl.hp.com>

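A minimal sketch of the mapping pattern the updated text documents, assuming a PCI driver that has already built 'sglist' with 'nents' entries; hw_address[] and hw_len[] stand in for whatever descriptor format the hardware expects:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

int example_map_sglist(struct pci_dev *dev, struct scatterlist *sglist,
                       int nents, dma_addr_t *hw_address, unsigned int *hw_len)
{
        struct scatterlist *sg;
        int i, count;

        count = pci_map_sg(dev, sglist, nents, PCI_DMA_TODEVICE);
        if (count == 0)
                return -ENOMEM;

        /* sg++ is no longer safe once scatterlists may be chained;
         * for_each_sg() walks the list via sg_next() instead. */
        for_each_sg(sglist, sg, count, i) {
                hw_address[i] = sg_dma_address(sg);
                hw_len[i] = sg_dma_len(sg);
        }
        return count;
}
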
@@ -330,7 +330,7 @@ Here is a list of some of the different kernel trees available:
         - ACPI development tree, Len Brown <len.brown@intel.com>
         git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
 
-        - Block development tree, Jens Axboe <axboe@suse.de>
+        - Block development tree, Jens Axboe <jens.axboe@oracle.com>
         git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
 
         - DRM development tree, Dave Airlie <airlied@linux.ie>

Documentation/block/00-INDEX (new file, 20 lines)

@@ -0,0 +1,20 @@
+00-INDEX
+        - This file
+as-iosched.txt
+        - Anticipatory IO scheduler
+barrier.txt
+        - I/O Barriers
+biodoc.txt
+        - Notes on the Generic Block Layer Rewrite in Linux 2.5
+capability.txt
+        - Generic Block Device Capability (/sys/block/<disk>/capability)
+deadline-iosched.txt
+        - Deadline IO scheduler tunables
+ioprio.txt
+        - Block io priorities (in CFQ scheduler)
+request.txt
+        - The members of struct request (in include/linux/blkdev.h)
+stat.txt
+        - Block layer statistics in /sys/block/<dev>/stat
+switching-sched.txt
+        - Switching I/O schedulers at runtime

@@ -20,15 +20,10 @@ actually has a head for each physical device in the logical RAID device.
 However, setting the antic_expire (see tunable parameters below) produces
 very similar behavior to the deadline IO scheduler.
 
-
 Selecting IO schedulers
 -----------------------
-To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
-'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
-assigned globally at boot time only presently. It's also possible to change
-the IO scheduler for a determined device on the fly, as described in
-Documentation/block/switching-sched.txt.
-
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
 
 Anticipatory IO scheduler Policies
 ----------------------------------
@@ -115,7 +110,7 @@ statistics (average think time, average seek distance) on the process
 that submitted the just completed request are examined. If it seems
 likely that that process will submit another request soon, and that
 request is likely to be near the just completed request, then the IO
-scheduler will stop dispatching more read requests for up time (antic_expire)
+scheduler will stop dispatching more read requests for up to (antic_expire)
 milliseconds, hoping that process will submit a new request near the one
 that just completed. If such a request is made, then it is dispatched
 immediately. If the antic_expire wait time expires, then the IO scheduler
@@ -165,3 +160,13 @@ The parameters are:
 for big seek time devices though not a linear correspondence - most
 processes have only a few ms thinktime.
 
+In addition to the tunables above there is a read-only file named est_time
+which, when read, will show:
+
+- The probability of a task exiting without a cooperating task
+  submitting an anticipated IO.
+
+- The current mean think time.
+
+- The seek distance used to determine if an incoming IO is better.
+

@@ -2,7 +2,7 @@
 =====================================================
 
 Notes Written on Jan 15, 2002:
-        Jens Axboe <axboe@suse.de>
+        Jens Axboe <jens.axboe@oracle.com>
         Suparna Bhattacharya <suparna@in.ibm.com>
 
 Last Updated May 2, 2002
@@ -21,7 +21,7 @@ Credits:
 ---------
 
 2.5 bio rewrite:
-        Jens Axboe <axboe@suse.de>
+        Jens Axboe <jens.axboe@oracle.com>
 
 Many aspects of the generic block layer redesign were driven by and evolved
 over discussions, prior patches and the collective experience of several

@@ -5,16 +5,10 @@ This little file attempts to document how the deadline io scheduler works.
 In particular, it will clarify the meaning of the exposed tunables that may be
 of interest to power users.
 
-Each io queue has a set of io scheduler tunables associated with it. These
-tunables control how the io scheduler works. You can find these entries
-in:
-
-/sys/block/<device>/queue/iosched
-
-assuming that you have sysfs mounted on /sys. If you don't have sysfs mounted,
-you can do so by typing:
-
-# mount none /sys -t sysfs
+Selecting IO schedulers
+-----------------------
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
 
 
 ********************************************************************************
@@ -41,14 +35,11 @@ fifo_batch
 
 When a read request expires its deadline, we must move some requests from
 the sorted io scheduler list to the block device dispatch queue. fifo_batch
-controls how many requests we move, based on the cost of each request. A
-request is either qualified as a seek or a stream. The io scheduler knows
-the last request that was serviced by the drive (or will be serviced right
-before this one). See seek_cost and stream_unit.
+controls how many requests we move.
 
 
-write_starved (number of dispatches)
--------------
+writes_starved (number of dispatches)
+--------------
 
 When we have to move requests from the io scheduler queue to the block
 device dispatch queue, we always give a preference to reads. However, we
@@ -73,6 +64,6 @@ that comes at basically 0 cost we leave that on. We simply disable the
 rbtree front sector lookup when the io scheduler merge function is called.
 
 
-Nov 11 2002, Jens Axboe <axboe@suse.de>
+Nov 11 2002, Jens Axboe <jens.axboe@oracle.com>
 
 

@@ -180,4 +180,4 @@ int main(int argc, char *argv[])
 ---> snip ionice.c tool <---
 
 
-March 11 2005, Jens Axboe <axboe@suse.de>
+March 11 2005, Jens Axboe <jens.axboe@oracle.com>

@@ -1,7 +1,7 @@
 
 struct request documentation
 
-Jens Axboe <axboe@suse.de> 27/05/02
+Jens Axboe <jens.axboe@oracle.com> 27/05/02
 
 1.0
 Index

@@ -1,3 +1,18 @@
+To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
+'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
+assigned globally at boot time only presently.
+
+Each io queue has a set of io scheduler tunables associated with it. These
+tunables control how the io scheduler works. You can find these entries
+in:
+
+/sys/block/<device>/queue/iosched
+
+assuming that you have sysfs mounted on /sys. If you don't have sysfs mounted,
+you can do so by typing:
+
+# mount none /sys -t sysfs
+
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
@@ -20,3 +35,9 @@ noop anticipatory deadline [cfq]
 # echo anticipatory > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
 noop [anticipatory] deadline cfq
+
+Each io queue has a set of io scheduler tunables associated with it. These
+tunables control how the io scheduler works. You can find these entries
+in:
+
+/sys/block/<device>/queue/iosched

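The same runtime switch, sketched as a small userspace C program for completeness; the device name "sda" is only an example and must match a block device present on the running system:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/sda/queue/scheduler";
        char line[256];
        FILE *f;

        /* equivalent of: echo deadline > /sys/block/sda/queue/scheduler */
        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return 1;
        }
        fputs("deadline\n", f);
        fclose(f);

        /* equivalent of: cat /sys/block/sda/queue/scheduler */
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                printf("%s", line);     /* the active scheduler is shown in [brackets] */
        fclose(f);
        return 0;
}
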
@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
|
|||
printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
|
||||
startsg->dma_address, startsg->dma_length,
|
||||
sba_sg_address(startsg));
|
||||
startsg++;
|
||||
startsg = sg_next(startsg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
|
|||
while (the_nents-- > 0) {
|
||||
if (sba_sg_address(the_sg) == 0x0UL)
|
||||
sba_dump_sg(NULL, startsg, nents);
|
||||
the_sg++;
|
||||
the_sg = sg_next(the_sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1201,7 +1201,7 @@ sba_fill_pdir(
|
|||
u32 pide = startsg->dma_address & ~PIDE_FLAG;
|
||||
dma_offset = (unsigned long) pide & ~iovp_mask;
|
||||
startsg->dma_address = 0;
|
||||
dma_sg++;
|
||||
dma_sg = sg_next(dma_sg);
|
||||
dma_sg->dma_address = pide | ioc->ibase;
|
||||
pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
|
||||
n_mappings++;
|
||||
|
@ -1228,7 +1228,7 @@ sba_fill_pdir(
|
|||
pdirp++;
|
||||
} while (cnt > 0);
|
||||
}
|
||||
startsg++;
|
||||
startsg = sg_next(startsg);
|
||||
}
|
||||
/* force pdir update */
|
||||
wmb();
|
||||
|
@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc,
|
|||
while (--nents > 0) {
|
||||
unsigned long vaddr; /* tmp */
|
||||
|
||||
startsg++;
|
||||
startsg = sg_next(startsg);
|
||||
|
||||
/* PARANOID */
|
||||
startsg->dma_address = startsg->dma_length = 0;
|
||||
|
@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
|
|||
#ifdef ALLOW_IOV_BYPASS_SG
|
||||
ASSERT(to_pci_dev(dev)->dma_mask);
|
||||
if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
|
||||
for (sg = sglist ; filled < nents ; filled++, sg++){
|
||||
for_each_sg(sglist, sg, nents, filled) {
|
||||
sg->dma_length = sg->length;
|
||||
sg->dma_address = virt_to_phys(sba_sg_address(sg));
|
||||
}
|
||||
|
@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
|
|||
while (nents && sglist->dma_length) {
|
||||
|
||||
sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
|
||||
sglist++;
|
||||
sglist = sg_next(sglist);
|
||||
nents--;
|
||||
}
|
||||
|
||||
|
|
|
@@ -360,6 +360,7 @@ static struct scsi_host_template driver_template = {
         .max_sectors = 1024,
         .cmd_per_lun = SIMSCSI_REQ_QUEUE_LEN,
         .use_clustering = DISABLE_CLUSTERING,
+        .use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int __init

@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
|
|||
*
|
||||
* Unmap a set of streaming mode DMA translations.
|
||||
*/
|
||||
void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nhwentries, int direction)
|
||||
{
|
||||
int i;
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
|
||||
struct scatterlist *sg;
|
||||
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
|
||||
for (i = 0; i < nhwentries; i++, sg++) {
|
||||
for_each_sg(sgl, sg, nhwentries, i) {
|
||||
provider->dma_unmap(pdev, sg->dma_address, direction);
|
||||
sg->dma_address = (dma_addr_t) NULL;
|
||||
sg->dma_length = 0;
|
||||
|
@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
|
|||
*
|
||||
* Maps each entry of @sg for DMA.
|
||||
*/
|
||||
int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
|
||||
int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
|
||||
int direction)
|
||||
{
|
||||
unsigned long phys_addr;
|
||||
struct scatterlist *saved_sg = sg;
|
||||
struct scatterlist *saved_sg = sgl, *sg;
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
|
||||
int i;
|
||||
|
@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
|
|||
/*
|
||||
* Setup a DMA address for each entry in the scatterlist.
|
||||
*/
|
||||
for (i = 0; i < nhwentries; i++, sg++) {
|
||||
for_each_sg(sgl, sg, nhwentries, i) {
|
||||
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
|
||||
sg->dma_address = provider->dma_map(pdev,
|
||||
phys_addr, sg->length,
|
||||
|
|
|
@ -154,12 +154,13 @@ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
|
|||
{
|
||||
}
|
||||
|
||||
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction direction)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nents; i++, sg++) {
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
|
||||
dma_direct_offset;
|
||||
sg->dma_length = sg->length;
|
||||
|
|
|
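The hunk above shows the pattern this series applies across architectures: a scatterlist may now be chained rather than laid out as one flat array, so walking it with 'sg++' or 'sg[i]' can run off the end of a chunk instead of following the chain. A hedged sketch of the replacement idiom, mirroring the powerpc code above (the dma_length member exists only on architectures that define it):

#include <linux/scatterlist.h>
#include <asm/io.h>                     /* page_to_phys() */

static void example_fill_sg(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        /* not: for (i = 0, sg = sgl; i < nents; i++, sg++) */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = page_to_phys(sg->page) + sg->offset;
                sg->dma_length = sg->length;
        }
}
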
@ -87,15 +87,16 @@ static void ibmebus_unmap_single(struct device *dev,
|
|||
}
|
||||
|
||||
static int ibmebus_map_sg(struct device *dev,
|
||||
struct scatterlist *sg,
|
||||
struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction direction)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nents; i++) {
|
||||
sg[i].dma_address = (dma_addr_t)page_address(sg[i].page)
|
||||
+ sg[i].offset;
|
||||
sg[i].dma_length = sg[i].length;
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
sg->dma_address = (dma_addr_t)page_address(sg->page)
|
||||
+ sg->offset;
|
||||
sg->dma_length = sg->length;
|
||||
}
|
||||
|
||||
return nents;
|
||||
|
|
|
@ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
dma_addr_t dma_next = 0, dma_addr;
|
||||
unsigned long flags;
|
||||
struct scatterlist *s, *outs, *segstart;
|
||||
int outcount, incount;
|
||||
int outcount, incount, i;
|
||||
unsigned long handle;
|
||||
|
||||
BUG_ON(direction == DMA_NONE);
|
||||
|
@ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
|
||||
spin_lock_irqsave(&(tbl->it_lock), flags);
|
||||
|
||||
for (s = outs; nelems; nelems--, s++) {
|
||||
for_each_sg(sglist, s, nelems, i) {
|
||||
unsigned long vaddr, npages, entry, slen;
|
||||
|
||||
slen = s->length;
|
||||
|
@ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
if (novmerge || (dma_addr != dma_next)) {
|
||||
/* Can't merge: create a new segment */
|
||||
segstart = s;
|
||||
outcount++; outs++;
|
||||
outcount++;
|
||||
outs = sg_next(outs);
|
||||
DBG(" can't merge, new segment.\n");
|
||||
} else {
|
||||
outs->dma_length += s->length;
|
||||
|
@ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
* next entry of the sglist if we didn't fill the list completely
|
||||
*/
|
||||
if (outcount < incount) {
|
||||
outs++;
|
||||
outs = sg_next(outs);
|
||||
outs->dma_address = DMA_ERROR_CODE;
|
||||
outs->dma_length = 0;
|
||||
}
|
||||
|
@ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
return outcount;
|
||||
|
||||
failure:
|
||||
for (s = &sglist[0]; s <= outs; s++) {
|
||||
for_each_sg(sglist, s, nelems, i) {
|
||||
if (s->dma_length != 0) {
|
||||
unsigned long vaddr, npages;
|
||||
|
||||
|
@ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
s->dma_address = DMA_ERROR_CODE;
|
||||
s->dma_length = 0;
|
||||
}
|
||||
if (s == outs)
|
||||
break;
|
||||
}
|
||||
spin_unlock_irqrestore(&(tbl->it_lock), flags);
|
||||
return 0;
|
||||
|
@ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
||||
int nelems, enum dma_data_direction direction)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
unsigned long flags;
|
||||
|
||||
BUG_ON(direction == DMA_NONE);
|
||||
|
@ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
|
||||
spin_lock_irqsave(&(tbl->it_lock), flags);
|
||||
|
||||
sg = sglist;
|
||||
while (nelems--) {
|
||||
unsigned int npages;
|
||||
dma_addr_t dma_handle = sglist->dma_address;
|
||||
dma_addr_t dma_handle = sg->dma_address;
|
||||
|
||||
if (sglist->dma_length == 0)
|
||||
if (sg->dma_length == 0)
|
||||
break;
|
||||
npages = iommu_num_pages(dma_handle,sglist->dma_length);
|
||||
npages = iommu_num_pages(dma_handle, sg->dma_length);
|
||||
__iommu_free(tbl, dma_handle, npages);
|
||||
sglist++;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
|
||||
|
|
|
@ -616,17 +616,18 @@ static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
|
|||
}
|
||||
}
|
||||
|
||||
static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction direction)
|
||||
static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction direction)
|
||||
{
|
||||
#if defined(CONFIG_PS3_DYNAMIC_DMA)
|
||||
BUG_ON("do");
|
||||
return -EPERM;
|
||||
#else
|
||||
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nents; i++, sg++) {
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
int result = ps3_dma_map(dev->d_region,
|
||||
page_to_phys(sg->page) + sg->offset, sg->length,
|
||||
&sg->dma_address, 0);
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/pci.h> /* struct pci_dev */
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/vaddrs.h>
|
||||
|
@ -717,19 +718,19 @@ void pci_unmap_page(struct pci_dev *hwdev,
|
|||
* Device ownership issues as mentioned above for pci_map_single are
|
||||
* the same here.
|
||||
*/
|
||||
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
|
||||
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
|
||||
int direction)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int n;
|
||||
|
||||
BUG_ON(direction == PCI_DMA_NONE);
|
||||
/* IIep is write-through, not flushing. */
|
||||
for (n = 0; n < nents; n++) {
|
||||
for_each_sg(sgl, sg, nents, n) {
|
||||
BUG_ON(page_address(sg->page) == NULL);
|
||||
sg->dvma_address =
|
||||
virt_to_phys(page_address(sg->page)) + sg->offset;
|
||||
sg->dvma_length = sg->length;
|
||||
sg++;
|
||||
}
|
||||
return nents;
|
||||
}
|
||||
|
@ -738,19 +739,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
|
|||
* Again, cpu read rules concerning calls here are the same as for
|
||||
* pci_unmap_single() above.
|
||||
*/
|
||||
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
|
||||
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
|
||||
int direction)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int n;
|
||||
|
||||
BUG_ON(direction == PCI_DMA_NONE);
|
||||
if (direction != PCI_DMA_TODEVICE) {
|
||||
for (n = 0; n < nents; n++) {
|
||||
for_each_sg(sgl, sg, nents, n) {
|
||||
BUG_ON(page_address(sg->page) == NULL);
|
||||
mmu_inval_dma_area(
|
||||
(unsigned long) page_address(sg->page),
|
||||
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
|
||||
sg++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -789,34 +790,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t
|
|||
* The same as pci_dma_sync_single_* but for a scatter-gather list,
|
||||
* same rules and usage.
|
||||
*/
|
||||
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
|
||||
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int n;
|
||||
|
||||
BUG_ON(direction == PCI_DMA_NONE);
|
||||
if (direction != PCI_DMA_TODEVICE) {
|
||||
for (n = 0; n < nents; n++) {
|
||||
for_each_sg(sgl, sg, nents, n) {
|
||||
BUG_ON(page_address(sg->page) == NULL);
|
||||
mmu_inval_dma_area(
|
||||
(unsigned long) page_address(sg->page),
|
||||
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
|
||||
sg++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
|
||||
void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int n;
|
||||
|
||||
BUG_ON(direction == PCI_DMA_NONE);
|
||||
if (direction != PCI_DMA_TODEVICE) {
|
||||
for (n = 0; n < nents; n++) {
|
||||
for_each_sg(sgl, sg, nents, n) {
|
||||
BUG_ON(page_address(sg->page) == NULL);
|
||||
mmu_inval_dma_area(
|
||||
(unsigned long) page_address(sg->page),
|
||||
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
|
||||
sg++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,8 +11,8 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/scatterlist.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/sbus.h>
|
||||
|
@ -144,8 +144,9 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
|
|||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
|
||||
sg[sz].dvma_length = sg[sz].length;
|
||||
sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
|
||||
sg->dvma_length = sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
@ -173,11 +174,12 @@ static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_
|
|||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
|
||||
vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
|
||||
len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
|
||||
vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
|
||||
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
|
||||
for (len += vaddr; vaddr < len; vaddr++)
|
||||
clear_bit(vaddr, iounit->bmap);
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
|
|
@ -12,8 +12,8 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/scatterlist.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/sbus.h>
|
||||
|
@ -240,7 +240,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
|
|||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
|
||||
sg->dvma_length = (__u32) sg->length;
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -254,7 +254,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
|
|||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
|
||||
sg->dvma_length = (__u32) sg->length;
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -285,7 +285,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
|
|||
|
||||
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
|
||||
sg->dvma_length = (__u32) sg->length;
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -325,7 +325,7 @@ static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
|
|||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
|
||||
sg->dvma_address = 0x21212121;
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -17,8 +17,8 @@
|
|||
#include <linux/highmem.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/scatterlist.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
@ -1228,8 +1228,9 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
|
|||
{
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
|
||||
sg[sz].dvma_length = sg[sz].length;
|
||||
sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
|
||||
sg->dvma_length = sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1244,7 +1245,8 @@ static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
|
|||
{
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
|
||||
sun4c_unlockarea((char *)sg->dvma_address, sg->length);
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
#include <linux/pci.h>
|
||||
|
@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
|
|||
unsigned long iopte_protection)
|
||||
{
|
||||
struct scatterlist *dma_sg = sg;
|
||||
struct scatterlist *sg_end = sg + nelems;
|
||||
struct scatterlist *sg_end = sg_last(sg, nelems);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nused; i++) {
|
||||
|
@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
|
|||
len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
|
||||
break;
|
||||
}
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
pteval = iopte_protection | (pteval & IOPTE_PAGE);
|
||||
|
@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
|
|||
}
|
||||
|
||||
pteval = (pteval & IOPTE_PAGE) + len;
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
|
||||
/* Skip over any tail mappings we've fully mapped,
|
||||
* adjusting pteval along the way. Stop when we
|
||||
* detect a page crossing event.
|
||||
*/
|
||||
while (sg < sg_end &&
|
||||
while (sg != sg_end &&
|
||||
(pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
|
||||
(pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
|
||||
((pteval ^
|
||||
(SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
|
||||
pteval += sg->length;
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
|
||||
pteval = ~0UL;
|
||||
} while (dma_npages != 0);
|
||||
dma_sg++;
|
||||
dma_sg = sg_next(dma_sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
|
|||
sgtmp = sglist;
|
||||
while (used && sgtmp->dma_length) {
|
||||
sgtmp->dma_address += dma_base;
|
||||
sgtmp++;
|
||||
sgtmp = sg_next(sgtmp);
|
||||
used--;
|
||||
}
|
||||
used = nelems - used;
|
||||
|
@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
|||
struct strbuf *strbuf;
|
||||
iopte_t *base;
|
||||
unsigned long flags, ctx, i, npages;
|
||||
struct scatterlist *sg, *sgprv;
|
||||
u32 bus_addr;
|
||||
|
||||
if (unlikely(direction == DMA_NONE)) {
|
||||
|
@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
|||
|
||||
bus_addr = sglist->dma_address & IO_PAGE_MASK;
|
||||
|
||||
for (i = 1; i < nelems; i++)
|
||||
if (sglist[i].dma_length == 0)
|
||||
sgprv = NULL;
|
||||
for_each_sg(sglist, sg, nelems, i) {
|
||||
if (sg->dma_length == 0)
|
||||
break;
|
||||
i--;
|
||||
npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
|
||||
sgprv = sg;
|
||||
}
|
||||
|
||||
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
|
||||
bus_addr) >> IO_PAGE_SHIFT;
|
||||
|
||||
base = iommu->page_table +
|
||||
|
@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
|
|||
struct iommu *iommu;
|
||||
struct strbuf *strbuf;
|
||||
unsigned long flags, ctx, npages, i;
|
||||
struct scatterlist *sg, *sgprv;
|
||||
u32 bus_addr;
|
||||
|
||||
iommu = dev->archdata.iommu;
|
||||
|
@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
|
|||
|
||||
/* Step 2: Kick data out of streaming buffers. */
|
||||
bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
|
||||
for(i = 1; i < nelems; i++)
|
||||
if (!sglist[i].dma_length)
|
||||
sgprv = NULL;
|
||||
for_each_sg(sglist, sg, nelems, i) {
|
||||
if (sg->dma_length == 0)
|
||||
break;
|
||||
i--;
|
||||
npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
|
||||
sgprv = sg;
|
||||
}
|
||||
|
||||
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
|
||||
- bus_addr) >> IO_PAGE_SHIFT;
|
||||
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
|
||||
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/irq.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/irq.h>
|
||||
|
@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
|
|||
int nused, int nelems, unsigned long prot)
|
||||
{
|
||||
struct scatterlist *dma_sg = sg;
|
||||
struct scatterlist *sg_end = sg + nelems;
|
||||
struct scatterlist *sg_end = sg_last(sg, nelems);
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
|
@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
|
|||
len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
|
||||
break;
|
||||
}
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
pteval = (pteval & IOPTE_PAGE);
|
||||
|
@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
|
|||
}
|
||||
|
||||
pteval = (pteval & IOPTE_PAGE) + len;
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
|
||||
/* Skip over any tail mappings we've fully mapped,
|
||||
* adjusting pteval along the way. Stop when we
|
||||
* detect a page crossing event.
|
||||
*/
|
||||
while (sg < sg_end &&
|
||||
(pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
|
||||
while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
|
||||
(pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
|
||||
((pteval ^
|
||||
(SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
|
||||
pteval += sg->length;
|
||||
sg++;
|
||||
if (sg == sg_end)
|
||||
break;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
|
||||
pteval = ~0UL;
|
||||
} while (dma_npages != 0);
|
||||
dma_sg++;
|
||||
dma_sg = sg_next(dma_sg);
|
||||
}
|
||||
|
||||
if (unlikely(iommu_batch_end() < 0L))
|
||||
|
@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
|
|||
sgtmp = sglist;
|
||||
while (used && sgtmp->dma_length) {
|
||||
sgtmp->dma_address += dma_base;
|
||||
sgtmp++;
|
||||
sgtmp = sg_next(sgtmp);
|
||||
used--;
|
||||
}
|
||||
used = nelems - used;
|
||||
|
@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
|||
struct pci_pbm_info *pbm;
|
||||
struct iommu *iommu;
|
||||
unsigned long flags, i, npages;
|
||||
struct scatterlist *sg, *sgprv;
|
||||
long entry;
|
||||
u32 devhandle, bus_addr;
|
||||
|
||||
|
@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
|||
devhandle = pbm->devhandle;
|
||||
|
||||
bus_addr = sglist->dma_address & IO_PAGE_MASK;
|
||||
|
||||
for (i = 1; i < nelems; i++)
|
||||
if (sglist[i].dma_length == 0)
|
||||
sgprv = NULL;
|
||||
for_each_sg(sglist, sg, nelems, i) {
|
||||
if (sg->dma_length == 0)
|
||||
break;
|
||||
i--;
|
||||
npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
|
||||
|
||||
sgprv = sg;
|
||||
}
|
||||
|
||||
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
|
||||
bus_addr) >> IO_PAGE_SHIFT;
|
||||
|
||||
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
#include <linux/pci_ids.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/calgary.h>
|
||||
#include <asm/tce.h>
|
||||
|
@ -384,31 +385,32 @@ static void calgary_unmap_sg(struct device *dev,
|
|||
struct scatterlist *sglist, int nelems, int direction)
|
||||
{
|
||||
struct iommu_table *tbl = find_iommu_table(dev);
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
if (!translate_phb(to_pci_dev(dev)))
|
||||
return;
|
||||
|
||||
while (nelems--) {
|
||||
for_each_sg(sglist, s, nelems, i) {
|
||||
unsigned int npages;
|
||||
dma_addr_t dma = sglist->dma_address;
|
||||
unsigned int dmalen = sglist->dma_length;
|
||||
dma_addr_t dma = s->dma_address;
|
||||
unsigned int dmalen = s->dma_length;
|
||||
|
||||
if (dmalen == 0)
|
||||
break;
|
||||
|
||||
npages = num_dma_pages(dma, dmalen);
|
||||
iommu_free(tbl, dma, npages);
|
||||
sglist++;
|
||||
}
|
||||
}
|
||||
|
||||
static int calgary_nontranslate_map_sg(struct device* dev,
|
||||
struct scatterlist *sg, int nelems, int direction)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nelems; i++ ) {
|
||||
struct scatterlist *s = &sg[i];
|
||||
for_each_sg(sg, s, nelems, i) {
|
||||
BUG_ON(!s->page);
|
||||
s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
|
||||
s->dma_length = s->length;
|
||||
|
@ -420,6 +422,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
int nelems, int direction)
|
||||
{
|
||||
struct iommu_table *tbl = find_iommu_table(dev);
|
||||
struct scatterlist *s;
|
||||
unsigned long vaddr;
|
||||
unsigned int npages;
|
||||
unsigned long entry;
|
||||
|
@ -428,8 +431,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
if (!translate_phb(to_pci_dev(dev)))
|
||||
return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
|
||||
|
||||
for (i = 0; i < nelems; i++ ) {
|
||||
struct scatterlist *s = &sg[i];
|
||||
for_each_sg(sg, s, nelems, i) {
|
||||
BUG_ON(!s->page);
|
||||
|
||||
vaddr = (unsigned long)page_address(s->page) + s->offset;
|
||||
|
@ -454,9 +456,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
return nelems;
|
||||
error:
|
||||
calgary_unmap_sg(dev, sg, nelems, direction);
|
||||
for (i = 0; i < nelems; i++) {
|
||||
sg[i].dma_address = bad_dma_address;
|
||||
sg[i].dma_length = 0;
|
||||
for_each_sg(sg, s, nelems, i) {
|
||||
sg->dma_address = bad_dma_address;
|
||||
sg->dma_length = 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <linux/interrupt.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/mtrr.h>
|
||||
|
@ -278,10 +279,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
|
|||
*/
|
||||
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nents; i++) {
|
||||
struct scatterlist *s = &sg[i];
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
if (!s->dma_length || !s->length)
|
||||
break;
|
||||
gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
|
||||
|
@ -292,14 +293,14 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
|
|||
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
|
||||
int nents, int dir)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
#ifdef CONFIG_IOMMU_DEBUG
|
||||
printk(KERN_DEBUG "dma_map_sg overflow\n");
|
||||
#endif
|
||||
|
||||
for (i = 0; i < nents; i++ ) {
|
||||
struct scatterlist *s = &sg[i];
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
unsigned long addr = page_to_phys(s->page) + s->offset;
|
||||
if (nonforced_iommu(dev, addr, s->length)) {
|
||||
addr = dma_map_area(dev, addr, s->length, dir);
|
||||
|
@ -319,23 +320,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
|
|||
}
|
||||
|
||||
/* Map multiple scatterlist entries continuous into the first. */
|
||||
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
|
||||
static int __dma_map_cont(struct scatterlist *start, int nelems,
|
||||
struct scatterlist *sout, unsigned long pages)
|
||||
{
|
||||
unsigned long iommu_start = alloc_iommu(pages);
|
||||
unsigned long iommu_page = iommu_start;
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
if (iommu_start == -1)
|
||||
return -1;
|
||||
|
||||
for (i = start; i < stopat; i++) {
|
||||
struct scatterlist *s = &sg[i];
|
||||
|
||||
for_each_sg(start, s, nelems, i) {
|
||||
unsigned long pages, addr;
|
||||
unsigned long phys_addr = s->dma_address;
|
||||
|
||||
BUG_ON(i > start && s->offset);
|
||||
if (i == start) {
|
||||
BUG_ON(s != start && s->offset);
|
||||
if (s == start) {
|
||||
*sout = *s;
|
||||
sout->dma_address = iommu_bus_base;
|
||||
sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
|
||||
|
@ -357,17 +358,17 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
|
||||
static inline int dma_map_cont(struct scatterlist *start, int nelems,
|
||||
struct scatterlist *sout,
|
||||
unsigned long pages, int need)
|
||||
{
|
||||
if (!need) {
|
||||
BUG_ON(stopat - start != 1);
|
||||
*sout = sg[start];
|
||||
sout->dma_length = sg[start].length;
|
||||
if (!need) {
|
||||
BUG_ON(nelems != 1);
|
||||
*sout = *start;
|
||||
sout->dma_length = start->length;
|
||||
return 0;
|
||||
}
|
||||
return __dma_map_cont(sg, start, stopat, sout, pages);
|
||||
}
|
||||
return __dma_map_cont(start, nelems, sout, pages);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -381,6 +382,7 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
|
|||
int start;
|
||||
unsigned long pages = 0;
|
||||
int need = 0, nextneed;
|
||||
struct scatterlist *s, *ps, *start_sg, *sgmap;
|
||||
|
||||
if (nents == 0)
|
||||
return 0;
|
||||
|
@ -390,8 +392,9 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
|
|||
|
||||
out = 0;
|
||||
start = 0;
|
||||
for (i = 0; i < nents; i++) {
|
||||
struct scatterlist *s = &sg[i];
|
||||
start_sg = sgmap = sg;
|
||||
ps = NULL; /* shut up gcc */
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
dma_addr_t addr = page_to_phys(s->page) + s->offset;
|
||||
s->dma_address = addr;
|
||||
BUG_ON(s->length == 0);
|
||||
|
@ -400,29 +403,33 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
|
|||
|
||||
/* Handle the previous not yet processed entries */
|
||||
if (i > start) {
|
||||
struct scatterlist *ps = &sg[i-1];
|
||||
/* Can only merge when the last chunk ends on a page
|
||||
boundary and the new one doesn't have an offset. */
|
||||
if (!iommu_merge || !nextneed || !need || s->offset ||
|
||||
(ps->offset + ps->length) % PAGE_SIZE) {
|
||||
if (dma_map_cont(sg, start, i, sg+out, pages,
|
||||
need) < 0)
|
||||
(ps->offset + ps->length) % PAGE_SIZE) {
|
||||
if (dma_map_cont(start_sg, i - start, sgmap,
|
||||
pages, need) < 0)
|
||||
goto error;
|
||||
out++;
|
||||
sgmap = sg_next(sgmap);
|
||||
pages = 0;
|
||||
start = i;
|
||||
start = i;
|
||||
start_sg = s;
|
||||
}
|
||||
}
|
||||
|
||||
need = nextneed;
|
||||
pages += to_pages(s->offset, s->length);
|
||||
ps = s;
|
||||
}
|
||||
if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
|
||||
if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
|
||||
goto error;
|
||||
out++;
|
||||
flush_gart();
|
||||
if (out < nents)
|
||||
sg[out].dma_length = 0;
|
||||
if (out < nents) {
|
||||
sgmap = sg_next(sgmap);
|
||||
sgmap->dma_length = 0;
|
||||
}
|
||||
return out;
|
||||
|
||||
error:
|
||||
|
@ -437,8 +444,8 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
|
|||
if (panic_on_overflow)
|
||||
panic("dma_map_sg: overflow on %lu pages\n", pages);
|
||||
iommu_full(dev, pages << PAGE_SHIFT, dir);
|
||||
for (i = 0; i < nents; i++)
|
||||
sg[i].dma_address = bad_dma_address;
|
||||
for_each_sg(sg, s, nents, i)
|
||||
s->dma_address = bad_dma_address;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#include <linux/pci.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/processor.h>
|
||||
|
@ -57,10 +58,10 @@ static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
|
|||
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
|
||||
int nents, int direction)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nents; i++ ) {
|
||||
struct scatterlist *s = &sg[i];
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
BUG_ON(!s->page);
|
||||
s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
|
||||
if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
|
||||
|
|
|
@@ -908,7 +908,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
         }
 }
 
-static struct file_operations bsg_fops = {
+static const struct file_operations bsg_fops = {
         .read = bsg_read,
         .write = bsg_write,
         .poll = bsg_poll,

@ -712,6 +712,14 @@ struct request *elv_next_request(struct request_queue *q)
|
|||
int ret;
|
||||
|
||||
while ((rq = __elv_next_request(q)) != NULL) {
|
||||
/*
|
||||
* Kill the empty barrier place holder, the driver must
|
||||
* not ever see it.
|
||||
*/
|
||||
if (blk_empty_barrier(rq)) {
|
||||
end_queued_request(rq, 1);
|
||||
continue;
|
||||
}
|
||||
if (!(rq->cmd_flags & REQ_STARTED)) {
|
||||
/*
|
||||
* This is the first time the device driver
|
||||
|
@ -751,15 +759,8 @@ struct request *elv_next_request(struct request_queue *q)
|
|||
rq = NULL;
|
||||
break;
|
||||
} else if (ret == BLKPREP_KILL) {
|
||||
int nr_bytes = rq->hard_nr_sectors << 9;
|
||||
|
||||
if (!nr_bytes)
|
||||
nr_bytes = rq->data_len;
|
||||
|
||||
blkdev_dequeue_request(rq);
|
||||
rq->cmd_flags |= REQ_QUIET;
|
||||
end_that_request_chunk(rq, 0, nr_bytes);
|
||||
end_that_request_last(rq, 0);
|
||||
end_queued_request(rq, 0);
|
||||
} else {
|
||||
printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
|
||||
ret);
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <linux/cpu.h>
|
||||
#include <linux/blktrace_api.h>
|
||||
#include <linux/fault-inject.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
/*
|
||||
* for max sense size
|
||||
|
@ -304,23 +305,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
|
|||
|
||||
EXPORT_SYMBOL(blk_queue_ordered);
|
||||
|
||||
/**
|
||||
* blk_queue_issue_flush_fn - set function for issuing a flush
|
||||
* @q: the request queue
|
||||
* @iff: the function to be called issuing the flush
|
||||
*
|
||||
* Description:
|
||||
* If a driver supports issuing a flush command, the support is notified
|
||||
* to the block layer by defining it through this call.
|
||||
*
|
||||
**/
|
||||
void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
|
||||
{
|
||||
q->issue_flush_fn = iff;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(blk_queue_issue_flush_fn);
|
||||
|
||||
/*
|
||||
* Cache flushing for ordered writes handling
|
||||
*/
|
||||
|
@ -377,10 +361,12 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
|
|||
/*
|
||||
* Okay, sequence complete.
|
||||
*/
|
||||
rq = q->orig_bar_rq;
|
||||
uptodate = q->orderr ? q->orderr : 1;
|
||||
uptodate = 1;
|
||||
if (q->orderr)
|
||||
uptodate = q->orderr;
|
||||
|
||||
q->ordseq = 0;
|
||||
rq = q->orig_bar_rq;
|
||||
|
||||
end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
|
||||
end_that_request_last(rq, uptodate);
|
||||
|
@ -445,7 +431,8 @@ static inline struct request *start_ordered(struct request_queue *q,
|
|||
rq_init(q, rq);
|
||||
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
|
||||
rq->cmd_flags |= REQ_RW;
|
||||
rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
|
||||
if (q->ordered & QUEUE_ORDERED_FUA)
|
||||
rq->cmd_flags |= REQ_FUA;
|
||||
rq->elevator_private = NULL;
|
||||
rq->elevator_private2 = NULL;
|
||||
init_request_from_bio(rq, q->orig_bar_rq->bio);
|
||||
|
@ -455,9 +442,12 @@ static inline struct request *start_ordered(struct request_queue *q,
|
|||
* Queue ordered sequence. As we stack them at the head, we
|
||||
* need to queue in reverse order. Note that we rely on that
|
||||
* no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
|
||||
* request gets inbetween ordered sequence.
|
||||
* request gets inbetween ordered sequence. If this request is
|
||||
* an empty barrier, we don't need to do a postflush ever since
|
||||
* there will be no data written between the pre and post flush.
|
||||
* Hence a single flush will suffice.
|
||||
*/
|
||||
if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
|
||||
if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
|
||||
queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
|
||||
else
|
||||
q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
|
||||
|
@ -481,7 +471,7 @@ static inline struct request *start_ordered(struct request_queue *q,
|
|||
int blk_do_ordered(struct request_queue *q, struct request **rqp)
|
||||
{
|
||||
struct request *rq = *rqp;
|
||||
int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
|
||||
const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
|
||||
|
||||
if (!q->ordseq) {
|
||||
if (!is_barrier)
|
||||
|
@ -1329,9 +1319,10 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
|
|||
* must make sure sg can hold rq->nr_phys_segments entries
|
||||
*/
|
||||
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
|
||||
struct scatterlist *sg)
|
||||
struct scatterlist *sglist)
|
||||
{
|
||||
struct bio_vec *bvec, *bvprv;
|
||||
struct scatterlist *next_sg, *sg;
|
||||
struct req_iterator iter;
|
||||
int nsegs, cluster;
|
||||
|
||||
|
@ -1342,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
|
|||
* for each bio in rq
|
||||
*/
|
||||
bvprv = NULL;
|
||||
sg = next_sg = &sglist[0];
|
||||
rq_for_each_segment(bvec, rq, iter) {
|
||||
int nbytes = bvec->bv_len;
|
||||
|
||||
if (bvprv && cluster) {
|
||||
if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
|
||||
if (sg->length + nbytes > q->max_segment_size)
|
||||
goto new_segment;
|
||||
|
||||
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
|
||||
|
@ -1354,14 +1346,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
|
|||
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
|
||||
goto new_segment;
|
||||
|
||||
sg[nsegs - 1].length += nbytes;
|
||||
sg->length += nbytes;
|
||||
} else {
|
||||
new_segment:
|
||||
memset(&sg[nsegs],0,sizeof(struct scatterlist));
|
||||
sg[nsegs].page = bvec->bv_page;
|
||||
sg[nsegs].length = nbytes;
|
||||
sg[nsegs].offset = bvec->bv_offset;
|
||||
sg = next_sg;
|
||||
next_sg = sg_next(sg);
|
||||
|
||||
sg->page = bvec->bv_page;
|
||||
sg->length = nbytes;
|
||||
sg->offset = bvec->bv_offset;
|
||||
nsegs++;
|
||||
}
|
||||
bvprv = bvec;
|
||||
|
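A hedged sketch of the driver side of blk_rq_map_sg(): the caller provides a scatterlist with room for rq->nr_phys_segments entries, as the comment above requires, and then maps the filled entries for DMA:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* 'sglist' must hold at least rq->nr_phys_segments entries. */
static int example_build_sglist(struct request_queue *q, struct request *rq,
                                struct scatterlist *sglist)
{
        int nsegs;

        nsegs = blk_rq_map_sg(q, rq, sglist);

        /* sglist[0..nsegs-1] are now filled; hand them to dma_map_sg()
         * or pci_map_sg() before programming the hardware. */
        return nsegs;
}
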
@ -2660,6 +2653,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
|
|||
|
||||
EXPORT_SYMBOL(blk_execute_rq);
|
||||
|
||||
static void bio_end_empty_barrier(struct bio *bio, int err)
|
||||
{
|
||||
if (err)
|
||||
clear_bit(BIO_UPTODATE, &bio->bi_flags);
|
||||
|
||||
complete(bio->bi_private);
|
||||
}
|
||||
|
||||
/**
|
||||
* blkdev_issue_flush - queue a flush
|
||||
* @bdev: blockdev to issue flush for
|
||||
|
@ -2672,7 +2673,10 @@ EXPORT_SYMBOL(blk_execute_rq);
|
|||
*/
|
||||
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
|
||||
{
|
||||
DECLARE_COMPLETION_ONSTACK(wait);
|
||||
struct request_queue *q;
|
||||
struct bio *bio;
|
||||
int ret;
|
||||
|
||||
if (bdev->bd_disk == NULL)
|
||||
return -ENXIO;
|
||||
|
@ -2680,10 +2684,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
|
|||
q = bdev_get_queue(bdev);
|
||||
if (!q)
|
||||
return -ENXIO;
|
||||
if (!q->issue_flush_fn)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
|
||||
bio = bio_alloc(GFP_KERNEL, 0);
|
||||
if (!bio)
|
||||
return -ENOMEM;
|
||||
|
||||
bio->bi_end_io = bio_end_empty_barrier;
|
||||
bio->bi_private = &wait;
|
||||
bio->bi_bdev = bdev;
|
||||
submit_bio(1 << BIO_RW_BARRIER, bio);
|
||||
|
||||
wait_for_completion(&wait);
|
||||
|
||||
/*
|
||||
* The driver must store the error location in ->bi_sector, if
|
||||
* it supports it. For non-stacked drivers, this should be copied
|
||||
* from rq->sector.
|
||||
*/
|
||||
if (error_sector)
|
||||
*error_sector = bio->bi_sector;
|
||||
|
||||
ret = 0;
|
||||
if (!bio_flagged(bio, BIO_UPTODATE))
|
||||
ret = -EIO;
|
||||
|
||||
bio_put(bio);
|
||||
return ret;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(blkdev_issue_flush);
|
||||
|
@ -3051,7 +3077,7 @@ static inline void blk_partition_remap(struct bio *bio)
|
|||
{
|
||||
struct block_device *bdev = bio->bi_bdev;
|
||||
|
||||
if (bdev != bdev->bd_contains) {
|
||||
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
|
||||
struct hd_struct *p = bdev->bd_part;
|
||||
const int rw = bio_data_dir(bio);
|
||||
|
||||
|
@ -3117,6 +3143,35 @@ static inline int should_fail_request(struct bio *bio)
|
|||
|
||||
#endif /* CONFIG_FAIL_MAKE_REQUEST */
|
||||
|
||||
/*
|
||||
* Check whether this bio extends beyond the end of the device.
|
||||
*/
|
||||
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
|
||||
{
|
||||
sector_t maxsector;
|
||||
|
||||
if (!nr_sectors)
|
||||
return 0;
|
||||
|
||||
/* Test device or partition size, when known. */
|
||||
maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
|
||||
if (maxsector) {
|
||||
sector_t sector = bio->bi_sector;
|
||||
|
||||
if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
|
||||
/*
|
||||
* This may well happen - the kernel calls bread()
|
||||
* without checking the size of the device, e.g., when
|
||||
* mounting a device.
|
||||
*/
|
||||
handle_bad_sector(bio);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* generic_make_request: hand a buffer to its device driver for I/O
|
||||
* @bio: The bio describing the location in memory and on the device.
|
||||
|
@ -3144,27 +3199,14 @@ static inline int should_fail_request(struct bio *bio)
|
|||
static inline void __generic_make_request(struct bio *bio)
|
||||
{
|
||||
struct request_queue *q;
|
||||
sector_t maxsector;
|
||||
sector_t old_sector;
|
||||
int ret, nr_sectors = bio_sectors(bio);
|
||||
dev_t old_dev;
|
||||
|
||||
might_sleep();
|
||||
/* Test device or partition size, when known. */
|
||||
maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
|
||||
if (maxsector) {
|
||||
sector_t sector = bio->bi_sector;
|
||||
|
||||
if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
/*
* This may well happen - the kernel calls bread()
* without checking the size of the device, e.g., when
* mounting a device.
*/
handle_bad_sector(bio);
goto end_io;
}
}
if (bio_check_eod(bio, nr_sectors))
goto end_io;

/*
* Resolve the mapping until finished. (drivers are
@@ -3191,7 +3233,7 @@ static inline void __generic_make_request(struct bio *bio)
break;
}

if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
if (unlikely(nr_sectors > q->max_hw_sectors)) {
printk("bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
@@ -3212,7 +3254,7 @@ static inline void __generic_make_request(struct bio *bio)
blk_partition_remap(bio);

if (old_sector != -1)
blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
old_sector);

blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
@@ -3220,21 +3262,8 @@ static inline void __generic_make_request(struct bio *bio)
old_sector = bio->bi_sector;
old_dev = bio->bi_bdev->bd_dev;

maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
if (maxsector) {
sector_t sector = bio->bi_sector;

if (maxsector < nr_sectors ||
maxsector - nr_sectors < sector) {
/*
* This may well happen - partitions are not
* checked to make sure they are within the size
* of the whole device.
*/
handle_bad_sector(bio);
goto end_io;
}
}
if (bio_check_eod(bio, nr_sectors))
goto end_io;

ret = q->make_request_fn(q, bio);
} while (ret);
@@ -3307,23 +3336,32 @@ void submit_bio(int rw, struct bio *bio)
{
int count = bio_sectors(bio);

BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);
bio->bi_rw |= rw;
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
task_io_account_read(bio->bi_size);
count_vm_events(PGPGIN, count);
}

if (unlikely(block_dump)) {
char b[BDEVNAME_SIZE];
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
current->comm, current->pid,
(rw & WRITE) ? "WRITE" : "READ",
(unsigned long long)bio->bi_sector,
bdevname(bio->bi_bdev,b));
/*
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
if (!bio_empty_barrier(bio)) {

BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);

if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
task_io_account_read(bio->bi_size);
count_vm_events(PGPGIN, count);
}

if (unlikely(block_dump)) {
char b[BDEVNAME_SIZE];
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
current->comm, current->pid,
(rw & WRITE) ? "WRITE" : "READ",
(unsigned long long)bio->bi_sector,
bdevname(bio->bi_bdev,b));
}
}

generic_make_request(bio);
@@ -3399,6 +3437,14 @@ static int __end_that_request_first(struct request *req, int uptodate,
while ((bio = req->bio) != NULL) {
int nbytes;

/*
* For an empty barrier request, the low level driver must
* store a potential error location in ->sector. We pass
* that back up in ->bi_sector.
*/
if (blk_empty_barrier(req))
bio->bi_sector = req->sector;

if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next;
nbytes = bio->bi_size;
@@ -3564,7 +3610,7 @@ static struct notifier_block blk_cpu_notifier __cpuinitdata = {
* Description:
* Ends all I/O on a request. It does not handle partial completions,
* unless the driver actually implements this in its completion callback
* through requeueing. Theh actual completion happens out-of-order,
* through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
**/
@@ -3627,15 +3673,83 @@ void end_that_request_last(struct request *req, int uptodate)

EXPORT_SYMBOL(end_that_request_last);

void end_request(struct request *req, int uptodate)
static inline void __end_request(struct request *rq, int uptodate,
unsigned int nr_bytes, int dequeue)
{
if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
end_that_request_last(req, uptodate);
if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
if (dequeue)
blkdev_dequeue_request(rq);
add_disk_randomness(rq->rq_disk);
end_that_request_last(rq, uptodate);
}
}

static unsigned int rq_byte_size(struct request *rq)
{
if (blk_fs_request(rq))
return rq->hard_nr_sectors << 9;

return rq->data_len;
}

/**
* end_queued_request - end all I/O on a queued request
* @rq: the request being processed
* @uptodate: error value or 0/1 uptodate flag
*
* Description:
* Ends all I/O on a request, and removes it from the block layer queues.
* Not suitable for normal IO completion, unless the driver still has
* the request attached to the block layer.
*
**/
void end_queued_request(struct request *rq, int uptodate)
{
__end_request(rq, uptodate, rq_byte_size(rq), 1);
}
EXPORT_SYMBOL(end_queued_request);

/**
* end_dequeued_request - end all I/O on a dequeued request
* @rq: the request being processed
* @uptodate: error value or 0/1 uptodate flag
*
* Description:
* Ends all I/O on a request. The request must already have been
* dequeued using blkdev_dequeue_request(), as is normally the case
* for most drivers.
*
**/
void end_dequeued_request(struct request *rq, int uptodate)
{
__end_request(rq, uptodate, rq_byte_size(rq), 0);
}
EXPORT_SYMBOL(end_dequeued_request);


/**
* end_request - end I/O on the current segment of the request
* @rq: the request being processed
* @uptodate: error value or 0/1 uptodate flag
*
* Description:
* Ends I/O on the current segment of a request. If that is the only
* remaining segment, the request is also completed and freed.
*
* This is a remnant of how older block drivers handled IO completions.
* Modern drivers typically end IO on the full request in one go, unless
* they have a residual value to account for. For that case this function
* isn't really useful, unless the residual just happens to be the
* full current segment. In other words, don't use this function in new
* code. Either use end_request_completely(), or the
* end_that_request_chunk() (along with end_that_request_last()) for
* partial completions.
*
**/
void end_request(struct request *req, int uptodate)
{
__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
}
EXPORT_SYMBOL(end_request);

static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
@@ -3949,7 +4063,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_hw_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
return queue_var_show(q->max_phys_segments, page);
}

static ssize_t queue_max_segments_store(struct request_queue *q,
const char *page, size_t count)
{
unsigned long segments;
ssize_t ret = queue_var_store(&segments, page, count);

spin_lock_irq(q->queue_lock);
q->max_phys_segments = segments;
spin_unlock_irq(q->queue_lock);

return ret;
}
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -3973,6 +4103,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
.attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
.show = queue_max_segments_show,
.store = queue_max_segments_store,
};

static struct queue_sysfs_entry queue_iosched_entry = {
.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
.show = elv_iosched_show,
@@ -3984,6 +4120,7 @@ static struct attribute *default_attrs[] = {
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
&queue_max_segments_entry.attr,
&queue_iosched_entry.attr,
NULL,
};

@@ -77,7 +77,7 @@ static int update2(struct hash_desc *desc,

if (!nbytes)
break;
sg = sg_next(sg);
sg = scatterwalk_sg_next(sg);
}

return 0;

@@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
walk->offset += PAGE_SIZE - 1;
walk->offset &= PAGE_MASK;
if (walk->offset >= walk->sg->offset + walk->sg->length)
scatterwalk_start(walk, sg_next(walk->sg));
scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
}
}

@@ -20,7 +20,7 @@

#include "internal.h"

static inline struct scatterlist *sg_next(struct scatterlist *sg)
static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
{
return (++sg)->length ? sg : (void *)sg->page;
}

@@ -1410,7 +1410,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
*/
unsigned ata_exec_internal_sg(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, struct scatterlist *sg,
int dma_dir, struct scatterlist *sgl,
unsigned int n_elem, unsigned long timeout)
{
struct ata_link *link = dev->link;
@@ -1472,11 +1472,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
unsigned int i, buflen = 0;
struct scatterlist *sg;

for (i = 0; i < n_elem; i++)
buflen += sg[i].length;
for_each_sg(sgl, sg, n_elem, i)
buflen += sg->length;

ata_sg_init(qc, sg, n_elem);
ata_sg_init(qc, sgl, n_elem);
qc->nbytes = buflen;
}

@@ -4292,7 +4293,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
if (qc->n_elem)
dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
/* restore last sg */
sg[qc->orig_n_elem - 1].length += qc->pad_len;
sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
if (pad_buf) {
struct scatterlist *psg = &qc->pad_sgent;
void *addr = kmap_atomic(psg->page, KM_IRQ0);
@@ -4547,6 +4548,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
qc->orig_n_elem = 1;
qc->buf_virt = buf;
qc->nbytes = buflen;
qc->cursg = qc->__sg;

sg_init_one(&qc->sgent, buf, buflen);
}
@@ -4572,6 +4574,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
qc->__sg = sg;
qc->n_elem = n_elem;
qc->orig_n_elem = n_elem;
qc->cursg = qc->__sg;
}

/**
@@ -4661,7 +4664,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->__sg;
struct scatterlist *lsg = &sg[qc->n_elem - 1];
struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
int n_elem, pre_n_elem, dir, trim_sg = 0;

VPRINTK("ENTER, ata%u\n", ap->print_id);
@@ -4825,7 +4828,6 @@ void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct scatterlist *sg = qc->__sg;
struct ata_port *ap = qc->ap;
struct page *page;
unsigned int offset;
@@ -4834,8 +4836,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;

page = sg[qc->cursg].page;
offset = sg[qc->cursg].offset + qc->cursg_ofs;
page = qc->cursg->page;
offset = qc->cursg->offset + qc->cursg_ofs;

/* get the current page and offset */
page = nth_page(page, (offset >> PAGE_SHIFT));
@@ -4863,8 +4865,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;

if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
qc->cursg++;
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
qc->cursg_ofs = 0;
}
}
@@ -4950,16 +4952,18 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct scatterlist *sg = qc->__sg;
struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
struct ata_port *ap = qc->ap;
struct page *page;
unsigned char *buf;
unsigned int offset, count;
int no_more_sg = 0;

if (qc->curbytes + bytes >= qc->nbytes)
ap->hsm_task_state = HSM_ST_LAST;

next_sg:
if (unlikely(qc->cursg >= qc->n_elem)) {
if (unlikely(no_more_sg)) {
/*
* The end of qc->sg is reached and the device expects
* more data to transfer. In order not to overrun qc->sg
@@ -4982,7 +4986,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
return;
}

sg = &qc->__sg[qc->cursg];
sg = qc->cursg;

page = sg->page;
offset = sg->offset + qc->cursg_ofs;
@@ -5021,7 +5025,10 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
qc->cursg_ofs += count;

if (qc->cursg_ofs == sg->length) {
qc->cursg++;
if (qc->cursg == lsg)
no_more_sg = 1;

qc->cursg = sg_next(qc->cursg);
qc->cursg_ofs = 0;
}

@@ -801,8 +801,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev)

ata_scsi_sdev_config(sdev);

blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);

sdev->manage_start_stop = 1;

if (dev)

@@ -1191,7 +1191,6 @@ static inline void complete_buffers(struct bio *bio, int status)
{
while (bio) {
struct bio *xbh = bio->bi_next;
int nr_sectors = bio_sectors(bio);

bio->bi_next = NULL;
bio_endio(bio, status ? 0 : -EIO);
@@ -2570,6 +2569,7 @@ static void do_cciss_request(struct request_queue *q)
(int)creq->nr_sectors);
#endif /* CCISS_DEBUG */

memset(tmp_sg, 0, sizeof(tmp_sg));
seg = blk_rq_map_sg(q, creq, tmp_sg);

/* get the DMA records for the setup */

@@ -981,9 +981,8 @@ static void start_io(ctlr_info_t *h)
static inline void complete_buffers(struct bio *bio, int ok)
{
struct bio *xbh;
while(bio) {
int nr_sectors = bio_sectors(bio);

while (bio) {
xbh = bio->bi_next;
bio->bi_next = NULL;

@ -1133,16 +1133,21 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
|
|||
* Schedule reads for missing parts of the packet.
|
||||
*/
|
||||
for (f = 0; f < pkt->frames; f++) {
|
||||
struct bio_vec *vec;
|
||||
|
||||
int p, offset;
|
||||
if (written[f])
|
||||
continue;
|
||||
bio = pkt->r_bios[f];
|
||||
vec = bio->bi_io_vec;
|
||||
bio_init(bio);
|
||||
bio->bi_max_vecs = 1;
|
||||
bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
|
||||
bio->bi_bdev = pd->bdev;
|
||||
bio->bi_end_io = pkt_end_io_read;
|
||||
bio->bi_private = pkt;
|
||||
bio->bi_io_vec = vec;
|
||||
bio->bi_destructor = pkt_bio_destructor;
|
||||
|
||||
p = (f * CD_FRAMESIZE) / PAGE_SIZE;
|
||||
offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
|
||||
|
@ -1439,6 +1444,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
|
|||
pkt->w_bio->bi_bdev = pd->bdev;
|
||||
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
|
||||
pkt->w_bio->bi_private = pkt;
|
||||
pkt->w_bio->bi_io_vec = bvec;
|
||||
pkt->w_bio->bi_destructor = pkt_bio_destructor;
|
||||
for (f = 0; f < pkt->frames; f++)
|
||||
if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
|
||||
BUG();
|
||||
|
|
|
@ -414,26 +414,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
|
|||
req->cmd_type = REQ_TYPE_FLUSH;
|
||||
}
|
||||
|
||||
static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
|
||||
sector_t *sector)
|
||||
{
|
||||
struct ps3_storage_device *dev = q->queuedata;
|
||||
struct request *req;
|
||||
int res;
|
||||
|
||||
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
|
||||
|
||||
req = blk_get_request(q, WRITE, __GFP_WAIT);
|
||||
ps3disk_prepare_flush(q, req);
|
||||
res = blk_execute_rq(q, gendisk, req, 0);
|
||||
if (res)
|
||||
dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
|
||||
__func__, __LINE__, res);
|
||||
blk_put_request(req);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
static unsigned long ps3disk_mask;
|
||||
|
||||
static DEFINE_MUTEX(ps3disk_mask_mutex);
|
||||
|
@ -506,7 +486,6 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
|
|||
blk_queue_dma_alignment(queue, dev->blk_size-1);
|
||||
blk_queue_hardsect_size(queue, dev->blk_size);
|
||||
|
||||
blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
|
||||
blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
|
||||
ps3disk_prepare_flush);
|
||||
|
||||
|
|
|
@ -939,7 +939,8 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
|
|||
/* group sequential buffers into one large buffer */
|
||||
addr = page_to_phys(sg->page) + sg->offset;
|
||||
size = sg_dma_len(sg);
|
||||
while (sg++, --i) {
|
||||
while (--i) {
|
||||
sg = sg_next(sg);
|
||||
if ((addr + size) != page_to_phys(sg->page) + sg->offset)
|
||||
break;
|
||||
size += sg_dma_len(sg);
|
||||
|
|
|
@ -716,32 +716,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
|
|||
rq->buffer = rq->cmd;
|
||||
}
|
||||
|
||||
static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
|
||||
sector_t *error_sector)
|
||||
{
|
||||
ide_drive_t *drive = q->queuedata;
|
||||
struct request *rq;
|
||||
int ret;
|
||||
|
||||
if (!drive->wcache)
|
||||
return 0;
|
||||
|
||||
rq = blk_get_request(q, WRITE, __GFP_WAIT);
|
||||
|
||||
idedisk_prepare_flush(q, rq);
|
||||
|
||||
ret = blk_execute_rq(q, disk, rq, 0);
|
||||
|
||||
/*
|
||||
* if we failed and caller wants error offset, get it
|
||||
*/
|
||||
if (ret && error_sector)
|
||||
*error_sector = ide_get_error_location(drive, rq->cmd);
|
||||
|
||||
blk_put_request(rq);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is tightly woven into the driver->do_special can not touch.
|
||||
* DON'T do it again until a total personality rewrite is committed.
|
||||
|
@ -781,7 +755,6 @@ static void update_ordered(ide_drive_t *drive)
|
|||
struct hd_driveid *id = drive->id;
|
||||
unsigned ordered = QUEUE_ORDERED_NONE;
|
||||
prepare_flush_fn *prep_fn = NULL;
|
||||
issue_flush_fn *issue_fn = NULL;
|
||||
|
||||
if (drive->wcache) {
|
||||
unsigned long long capacity;
|
||||
|
@ -805,13 +778,11 @@ static void update_ordered(ide_drive_t *drive)
|
|||
if (barrier) {
|
||||
ordered = QUEUE_ORDERED_DRAIN_FLUSH;
|
||||
prep_fn = idedisk_prepare_flush;
|
||||
issue_fn = idedisk_issue_flush;
|
||||
}
|
||||
} else
|
||||
ordered = QUEUE_ORDERED_DRAIN;
|
||||
|
||||
blk_queue_ordered(drive->queue, ordered, prep_fn);
|
||||
blk_queue_issue_flush_fn(drive->queue, issue_fn);
|
||||
}
|
||||
|
||||
static int write_cache(ide_drive_t *drive, int arg)
|
||||
|
|
|
@ -280,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
|
|||
}
|
||||
}
|
||||
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
i--;
|
||||
}
|
||||
|
||||
|
|
|
@ -322,41 +322,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
|
|||
spin_unlock_irqrestore(&ide_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: probably move this somewhere else, name is bad too :)
|
||||
*/
|
||||
u64 ide_get_error_location(ide_drive_t *drive, char *args)
|
||||
{
|
||||
u32 high, low;
|
||||
u8 hcyl, lcyl, sect;
|
||||
u64 sector;
|
||||
|
||||
high = 0;
|
||||
hcyl = args[5];
|
||||
lcyl = args[4];
|
||||
sect = args[3];
|
||||
|
||||
if (ide_id_has_flush_cache_ext(drive->id)) {
|
||||
low = (hcyl << 16) | (lcyl << 8) | sect;
|
||||
HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
|
||||
high = ide_read_24(drive);
|
||||
} else {
|
||||
u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
|
||||
if (cur & 0x40) {
|
||||
high = cur & 0xf;
|
||||
low = (hcyl << 16) | (lcyl << 8) | sect;
|
||||
} else {
|
||||
low = hcyl * drive->head * drive->sect;
|
||||
low += lcyl * drive->sect;
|
||||
low += sect - 1;
|
||||
}
|
||||
}
|
||||
|
||||
sector = ((u64) high << 24) | low;
|
||||
return sector;
|
||||
}
|
||||
EXPORT_SYMBOL(ide_get_error_location);
|
||||
|
||||
/**
|
||||
* ide_end_drive_cmd - end an explicit drive command
|
||||
* @drive: command
|
||||
|
@ -881,7 +846,8 @@ void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
|
|||
ide_hwif_t *hwif = drive->hwif;
|
||||
|
||||
hwif->nsect = hwif->nleft = rq->nr_sectors;
|
||||
hwif->cursg = hwif->cursg_ofs = 0;
|
||||
hwif->cursg_ofs = 0;
|
||||
hwif->cursg = NULL;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
|
||||
|
|
|
@ -1349,7 +1349,7 @@ static int hwif_init(ide_hwif_t *hwif)
|
|||
if (!hwif->sg_max_nents)
|
||||
hwif->sg_max_nents = PRD_ENTRIES;
|
||||
|
||||
hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
|
||||
hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
|
||||
GFP_KERNEL);
|
||||
if (!hwif->sg_table) {
|
||||
printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
#include <linux/hdreg.h>
|
||||
#include <linux/ide.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/irq.h>
|
||||
|
@ -263,6 +264,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
|
|||
{
|
||||
ide_hwif_t *hwif = drive->hwif;
|
||||
struct scatterlist *sg = hwif->sg_table;
|
||||
struct scatterlist *cursg = hwif->cursg;
|
||||
struct page *page;
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
unsigned long flags;
|
||||
|
@ -270,8 +272,14 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
|
|||
unsigned int offset;
|
||||
u8 *buf;
|
||||
|
||||
page = sg[hwif->cursg].page;
|
||||
offset = sg[hwif->cursg].offset + hwif->cursg_ofs * SECTOR_SIZE;
|
||||
cursg = hwif->cursg;
|
||||
if (!cursg) {
|
||||
cursg = sg;
|
||||
hwif->cursg = sg;
|
||||
}
|
||||
|
||||
page = cursg->page;
|
||||
offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
|
||||
|
||||
/* get the current page and offset */
|
||||
page = nth_page(page, (offset >> PAGE_SHIFT));
|
||||
|
@ -285,8 +293,8 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
|
|||
hwif->nleft--;
|
||||
hwif->cursg_ofs++;
|
||||
|
||||
if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
|
||||
hwif->cursg++;
|
||||
if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
|
||||
hwif->cursg = sg_next(hwif->cursg);
|
||||
hwif->cursg_ofs = 0;
|
||||
}
|
||||
|
||||
|
@ -367,6 +375,8 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
|
|||
|
||||
static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
|
||||
{
|
||||
HWIF(drive)->cursg = NULL;
|
||||
|
||||
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
|
||||
ide_task_t *task = rq->special;
|
||||
|
||||
|
|
|
@ -296,7 +296,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
|
|||
cur_addr += tc;
|
||||
cur_len -= tc;
|
||||
}
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
i--;
|
||||
}
|
||||
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/ioc4.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
|
@ -537,7 +538,7 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
|
|||
}
|
||||
}
|
||||
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
i--;
|
||||
}
|
||||
|
||||
|
|
|
@ -1539,7 +1539,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
|
|||
cur_len -= tc;
|
||||
++table;
|
||||
}
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
i--;
|
||||
}
|
||||
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/scatterlist.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
#include "ipath_verbs.h"
|
||||
|
@ -96,17 +97,18 @@ static void ipath_dma_unmap_page(struct ib_device *dev,
|
|||
BUG_ON(!valid_dma_direction(direction));
|
||||
}
|
||||
|
||||
static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction direction)
|
||||
static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction direction)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
u64 addr;
|
||||
int i;
|
||||
int ret = nents;
|
||||
|
||||
BUG_ON(!valid_dma_direction(direction));
|
||||
|
||||
for (i = 0; i < nents; i++) {
|
||||
addr = (u64) page_address(sg[i].page);
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
addr = (u64) page_address(sg->page);
|
||||
/* TODO: handle highmem pages */
|
||||
if (!addr) {
|
||||
ret = 0;
|
||||
|
|
|
@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
|
|||
|
||||
if (cmd_dir == ISER_DIR_OUT) {
|
||||
/* copy the unaligned sg the buffer which is used for RDMA */
|
||||
struct scatterlist *sg = (struct scatterlist *)data->buf;
|
||||
struct scatterlist *sgl = (struct scatterlist *)data->buf;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
char *p, *from;
|
||||
|
||||
for (p = mem, i = 0; i < data->size; i++) {
|
||||
from = kmap_atomic(sg[i].page, KM_USER0);
|
||||
p = mem;
|
||||
for_each_sg(sgl, sg, data->size, i) {
|
||||
from = kmap_atomic(sg->page, KM_USER0);
|
||||
memcpy(p,
|
||||
from + sg[i].offset,
|
||||
sg[i].length);
|
||||
from + sg->offset,
|
||||
sg->length);
|
||||
kunmap_atomic(from, KM_USER0);
|
||||
p += sg[i].length;
|
||||
p += sg->length;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
|
|||
|
||||
if (cmd_dir == ISER_DIR_IN) {
|
||||
char *mem;
|
||||
struct scatterlist *sg;
|
||||
struct scatterlist *sgl, *sg;
|
||||
unsigned char *p, *to;
|
||||
unsigned int sg_size;
|
||||
int i;
|
||||
|
@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
|
|||
/* copy back read RDMA to unaligned sg */
|
||||
mem = mem_copy->copy_buf;
|
||||
|
||||
sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
|
||||
sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
|
||||
sg_size = iser_ctask->data[ISER_DIR_IN].size;
|
||||
|
||||
for (p = mem, i = 0; i < sg_size; i++){
|
||||
to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
|
||||
memcpy(to + sg[i].offset,
|
||||
p = mem;
|
||||
for_each_sg(sgl, sg, sg_size, i) {
|
||||
to = kmap_atomic(sg->page, KM_SOFTIRQ0);
|
||||
memcpy(to + sg->offset,
|
||||
p,
|
||||
sg[i].length);
|
||||
sg->length);
|
||||
kunmap_atomic(to, KM_SOFTIRQ0);
|
||||
p += sg[i].length;
|
||||
p += sg->length;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
|
|||
struct iser_page_vec *page_vec,
|
||||
struct ib_device *ibdev)
|
||||
{
|
||||
struct scatterlist *sg = (struct scatterlist *)data->buf;
|
||||
struct scatterlist *sgl = (struct scatterlist *)data->buf;
|
||||
struct scatterlist *sg;
|
||||
u64 first_addr, last_addr, page;
|
||||
int end_aligned;
|
||||
unsigned int cur_page = 0;
|
||||
|
@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
|
|||
int i;
|
||||
|
||||
/* compute the offset of first element */
|
||||
page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
|
||||
page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
|
||||
|
||||
for (i = 0; i < data->dma_nents; i++) {
|
||||
unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
|
||||
for_each_sg(sgl, sg, data->dma_nents, i) {
|
||||
unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
|
||||
|
||||
total_sz += dma_len;
|
||||
|
||||
first_addr = ib_sg_dma_address(ibdev, &sg[i]);
|
||||
first_addr = ib_sg_dma_address(ibdev, sg);
|
||||
last_addr = first_addr + dma_len;
|
||||
|
||||
end_aligned = !(last_addr & ~MASK_4K);
|
||||
|
||||
/* continue to collect page fragments till aligned or SG ends */
|
||||
while (!end_aligned && (i + 1 < data->dma_nents)) {
|
||||
sg = sg_next(sg);
|
||||
i++;
|
||||
dma_len = ib_sg_dma_len(ibdev, &sg[i]);
|
||||
dma_len = ib_sg_dma_len(ibdev, sg);
|
||||
total_sz += dma_len;
|
||||
last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
|
||||
last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
|
||||
end_aligned = !(last_addr & ~MASK_4K);
|
||||
}
|
||||
|
||||
|
@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
|
|||
static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
|
||||
struct ib_device *ibdev)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
struct scatterlist *sgl, *sg;
|
||||
u64 end_addr, next_addr;
|
||||
int i, cnt;
|
||||
unsigned int ret_len = 0;
|
||||
|
||||
sg = (struct scatterlist *)data->buf;
|
||||
sgl = (struct scatterlist *)data->buf;
|
||||
|
||||
for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
|
||||
cnt = 0;
|
||||
for_each_sg(sgl, sg, data->dma_nents, i) {
|
||||
/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
|
||||
"offset: %ld sz: %ld\n", i,
|
||||
(unsigned long)page_to_phys(sg[i].page),
|
||||
(unsigned long)sg[i].offset,
|
||||
(unsigned long)sg[i].length); */
|
||||
end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
|
||||
ib_sg_dma_len(ibdev, &sg[i]);
|
||||
(unsigned long)page_to_phys(sg->page),
|
||||
(unsigned long)sg->offset,
|
||||
(unsigned long)sg->length); */
|
||||
end_addr = ib_sg_dma_address(ibdev, sg) +
|
||||
ib_sg_dma_len(ibdev, sg);
|
||||
/* iser_dbg("Checking sg iobuf end address "
|
||||
"0x%08lX\n", end_addr); */
|
||||
if (i + 1 < data->dma_nents) {
|
||||
next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
|
||||
next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
|
||||
/* are i, i+1 fragments of the same page? */
|
||||
if (end_addr == next_addr)
|
||||
continue;
|
||||
|
@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
|
|||
static void iser_data_buf_dump(struct iser_data_buf *data,
|
||||
struct ib_device *ibdev)
|
||||
{
|
||||
struct scatterlist *sg = (struct scatterlist *)data->buf;
|
||||
struct scatterlist *sgl = (struct scatterlist *)data->buf;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < data->dma_nents; i++)
|
||||
for_each_sg(sgl, sg, data->dma_nents, i)
|
||||
iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
|
||||
"off:0x%x sz:0x%x dma_len:0x%x\n",
|
||||
i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
|
||||
sg[i].page, sg[i].offset,
|
||||
sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
|
||||
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
|
||||
sg->page, sg->offset,
|
||||
sg->length, ib_sg_dma_len(ibdev, sg));
|
||||
}
|
||||
|
||||
static void iser_dump_page_vec(struct iser_page_vec *page_vec)
|
||||
|
|
|
@ -441,33 +441,12 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
|
|||
return clone;
|
||||
}
|
||||
|
||||
static void crypt_free_buffer_pages(struct crypt_config *cc,
|
||||
struct bio *clone, unsigned int bytes)
|
||||
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
|
||||
{
|
||||
unsigned int i, start, end;
|
||||
unsigned int i;
|
||||
struct bio_vec *bv;
|
||||
|
||||
/*
|
||||
* This is ugly, but Jens Axboe thinks that using bi_idx in the
|
||||
* endio function is too dangerous at the moment, so I calculate the
|
||||
* correct position using bi_vcnt and bi_size.
|
||||
* The bv_offset and bv_len fields might already be modified but we
|
||||
* know that we always allocated whole pages.
|
||||
* A fix to the bi_idx issue in the kernel is in the works, so
|
||||
* we will hopefully be able to revert to the cleaner solution soon.
|
||||
*/
|
||||
i = clone->bi_vcnt - 1;
|
||||
bv = bio_iovec_idx(clone, i);
|
||||
end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
|
||||
start = end - bytes;
|
||||
|
||||
start >>= PAGE_SHIFT;
|
||||
if (!clone->bi_size)
|
||||
end = clone->bi_vcnt;
|
||||
else
|
||||
end >>= PAGE_SHIFT;
|
||||
|
||||
for (i = start; i < end; i++) {
|
||||
for (i = 0; i < clone->bi_vcnt; i++) {
|
||||
bv = bio_iovec_idx(clone, i);
|
||||
BUG_ON(!bv->bv_page);
|
||||
mempool_free(bv->bv_page, cc->page_pool);
|
||||
|
@ -519,7 +498,7 @@ static void crypt_endio(struct bio *clone, int error)
|
|||
* free the processed pages
|
||||
*/
|
||||
if (!read_io) {
|
||||
crypt_free_buffer_pages(cc, clone, clone->bi_size);
|
||||
crypt_free_buffer_pages(cc, clone);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -608,7 +587,7 @@ static void process_write(struct dm_crypt_io *io)
|
|||
ctx.idx_out = 0;
|
||||
|
||||
if (unlikely(crypt_convert(cc, &ctx) < 0)) {
|
||||
crypt_free_buffer_pages(cc, clone, clone->bi_size);
|
||||
crypt_free_buffer_pages(cc, clone);
|
||||
bio_put(clone);
|
||||
dec_pending(io, -EIO);
|
||||
return;
|
||||
|
|
|
@ -999,33 +999,6 @@ void dm_table_unplug_all(struct dm_table *t)
|
|||
}
|
||||
}
|
||||
|
||||
int dm_table_flush_all(struct dm_table *t)
|
||||
{
|
||||
struct list_head *d, *devices = dm_table_get_devices(t);
|
||||
int ret = 0;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < t->num_targets; i++)
|
||||
if (t->targets[i].type->flush)
|
||||
t->targets[i].type->flush(&t->targets[i]);
|
||||
|
||||
for (d = devices->next; d != devices; d = d->next) {
|
||||
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
|
||||
struct request_queue *q = bdev_get_queue(dd->bdev);
|
||||
int err;
|
||||
|
||||
if (!q->issue_flush_fn)
|
||||
err = -EOPNOTSUPP;
|
||||
else
|
||||
err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
|
||||
|
||||
if (!ret)
|
||||
ret = err;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct mapped_device *dm_table_get_md(struct dm_table *t)
|
||||
{
|
||||
dm_get(t->md);
|
||||
|
@ -1043,4 +1016,3 @@ EXPORT_SYMBOL(dm_table_get_md);
|
|||
EXPORT_SYMBOL(dm_table_put);
|
||||
EXPORT_SYMBOL(dm_table_get);
|
||||
EXPORT_SYMBOL(dm_table_unplug_all);
|
||||
EXPORT_SYMBOL(dm_table_flush_all);
|
||||
|
|
|
@ -840,21 +840,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
|
||||
sector_t *error_sector)
|
||||
{
|
||||
struct mapped_device *md = q->queuedata;
|
||||
struct dm_table *map = dm_get_table(md);
|
||||
int ret = -ENXIO;
|
||||
|
||||
if (map) {
|
||||
ret = dm_table_flush_all(map);
|
||||
dm_table_put(map);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void dm_unplug_all(struct request_queue *q)
|
||||
{
|
||||
struct mapped_device *md = q->queuedata;
|
||||
|
@ -1003,7 +988,6 @@ static struct mapped_device *alloc_dev(int minor)
|
|||
blk_queue_make_request(md->queue, dm_request);
|
||||
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
|
||||
md->queue->unplug_fn = dm_unplug_all;
|
||||
md->queue->issue_flush_fn = dm_flush_all;
|
||||
|
||||
md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
|
||||
if (!md->io_pool)
|
||||
|
|
|
@ -111,7 +111,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
|
|||
int dm_table_resume_targets(struct dm_table *t);
|
||||
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
|
||||
void dm_table_unplug_all(struct dm_table *t);
|
||||
int dm_table_flush_all(struct dm_table *t);
|
||||
|
||||
/*-----------------------------------------------------------------
|
||||
* A registry of target types.
|
||||
|
|
|
@ -92,25 +92,6 @@ static void linear_unplug(struct request_queue *q)
|
|||
}
|
||||
}
|
||||
|
||||
static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
|
||||
sector_t *error_sector)
|
||||
{
|
||||
mddev_t *mddev = q->queuedata;
|
||||
linear_conf_t *conf = mddev_to_conf(mddev);
|
||||
int i, ret = 0;
|
||||
|
||||
for (i=0; i < mddev->raid_disks && ret == 0; i++) {
|
||||
struct block_device *bdev = conf->disks[i].rdev->bdev;
|
||||
struct request_queue *r_queue = bdev_get_queue(bdev);
|
||||
|
||||
if (!r_queue->issue_flush_fn)
|
||||
ret = -EOPNOTSUPP;
|
||||
else
|
||||
ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int linear_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
|
@ -279,7 +260,6 @@ static int linear_run (mddev_t *mddev)
|
|||
|
||||
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
|
||||
mddev->queue->unplug_fn = linear_unplug;
|
||||
mddev->queue->issue_flush_fn = linear_issue_flush;
|
||||
mddev->queue->backing_dev_info.congested_fn = linear_congested;
|
||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||
return 0;
|
||||
|
|
|
@ -3463,7 +3463,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
|
|||
mddev->pers->stop(mddev);
|
||||
mddev->queue->merge_bvec_fn = NULL;
|
||||
mddev->queue->unplug_fn = NULL;
|
||||
mddev->queue->issue_flush_fn = NULL;
|
||||
mddev->queue->backing_dev_info.congested_fn = NULL;
|
||||
if (mddev->pers->sync_request)
|
||||
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
|
||||
|
|
|
@ -194,35 +194,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
|
|||
seq_printf (seq, "]");
|
||||
}
|
||||
|
||||
static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
|
||||
sector_t *error_sector)
|
||||
{
|
||||
mddev_t *mddev = q->queuedata;
|
||||
multipath_conf_t *conf = mddev_to_conf(mddev);
|
||||
int i, ret = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
|
||||
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
|
||||
if (rdev && !test_bit(Faulty, &rdev->flags)) {
|
||||
struct block_device *bdev = rdev->bdev;
|
||||
struct request_queue *r_queue = bdev_get_queue(bdev);
|
||||
|
||||
if (!r_queue->issue_flush_fn)
|
||||
ret = -EOPNOTSUPP;
|
||||
else {
|
||||
atomic_inc(&rdev->nr_pending);
|
||||
rcu_read_unlock();
|
||||
ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
|
||||
error_sector);
|
||||
rdev_dec_pending(rdev, mddev);
|
||||
rcu_read_lock();
|
||||
}
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
static int multipath_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
|
@ -527,7 +498,6 @@ static int multipath_run (mddev_t *mddev)
|
|||
mddev->array_size = mddev->size;
|
||||
|
||||
mddev->queue->unplug_fn = multipath_unplug;
|
||||
mddev->queue->issue_flush_fn = multipath_issue_flush;
|
||||
mddev->queue->backing_dev_info.congested_fn = multipath_congested;
|
||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||
|
||||
|
|
|
@ -40,26 +40,6 @@ static void raid0_unplug(struct request_queue *q)
|
|||
}
|
||||
}
|
||||
|
||||
static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
|
||||
sector_t *error_sector)
|
||||
{
|
||||
mddev_t *mddev = q->queuedata;
|
||||
raid0_conf_t *conf = mddev_to_conf(mddev);
|
||||
mdk_rdev_t **devlist = conf->strip_zone[0].dev;
|
||||
int i, ret = 0;
|
||||
|
||||
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
|
||||
struct block_device *bdev = devlist[i]->bdev;
|
||||
struct request_queue *r_queue = bdev_get_queue(bdev);
|
||||
|
||||
if (!r_queue->issue_flush_fn)
|
||||
ret = -EOPNOTSUPP;
|
||||
else
|
||||
ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int raid0_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
|
@ -250,7 +230,6 @@ static int create_strip_zones (mddev_t *mddev)
|
|||
|
||||
mddev->queue->unplug_fn = raid0_unplug;
|
||||
|
||||
mddev->queue->issue_flush_fn = raid0_issue_flush;
|
||||
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
|
||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||
|
||||
|
|
|
@ -567,36 +567,6 @@ static void raid1_unplug(struct request_queue *q)
|
|||
md_wakeup_thread(mddev->thread);
|
||||
}
|
||||
|
||||
static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
|
||||
sector_t *error_sector)
|
||||
{
|
||||
mddev_t *mddev = q->queuedata;
|
||||
conf_t *conf = mddev_to_conf(mddev);
|
||||
int i, ret = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
|
||||
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
||||
if (rdev && !test_bit(Faulty, &rdev->flags)) {
|
||||
struct block_device *bdev = rdev->bdev;
|
||||
struct request_queue *r_queue = bdev_get_queue(bdev);
|
||||
|
||||
if (!r_queue->issue_flush_fn)
|
||||
ret = -EOPNOTSUPP;
|
||||
else {
|
||||
atomic_inc(&rdev->nr_pending);
|
||||
rcu_read_unlock();
|
||||
ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
|
||||
error_sector);
|
||||
rdev_dec_pending(rdev, mddev);
|
||||
rcu_read_lock();
|
||||
}
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int raid1_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
|
@ -1997,7 +1967,6 @@ static int run(mddev_t *mddev)
|
|||
mddev->array_size = mddev->size;
|
||||
|
||||
mddev->queue->unplug_fn = raid1_unplug;
|
||||
mddev->queue->issue_flush_fn = raid1_issue_flush;
|
||||
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
|
||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||
|
||||
|
|
|
@ -611,36 +611,6 @@ static void raid10_unplug(struct request_queue *q)
|
|||
md_wakeup_thread(mddev->thread);
|
||||
}
|
||||
|
||||
static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
|
||||
sector_t *error_sector)
|
||||
{
|
||||
mddev_t *mddev = q->queuedata;
|
||||
conf_t *conf = mddev_to_conf(mddev);
|
||||
int i, ret = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
|
||||
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
||||
if (rdev && !test_bit(Faulty, &rdev->flags)) {
|
||||
struct block_device *bdev = rdev->bdev;
|
||||
struct request_queue *r_queue = bdev_get_queue(bdev);
|
||||
|
||||
if (!r_queue->issue_flush_fn)
|
||||
ret = -EOPNOTSUPP;
|
||||
else {
|
||||
atomic_inc(&rdev->nr_pending);
|
||||
rcu_read_unlock();
|
||||
ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
|
||||
error_sector);
|
||||
rdev_dec_pending(rdev, mddev);
|
||||
rcu_read_lock();
|
||||
}
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int raid10_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
|
@ -2118,7 +2088,6 @@ static int run(mddev_t *mddev)
|
|||
mddev->resync_max_sectors = size << conf->chunk_shift;
|
||||
|
||||
mddev->queue->unplug_fn = raid10_unplug;
|
||||
mddev->queue->issue_flush_fn = raid10_issue_flush;
|
||||
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
|
||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||
|
||||
|
|
|
@ -3204,36 +3204,6 @@ static void raid5_unplug_device(struct request_queue *q)
|
|||
unplug_slaves(mddev);
|
||||
}
|
||||
|
||||
static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
|
||||
sector_t *error_sector)
|
||||
{
|
||||
mddev_t *mddev = q->queuedata;
|
||||
raid5_conf_t *conf = mddev_to_conf(mddev);
|
||||
int i, ret = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
|
||||
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
|
||||
if (rdev && !test_bit(Faulty, &rdev->flags)) {
|
||||
struct block_device *bdev = rdev->bdev;
|
||||
struct request_queue *r_queue = bdev_get_queue(bdev);
|
||||
|
||||
if (!r_queue->issue_flush_fn)
|
||||
ret = -EOPNOTSUPP;
|
||||
else {
|
||||
atomic_inc(&rdev->nr_pending);
|
||||
rcu_read_unlock();
|
||||
ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
|
||||
error_sector);
|
||||
rdev_dec_pending(rdev, mddev);
|
||||
rcu_read_lock();
|
||||
}
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int raid5_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
|
@ -4263,7 +4233,6 @@ static int run(mddev_t *mddev)
|
|||
mdname(mddev));
|
||||
|
||||
mddev->queue->unplug_fn = raid5_unplug_device;
|
||||
mddev->queue->issue_flush_fn = raid5_issue_flush;
|
||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
|
||||
|
||||
|
|
|
@ -293,7 +293,7 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
|
|||
for (ii=0; ii < (numSgeThisFrame-1); ii++) {
|
||||
thisxfer = sg_dma_len(sg);
|
||||
if (thisxfer == 0) {
|
||||
sg ++; /* Get next SG element from the OS */
|
||||
sg = sg_next(sg); /* Get next SG element from the OS */
|
||||
sg_done++;
|
||||
continue;
|
||||
}
|
||||
|
@ -301,7 +301,7 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
|
|||
v2 = sg_dma_address(sg);
|
||||
mptscsih_add_sge(psge, sgflags | thisxfer, v2);
|
||||
|
||||
sg++; /* Get next SG element from the OS */
|
||||
sg = sg_next(sg); /* Get next SG element from the OS */
|
||||
psge += (sizeof(u32) + sizeof(dma_addr_t));
|
||||
sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
|
||||
sg_done++;
|
||||
|
@ -322,7 +322,7 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
|
|||
v2 = sg_dma_address(sg);
|
||||
mptscsih_add_sge(psge, sgflags | thisxfer, v2);
|
||||
/*
|
||||
sg++;
|
||||
sg = sg_next(sg);
|
||||
psge += (sizeof(u32) + sizeof(dma_addr_t));
|
||||
*/
|
||||
sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
|
||||
|
|
|
@ -148,29 +148,6 @@ static int i2o_block_device_flush(struct i2o_device *dev)
|
|||
return i2o_msg_post_wait(dev->iop, msg, 60);
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_block_issue_flush - device-flush interface for block-layer
|
||||
* @queue: the request queue of the device which should be flushed
|
||||
* @disk: gendisk
|
||||
* @error_sector: error offset
|
||||
*
|
||||
* Helper function to provide flush functionality to block-layer.
|
||||
*
|
||||
* Returns 0 on success or negative error code on failure.
|
||||
*/
|
||||
|
||||
static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
|
||||
sector_t * error_sector)
|
||||
{
|
||||
struct i2o_block_device *i2o_blk_dev = queue->queuedata;
|
||||
int rc = -ENODEV;
|
||||
|
||||
if (likely(i2o_blk_dev))
|
||||
rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* i2o_block_device_mount - Mount (load) the media of device dev
|
||||
* @dev: I2O device which should receive the mount request
|
||||
|
@ -1009,7 +986,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
|
|||
}
|
||||
|
||||
blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
|
||||
blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);
|
||||
|
||||
gd->major = I2O_MAJOR;
|
||||
gd->queue = queue;
|
||||
|
|
|
@ -153,14 +153,14 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
|
|||
blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
|
||||
blk_queue_max_segment_size(mq->queue, bouncesz);
|
||||
|
||||
mq->sg = kmalloc(sizeof(struct scatterlist),
|
||||
mq->sg = kzalloc(sizeof(struct scatterlist),
|
||||
GFP_KERNEL);
|
||||
if (!mq->sg) {
|
||||
ret = -ENOMEM;
|
||||
goto cleanup_queue;
|
||||
}
|
||||
|
||||
mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
|
||||
mq->bounce_sg = kzalloc(sizeof(struct scatterlist) *
|
||||
bouncesz / 512, GFP_KERNEL);
|
||||
if (!mq->bounce_sg) {
|
||||
ret = -ENOMEM;
|
||||
|
@ -177,7 +177,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
|
|||
blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
|
||||
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
|
||||
|
||||
mq->sg = kmalloc(sizeof(struct scatterlist) *
|
||||
mq->sg = kzalloc(sizeof(struct scatterlist) *
|
||||
host->max_phys_segs, GFP_KERNEL);
|
||||
if (!mq->sg) {
|
||||
ret = -ENOMEM;
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/ioctl.h>
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_tcq.h>
|
||||
|
|
|
@ -590,7 +590,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
|
|||
*/
|
||||
int
|
||||
zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
|
||||
struct scatterlist *sg, int sg_count, int max_sbals)
|
||||
struct scatterlist *sgl, int sg_count, int max_sbals)
|
||||
{
|
||||
int sg_index;
|
||||
struct scatterlist *sg_segment;
|
||||
|
@ -606,9 +606,7 @@ zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
|
|||
sbale->flags |= sbtype;
|
||||
|
||||
/* process all segements of scatter-gather list */
|
||||
for (sg_index = 0, sg_segment = sg, bytes = 0;
|
||||
sg_index < sg_count;
|
||||
sg_index++, sg_segment++) {
|
||||
for_each_sg(sgl, sg_segment, sg_count, sg_index) {
|
||||
retval = zfcp_qdio_sbals_from_segment(
|
||||
fsf_req,
|
||||
sbtype,
|
||||
|
|
|
@ -1990,6 +1990,7 @@ static struct scsi_host_template driver_template = {
|
|||
.max_sectors = TW_MAX_SECTORS,
|
||||
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
.shost_attrs = twa_host_attrs,
|
||||
.emulated = 1
|
||||
};
|
||||
|
|
|
@ -2261,6 +2261,7 @@ static struct scsi_host_template driver_template = {
|
|||
.max_sectors = TW_MAX_SECTORS,
|
||||
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
.shost_attrs = tw_host_attrs,
|
||||
.emulated = 1
|
||||
};
|
||||
|
|
|
@ -3575,6 +3575,7 @@ static struct scsi_host_template Bus_Logic_template = {
|
|||
.unchecked_isa_dma = 1,
|
||||
.max_sectors = 128,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -1066,7 +1066,8 @@ static struct scsi_host_template driver_template =
|
|||
.sg_tablesize = 32 /*SG_ALL*/ /*SG_NONE*/,
|
||||
.cmd_per_lun = 1 /* commands per lun */,
|
||||
.unchecked_isa_dma = 1 /* unchecked_isa_dma */,
|
||||
.use_clustering = ENABLE_CLUSTERING
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
};
|
||||
|
||||
#include "scsi_module.c"
|
||||
|
|
|
@ -1071,6 +1071,7 @@ static struct scsi_host_template inia100_template = {
|
|||
.sg_tablesize = SG_ALL,
|
||||
.cmd_per_lun = 1,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
};
|
||||
|
||||
static int __devinit inia100_probe_one(struct pci_dev *pdev,
|
||||
|
|
|
@ -944,6 +944,7 @@ static struct scsi_host_template aac_driver_template = {
|
|||
.cmd_per_lun = AAC_NUM_IO_FIB,
|
||||
#endif
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
.emulated = 1,
|
||||
};
|
||||
|
||||
|
|
|
@ -61,15 +61,15 @@ static void BAD_DMA(void *address, unsigned int length)
|
|||
}
|
||||
|
||||
static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
|
||||
struct scatterlist *sgpnt,
|
||||
struct scatterlist *sgp,
|
||||
int nseg,
|
||||
int badseg)
|
||||
{
|
||||
printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
|
||||
badseg, nseg,
|
||||
page_address(sgpnt[badseg].page) + sgpnt[badseg].offset,
|
||||
(unsigned long long)SCSI_SG_PA(&sgpnt[badseg]),
|
||||
sgpnt[badseg].length);
|
||||
page_address(sgp->page) + sgp->offset,
|
||||
(unsigned long long)SCSI_SG_PA(sgp),
|
||||
sgp->length);
|
||||
|
||||
/*
|
||||
* Not safe to continue.
|
||||
|
@ -691,7 +691,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
|||
memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
|
||||
|
||||
if (SCpnt->use_sg) {
|
||||
struct scatterlist *sgpnt;
|
||||
struct scatterlist *sg;
|
||||
struct chain *cptr;
|
||||
#ifdef DEBUG
|
||||
unsigned char *ptr;
|
||||
|
@ -699,23 +699,21 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
|||
int i;
|
||||
ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
|
||||
SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
|
||||
sgpnt = (struct scatterlist *) SCpnt->request_buffer;
|
||||
cptr = (struct chain *) SCpnt->host_scribble;
|
||||
if (cptr == NULL) {
|
||||
/* free the claimed mailbox slot */
|
||||
HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
}
|
||||
for (i = 0; i < SCpnt->use_sg; i++) {
|
||||
if (sgpnt[i].length == 0 || SCpnt->use_sg > 16 ||
|
||||
(((int) sgpnt[i].offset) & 1) || (sgpnt[i].length & 1)) {
|
||||
scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
|
||||
if (sg->length == 0 || SCpnt->use_sg > 16 ||
|
||||
(((int) sg->offset) & 1) || (sg->length & 1)) {
|
||||
unsigned char *ptr;
|
||||
printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
|
||||
for (i = 0; i < SCpnt->use_sg; i++) {
|
||||
scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
|
||||
printk(KERN_CRIT "%d: %p %d\n", i,
|
||||
(page_address(sgpnt[i].page) +
|
||||
sgpnt[i].offset),
|
||||
sgpnt[i].length);
|
||||
(page_address(sg->page) +
|
||||
sg->offset), sg->length);
|
||||
};
|
||||
printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
|
||||
ptr = (unsigned char *) &cptr[i];
|
||||
|
@ -723,10 +721,10 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
|||
printk("%02x ", ptr[i]);
|
||||
panic("Foooooooood fight!");
|
||||
};
|
||||
any2scsi(cptr[i].dataptr, SCSI_SG_PA(&sgpnt[i]));
|
||||
if (SCSI_SG_PA(&sgpnt[i]) + sgpnt[i].length - 1 > ISA_DMA_THRESHOLD)
|
||||
BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i);
|
||||
any2scsi(cptr[i].datalen, sgpnt[i].length);
|
||||
any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
|
||||
if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
|
||||
BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i);
|
||||
any2scsi(cptr[i].datalen, sg->length);
|
||||
};
|
||||
any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
|
||||
any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
|
||||
|
|
|
@ -563,6 +563,7 @@ static struct scsi_host_template aha1740_template = {
|
|||
.sg_tablesize = AHA1740_SCATTER,
|
||||
.cmd_per_lun = AHA1740_CMDLUN,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
.eh_abort_handler = aha1740_eh_abort_handler,
|
||||
};
|
||||
|
||||
|
|
|
@ -766,6 +766,7 @@ struct scsi_host_template aic79xx_driver_template = {
|
|||
.max_sectors = 8192,
|
||||
.cmd_per_lun = 2,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
.slave_alloc = ahd_linux_slave_alloc,
|
||||
.slave_configure = ahd_linux_slave_configure,
|
||||
.target_alloc = ahd_linux_target_alloc,
|
||||
|
|
|
@ -747,6 +747,7 @@ struct scsi_host_template aic7xxx_driver_template = {
|
|||
.max_sectors = 8192,
|
||||
.cmd_per_lun = 2,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
.slave_alloc = ahc_linux_slave_alloc,
|
||||
.slave_configure = ahc_linux_slave_configure,
|
||||
.target_alloc = ahc_linux_target_alloc,
|
||||
|
|
|
@ -11142,6 +11142,7 @@ static struct scsi_host_template driver_template = {
|
|||
.max_sectors = 2048,
|
||||
.cmd_per_lun = 3,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
};
|
||||
|
||||
#include "scsi_module.c"
|
||||
|
|
|
@ -94,7 +94,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
|
|||
res = -ENOMEM;
|
||||
goto err_unmap;
|
||||
}
|
||||
for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
|
||||
for_each_sg(task->scatter, sc, num_sg, i) {
|
||||
struct sg_el *sg =
|
||||
&((struct sg_el *)ascb->sg_arr->vaddr)[i];
|
||||
sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
|
||||
|
@ -103,7 +103,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
|
|||
sg->flags |= ASD_SG_EL_LIST_EOL;
|
||||
}
|
||||
|
||||
for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
|
||||
for_each_sg(task->scatter, sc, 2, i) {
|
||||
sg_arr[i].bus_addr =
|
||||
cpu_to_le64((u64)sg_dma_address(sc));
|
||||
sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
|
||||
|
@ -115,7 +115,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
|
|||
sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
|
||||
} else {
|
||||
int i;
|
||||
for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
|
||||
for_each_sg(task->scatter, sc, num_sg, i) {
|
||||
sg_arr[i].bus_addr =
|
||||
cpu_to_le64((u64)sg_dma_address(sc));
|
||||
sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
|
||||
|
|
|
@ -122,6 +122,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
|
|||
.max_sectors = ARCMSR_MAX_XFER_SECTORS,
|
||||
.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
.shost_attrs = arcmsr_host_attrs,
|
||||
};
|
||||
#ifdef CONFIG_SCSI_ARCMSR_AER
|
||||
|
|
|
@ -4765,6 +4765,7 @@ static struct scsi_host_template dc395x_driver_template = {
|
|||
.eh_bus_reset_handler = dc395x_eh_bus_reset,
|
||||
.unchecked_isa_dma = 0,
|
||||
.use_clustering = DISABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -3295,6 +3295,7 @@ static struct scsi_host_template adpt_template = {
|
|||
.this_id = 7,
|
||||
.cmd_per_lun = 1,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
};
|
||||
|
||||
static s32 adpt_scsi_register(adpt_hba* pHba)
|
||||
|
|
|
@ -523,7 +523,8 @@ static struct scsi_host_template driver_template = {
|
|||
.slave_configure = eata2x_slave_configure,
|
||||
.this_id = 7,
|
||||
.unchecked_isa_dma = 1,
|
||||
.use_clustering = ENABLE_CLUSTERING
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||
};
|
||||
|
||||
#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
|
||||
|
|
|
@@ -343,6 +343,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->use_clustering = sht->use_clustering;
 	shost->ordered_tag = sht->ordered_tag;
 	shost->active_mode = sht->supported_mode;
+	shost->use_sg_chaining = sht->use_sg_chaining;

 	if (sht->max_host_blocked)
 		shost->max_host_blocked = sht->max_host_blocked;

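The scsi_host_alloc() hunk is the mid-layer half of the opt-in: whatever flag a driver declares in its template is copied onto the Scsi_Host it gets back. A sketch of the driver half, using a made-up template name (field values are typical placeholders, not from any one driver above):

	#include <linux/module.h>
	#include <scsi/scsi_host.h>

	static struct scsi_host_template example_template = {
		.module          = THIS_MODULE,
		.name            = "example",		/* placeholder name */
		.this_id         = -1,
		.sg_tablesize    = SG_ALL,
		.cmd_per_lun     = 1,
		.use_clustering  = ENABLE_CLUSTERING,
		.use_sg_chaining = ENABLE_SG_CHAINING,	/* opt in to chained sglists */
	};
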
@@ -655,6 +655,7 @@ static struct scsi_host_template driver_template = {
 	.unchecked_isa_dma = 0,
 	.emulated = 0,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.proc_name = driver_name,
 	.shost_attrs = hptiop_attrs,
 	.this_id = -1,

@@ -1501,6 +1501,7 @@ static struct scsi_host_template ibmmca_driver_template = {
 	.sg_tablesize = 16,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };

 static int ibmmca_probe(struct device *dev)

@@ -1548,6 +1548,7 @@ static struct scsi_host_template driver_template = {
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = ibmvscsi_attrs,
 };

@@ -70,6 +70,7 @@ typedef struct idescsi_pc_s {
 	u8 *buffer; /* Data buffer */
 	u8 *current_position; /* Pointer into the above buffer */
 	struct scatterlist *sg; /* Scatter gather table */
+	struct scatterlist *last_sg; /* Last sg element */
 	int b_count; /* Bytes transferred from current entry */
 	struct scsi_cmnd *scsi_cmd; /* SCSI command */
 	void (*done)(struct scsi_cmnd *); /* Scsi completion routine */
@@ -173,12 +174,6 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
 	char *buf;

 	while (bcount) {
-		if (pc->sg - scsi_sglist(pc->scsi_cmd) >
-				scsi_sg_count(pc->scsi_cmd)) {
-			printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
-			idescsi_discard_data (drive, bcount);
-			return;
-		}
 		count = min(pc->sg->length - pc->b_count, bcount);
 		if (PageHighMem(pc->sg->page)) {
 			unsigned long flags;
@@ -197,10 +192,17 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
 		}
 		bcount -= count; pc->b_count += count;
 		if (pc->b_count == pc->sg->length) {
-			pc->sg++;
+			if (pc->sg == pc->last_sg)
+				break;
+			pc->sg = sg_next(pc->sg);
 			pc->b_count = 0;
 		}
 	}
+
+	if (bcount) {
+		printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
+		idescsi_discard_data (drive, bcount);
+	}
 }

 static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
@@ -209,12 +211,6 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
 	char *buf;

 	while (bcount) {
-		if (pc->sg - scsi_sglist(pc->scsi_cmd) >
-				scsi_sg_count(pc->scsi_cmd)) {
-			printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
-			idescsi_output_zeros (drive, bcount);
-			return;
-		}
 		count = min(pc->sg->length - pc->b_count, bcount);
 		if (PageHighMem(pc->sg->page)) {
 			unsigned long flags;
@@ -233,10 +229,17 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
 		}
 		bcount -= count; pc->b_count += count;
 		if (pc->b_count == pc->sg->length) {
-			pc->sg++;
+			if (pc->sg == pc->last_sg)
+				break;
+			pc->sg = sg_next(pc->sg);
 			pc->b_count = 0;
 		}
 	}
+
+	if (bcount) {
+		printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
+		idescsi_output_zeros (drive, bcount);
+	}
 }

 static void hexdump(u8 *x, int len)
@@ -804,6 +807,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
 	memcpy (pc->c, cmd->cmnd, cmd->cmd_len);
 	pc->buffer = NULL;
 	pc->sg = scsi_sglist(cmd);
+	pc->last_sg = sg_last(pc->sg, cmd->use_sg);
 	pc->b_count = 0;
 	pc->request_transfer = pc->buffer_size = scsi_bufflen(cmd);
 	pc->scsi_cmd = cmd;

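ide-scsi is the one driver here that keeps a long-lived cursor into the table across transfers, hence the new last_sg field and the sg_last() call above. A rough sketch of that traversal style (helper name and context invented, not the driver's own code):

	#include <linux/scatterlist.h>

	/* Walk from the first element to a saved last element with sg_next(),
	 * which follows chain links; plain pointer arithmetic would not. */
	static unsigned int total_sg_bytes(struct scatterlist *first,
					   unsigned int nents)
	{
		struct scatterlist *sg = first;
		struct scatterlist *last = sg_last(first, nents);
		unsigned int total = 0;

		for (;;) {
			total += sg->length;
			if (sg == last)
				break;
			sg = sg_next(sg);
		}
		return total;
	}
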
@@ -2831,6 +2831,7 @@ static struct scsi_host_template initio_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };

 static int initio_probe_one(struct pci_dev *pdev,

@@ -3252,7 +3252,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 	 */
 	if ((scb->breakup) || (scb->sg_break)) {
 		struct scatterlist *sg;
-		int sg_dma_index, ips_sg_index = 0;
+		int i, sg_dma_index, ips_sg_index = 0;

 		/* we had a data breakup */
 		scb->data_len = 0;
@@ -3261,20 +3261,22 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)

 		/* Spin forward to last dma chunk */
 		sg_dma_index = scb->breakup;
+		for (i = 0; i < scb->breakup; i++)
+			sg = sg_next(sg);

 		/* Take care of possible partial on last chunk */
 		ips_fill_scb_sg_single(ha,
-				       sg_dma_address(&sg[sg_dma_index]),
+				       sg_dma_address(sg),
 				       scb, ips_sg_index++,
-				       sg_dma_len(&sg[sg_dma_index]));
+				       sg_dma_len(sg));

 		for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
-		     sg_dma_index++) {
+		     sg_dma_index++, sg = sg_next(sg)) {
 			if (ips_fill_scb_sg_single
 			    (ha,
-			     sg_dma_address(&sg[sg_dma_index]),
+			     sg_dma_address(sg),
 			     scb, ips_sg_index++,
-			     sg_dma_len(&sg[sg_dma_index])) < 0)
+			     sg_dma_len(sg)) < 0)
 				break;
 		}


@@ -1438,6 +1438,7 @@ struct scsi_host_template lpfc_template = {
 	.scan_finished = lpfc_scan_finished,
 	.this_id = -1,
 	.sg_tablesize = LPFC_SG_SEG_CNT,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.cmd_per_lun = LPFC_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = lpfc_hba_attrs,
@@ -1460,6 +1461,7 @@ struct scsi_host_template lpfc_vport_template = {
 	.sg_tablesize = LPFC_SG_SEG_CNT,
 	.cmd_per_lun = LPFC_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = lpfc_vport_attrs,
 	.max_sectors = 0xFFFF,
 };

@@ -402,6 +402,7 @@ static struct scsi_host_template mac53c94_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 1,
 	.use_clustering = DISABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };

 static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)

@@ -4492,6 +4492,7 @@ static struct scsi_host_template megaraid_template = {
 	.sg_tablesize = MAX_SGLIST,
 	.cmd_per_lun = DEF_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.eh_abort_handler = megaraid_abort,
 	.eh_device_reset_handler = megaraid_reset,
 	.eh_bus_reset_handler = megaraid_reset,

@@ -361,6 +361,7 @@ static struct scsi_host_template megaraid_template_g = {
 	.eh_host_reset_handler = megaraid_reset_handler,
 	.change_queue_depth = megaraid_change_queue_depth,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.sdev_attrs = megaraid_sdev_attrs,
 	.shost_attrs = megaraid_shost_attrs,
 };

@@ -1110,6 +1110,7 @@ static struct scsi_host_template megasas_template = {
 	.eh_timed_out = megasas_reset_timer,
 	.bios_param = megasas_bios_param,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };

 /**

@@ -1843,6 +1843,7 @@ static struct scsi_host_template mesh_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 2,
 	.use_clustering = DISABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };

 static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)

@@ -281,6 +281,7 @@ static struct scsi_host_template nsp32_template = {
 	.cmd_per_lun = 1,
 	.this_id = NSP32_HOST_SCSIID,
 	.use_clustering = DISABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.eh_abort_handler = nsp32_eh_abort,
 	.eh_bus_reset_handler = nsp32_eh_bus_reset,
 	.eh_host_reset_handler = nsp32_eh_host_reset,

@@ -694,6 +694,7 @@ static struct scsi_host_template sym53c500_driver_template = {
 	.sg_tablesize = 32,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = SYM53C500_shost_attrs
 };

@@ -2775,7 +2775,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct device_reg __iomem *reg = ha->iobase;
 	struct scsi_cmnd *cmd = sp->cmd;
 	cmd_a64_entry_t *pkt;
-	struct scatterlist *sg = NULL;
+	struct scatterlist *sg = NULL, *s;
 	__le32 *dword_ptr;
 	dma_addr_t dma_handle;
 	int status = 0;
@@ -2889,13 +2889,16 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	 * Load data segments.
 	 */
 	if (seg_cnt) { /* If data transfer. */
+		int remseg = seg_cnt;
 		/* Setup packet address segment pointer. */
 		dword_ptr = (u32 *)&pkt->dseg_0_address;

 		if (cmd->use_sg) { /* If scatter gather */
 			/* Load command entry data segments. */
-			for (cnt = 0; cnt < 2 && seg_cnt; cnt++, seg_cnt--) {
-				dma_handle = sg_dma_address(sg);
+			for_each_sg(sg, s, seg_cnt, cnt) {
+				if (cnt == 2)
+					break;
+				dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
 				if (ha->flags.use_pci_vchannel)
 					sn_pci_set_vchan(ha->pdev,
@@ -2906,12 +2909,12 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					cpu_to_le32(pci_dma_lo32(dma_handle));
 				*dword_ptr++ =
 					cpu_to_le32(pci_dma_hi32(dma_handle));
-				*dword_ptr++ = cpu_to_le32(sg_dma_len(sg));
-				sg++;
+				*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
 				dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
 					cpu_to_le32(pci_dma_hi32(dma_handle)),
 					cpu_to_le32(pci_dma_lo32(dma_handle)),
-					cpu_to_le32(sg_dma_len(sg)));
+					cpu_to_le32(sg_dma_len(sg_next(s))));
+				remseg--;
 			}
 			dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
 				"command packet data - b %i, t %i, l %i \n",
@@ -2926,7 +2929,9 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 			dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
 				"remains\n", seg_cnt);

-			while (seg_cnt > 0) {
+			while (remseg > 0) {
+				/* Update sg start */
+				sg = s;
 				/* Adjust ring index. */
 				ha->req_ring_index++;
 				if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
@@ -2952,9 +2957,10 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					(u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;

 				/* Load continuation entry data segments. */
-				for (cnt = 0; cnt < 5 && seg_cnt;
-				     cnt++, seg_cnt--) {
-					dma_handle = sg_dma_address(sg);
+				for_each_sg(sg, s, remseg, cnt) {
+					if (cnt == 5)
+						break;
+					dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
 					if (ha->flags.use_pci_vchannel)
 						sn_pci_set_vchan(ha->pdev,
@@ -2966,13 +2972,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					*dword_ptr++ =
 						cpu_to_le32(pci_dma_hi32(dma_handle));
 					*dword_ptr++ =
-						cpu_to_le32(sg_dma_len(sg));
+						cpu_to_le32(sg_dma_len(s));
 					dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
 						cpu_to_le32(pci_dma_hi32(dma_handle)),
 						cpu_to_le32(pci_dma_lo32(dma_handle)),
-						cpu_to_le32(sg_dma_len(sg)));
-					sg++;
+						cpu_to_le32(sg_dma_len(s)));
 				}
+				remseg -= cnt;
 				dprintk(5, "qla1280_64bit_start_scsi: "
 					"continuation packet data - b %i, t "
 					"%i, l %i \n", SCSI_BUS_32(cmd),

@@ -3062,7 +3068,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct device_reg __iomem *reg = ha->iobase;
 	struct scsi_cmnd *cmd = sp->cmd;
 	struct cmd_entry *pkt;
-	struct scatterlist *sg = NULL;
+	struct scatterlist *sg = NULL, *s;
 	__le32 *dword_ptr;
 	int status = 0;
 	int cnt;
@@ -3188,6 +3194,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	 * Load data segments.
 	 */
 	if (seg_cnt) {
+		int remseg = seg_cnt;
 		/* Setup packet address segment pointer. */
 		dword_ptr = &pkt->dseg_0_address;

@@ -3196,22 +3203,25 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 			qla1280_dump_buffer(1, (char *)sg, 4 * 16);

 			/* Load command entry data segments. */
-			for (cnt = 0; cnt < 4 && seg_cnt; cnt++, seg_cnt--) {
+			for_each_sg(sg, s, seg_cnt, cnt) {
+				if (cnt == 4)
+					break;
 				*dword_ptr++ =
-					cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
-				*dword_ptr++ =
-					cpu_to_le32(sg_dma_len(sg));
+					cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+				*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
 				dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
-					(pci_dma_lo32(sg_dma_address(sg))),
-					(sg_dma_len(sg)));
-				sg++;
+					(pci_dma_lo32(sg_dma_address(s))),
+					(sg_dma_len(s)));
+				remseg--;
 			}
 			/*
 			 * Build continuation packets.
 			 */
 			dprintk(3, "S/G Building Continuation"
 				"...seg_cnt=0x%x remains\n", seg_cnt);
-			while (seg_cnt > 0) {
+			while (remseg > 0) {
+				/* Continue from end point */
+				sg = s;
 				/* Adjust ring index. */
 				ha->req_ring_index++;
 				if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
@@ -3239,19 +3249,20 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					&((struct cont_entry *) pkt)->dseg_0_address;

 				/* Load continuation entry data segments. */
-				for (cnt = 0; cnt < 7 && seg_cnt;
-				     cnt++, seg_cnt--) {
+				for_each_sg(sg, s, remseg, cnt) {
+					if (cnt == 7)
+						break;
 					*dword_ptr++ =
-						cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
+						cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
 					*dword_ptr++ =
-						cpu_to_le32(sg_dma_len(sg));
+						cpu_to_le32(sg_dma_len(s));
 					dprintk(1,
 						"S/G Segment Cont. phys_addr=0x%x, "
 						"len=0x%x\n",
-						cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))),
-						cpu_to_le32(sg_dma_len(sg)));
-					sg++;
+						cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
+						cpu_to_le32(sg_dma_len(s)));
 				}
+				remseg -= cnt;
 				dprintk(5, "qla1280_32bit_start_scsi: "
 					"continuation packet data - "
 					"scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),

@@ -4248,6 +4259,7 @@ static struct scsi_host_template qla1280_driver_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };



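The qla1280 hunks above are the most involved conversion in this batch: the controller takes only a handful of segments per request or continuation packet, so each pass has to stop early and the next pass has to resume where the previous one ended. Reduced to its shape (all names invented; this is not the driver's own code):

	#include <linux/scatterlist.h>

	/* Program a mapped scatterlist in fixed-size chunks, resuming each pass
	 * with for_each_sg() from the element the previous pass stopped at. */
	static void fill_in_chunks(struct scatterlist *sglist, int nents, int chunk)
	{
		struct scatterlist *sg = sglist, *s;
		int remaining = nents;
		int cnt;

		while (remaining > 0) {
			/* start a new (continuation) packet here */
			for_each_sg(sg, s, remaining, cnt) {
				if (cnt == chunk)
					break;
				/* program sg_dma_address(s) / sg_dma_len(s) */
			}
			remaining -= cnt;	/* cnt == chunk, or == remaining on the last pass */
			sg = s;			/* resume from the first element not yet programmed */
		}
	}
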
@@ -132,6 +132,7 @@ struct scsi_host_template qla2x00_driver_template = {
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.sg_tablesize = SG_ALL,

 	/*
@@ -163,6 +164,7 @@ struct scsi_host_template qla24xx_driver_template = {
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.sg_tablesize = SG_ALL,

 	.max_sectors = 0xFFFF,

@@ -94,6 +94,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.sg_tablesize = SG_ALL,

 	.max_sectors = 0xFFFF,

@@ -197,6 +197,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 1,
 	.use_clustering = DISABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };

 static __init int qlogicfas_init(void)

@@ -868,7 +868,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
 {
 	struct dataseg *ds;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *s;
 	int i, n;

 	if (Cmnd->use_sg) {
@@ -884,11 +884,12 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 		n = sg_count;
 		if (n > 4)
 			n = 4;
-		for (i = 0; i < n; i++, sg++) {
-			ds[i].d_base = sg_dma_address(sg);
-			ds[i].d_count = sg_dma_len(sg);
+		for_each_sg(sg, s, n, i) {
+			ds[i].d_base = sg_dma_address(s);
+			ds[i].d_count = sg_dma_len(s);
 		}
 		sg_count -= 4;
+		sg = s;
 		while (sg_count > 0) {
 			struct Continuation_Entry *cont;

@@ -907,9 +908,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 			n = sg_count;
 			if (n > 7)
 				n = 7;
-			for (i = 0; i < n; i++, sg++) {
-				ds[i].d_base = sg_dma_address(sg);
-				ds[i].d_count = sg_dma_len(sg);
+			for_each_sg(sg, s, n, i) {
+				ds[i].d_base = sg_dma_address(s);
+				ds[i].d_count = sg_dma_len(s);
 			}
 			sg_count -= n;
 		}

Some files were not shown because too many files have changed in this diff.