dma-mapping: fix off-by-one error in dma_capable()
dma_mask is, when interpreted as an address, the last valid byte, and hence the comparison must also be done against the last valid byte of the buffer in question. Also fix the open-coded instances in lib/swiotlb.c.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Becky Bruce <beckyb@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bbead2104e
commit ac2b3e67dd
4 changed files with 5 additions and 5 deletions
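To make the off-by-one concrete, here is a minimal user-space C sketch (hypothetical, not part of the patch; the names old_dma_capable/new_dma_capable are illustrative only): a buffer whose last byte lands exactly on the DMA mask is reachable by the device, yet the old "addr + size <= mask" comparison rejects it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool old_dma_capable(uint64_t addr, size_t size, uint64_t dma_mask)
{
	/* Old check: compares one byte past the end of the buffer. */
	return addr + size <= dma_mask;
}

static bool new_dma_capable(uint64_t addr, size_t size, uint64_t dma_mask)
{
	/* Fixed check: compares the last valid byte of the buffer. */
	return addr + size - 1 <= dma_mask;
}

int main(void)
{
	uint64_t dma_mask = 0xffffffffULL; /* 32-bit device: last addressable byte is 0xffffffff */
	uint64_t addr = 0xfffff000ULL;     /* buffer occupies 0xfffff000 .. 0xffffffff */
	size_t size = 0x1000;

	/* The old check rejects a buffer that ends exactly at the mask. */
	printf("old: %d, new: %d\n",
	       old_dma_capable(addr, size, dma_mask),
	       new_dma_capable(addr, size, dma_mask)); /* prints "old: 0, new: 1" */
	return 0;
}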
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return 0;
 
-	return addr + size <= *dev->dma_mask;
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -197,7 +197,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return 0;
 
-	return addr + size <= *dev->dma_mask;
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return 0;
 
-	return addr + size <= *dev->dma_mask;
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -549,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
+	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 */
@@ -571,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (dev_addr + size > dma_mask) {
+	if (dev_addr + size - 1 > dma_mask) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 			(unsigned long long)dma_mask,
 			(unsigned long long)dev_addr);