e24c2d963a
After seeing, at best, "guesses" at this kind of information in several drivers, I decided that we really need a way for platforms to give specific advice about what works best with their PCI controller implementation.

Basically, this new interface gives DMA bursting advice on PCI. There are three forms of the advice:

1) Burst as much as possible; it is not necessary to end bursts on any particular boundary for best performance.

2) Burst on some byte-count multiple. A DMA burst of some multiple of a byte count may be done, but it is important to end the burst on an exact multiple for best performance. The best example of this I am aware of is the PPC64 PCI controllers, where ending a burst mid-cacheline forces the chip to refetch the data and the IOMMU translations, which hurts performance a lot.

3) Burst on a single byte-count multiple. Bursts shall end exactly on the next multiple boundary for best performance. Sparc64 and Alpha's PCI controllers operate this way; they disconnect any device which tries to burst across a cacheline boundary. Actually, newer sparc64 PCI controllers do not have this behavior. That is why the "pdev" is passed into the interface, so I can later add code to check which PCI controller the system is using and give advice accordingly.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
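To make the intended use concrete, here is a minimal sketch of how a driver's probe path might consume this advice. It is illustrative only: foo_tune_dma_burst() is a hypothetical helper, and the PCI_DMA_BURST_BOUNDARY and PCI_DMA_BURST_MULTIPLE enumerators are assumed to exist alongside PCI_DMA_BURST_INFINITY in the generic PCI headers; only PCI_DMA_BURST_INFINITY appears in the i386 header below.

/* Hypothetical driver helper (would live in a driver that includes
 * <linux/pci.h>): query the platform's burst advice and tune the
 * device's DMA engine accordingly.  PCI_DMA_BURST_BOUNDARY and
 * PCI_DMA_BURST_MULTIPLE are assumed here. */
static void foo_tune_dma_burst(struct pci_dev *pdev)
{
	enum pci_dma_burst_strategy strat;
	unsigned long strategy_parameter;

	pci_dma_burst_advice(pdev, &strat, &strategy_parameter);

	switch (strat) {
	case PCI_DMA_BURST_INFINITY:
		/* Form 1: no boundary matters; burst as much as possible. */
		break;
	case PCI_DMA_BURST_MULTIPLE:
		/* Form 2: end bursts on a multiple of strategy_parameter
		 * bytes (e.g. ppc64 cachelines). */
		break;
	case PCI_DMA_BURST_BOUNDARY:
		/* Form 3: never burst across a strategy_parameter byte
		 * boundary (sparc64, alpha). */
		break;
	}
}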
#ifndef __i386_PCI_H
#define __i386_PCI_H

#include <linux/config.h>

#ifdef __KERNEL__
#include <linux/mm.h>		/* for struct page */

/* Can be used to override the logic in pci_scan_bus for skipping
   already-configured bus numbers - to be used for buggy BIOSes
   or architectures with incomplete PCI setup by the loader */

#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
#else
#define pcibios_assign_all_busses()	0
#endif
#define pcibios_scan_all_fns(a, b)	0

extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		(pci_mem_start)

#define PCIBIOS_MIN_CARDBUS_IO	0x4000

void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);

void pcibios_set_master(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq);
struct irq_routing_table *pcibios_get_irq_routing_table(void);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);

/* Dynamic DMA mapping stuff.
 * i386 has everything mapped statically.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>

struct pci_dev;

/* The PCI address space does equal the physical memory
 * address space. The networking and block device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(1)

/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)

/* This is always fine. */
#define pci_dac_dma_supported(pci_dev, mask)	(1)

static inline dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
{
	return ((dma64_addr_t) page_to_phys(page) +
		(dma64_addr_t) offset);
}

static inline struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return pfn_to_page(dma_addr >> PAGE_SHIFT);
}

static inline unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return (dma_addr & ~PAGE_MASK);
}

static inline void
pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
{
}

static inline void
pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
{
	flush_write_buffers();
}

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state, int write_combine);


static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

#endif /* __KERNEL__ */

/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>

/* generic pci stuff */
#include <asm-generic/pci.h>

#endif /* __i386_PCI_H */