kernel-fxtec-pro1x/arch/x86/include/asm/pci_x86.h

/*
* Low-Level PCI Access for i386 machines.
*
* (c) 1999 Martin Mares <mj@ucw.cz>
*/
#undef DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
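/*
 * Illustrative usage sketch (not part of the original header): DBG()
 * takes printk-style arguments and compiles away entirely unless DEBUG
 * is defined above, e.g.:
 *
 *	DBG("PCI: using configuration type %d\n", type);
 */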
#define PCI_PROBE_BIOS 0x0001
#define PCI_PROBE_CONF1 0x0002
#define PCI_PROBE_CONF2 0x0004
#define PCI_PROBE_MMCONF 0x0008
#define PCI_PROBE_MASK 0x000f
#define PCI_PROBE_NOEARLY 0x0010
#define PCI_NO_CHECKS 0x0400
#define PCI_USE_PIRQ_MASK 0x0800
#define PCI_ASSIGN_ROMS 0x1000
#define PCI_BIOS_IRQ_SCAN 0x2000
#define PCI_ASSIGN_ALL_BUSSES 0x4000
#define PCI_CAN_SKIP_ISA_ALIGN 0x8000
#define PCI_USE__CRS 0x10000
#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000
#define PCI_HAS_IO_ECS 0x40000
#define PCI_NOASSIGN_ROMS 0x80000
#define PCI_ROOT_NO_CRS 0x100000
extern unsigned int pci_probe;
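/*
 * Illustrative sketch (not part of the original header): pci_probe is a
 * bitmask built from the PCI_PROBE_xxx / PCI_xxx flags above; probe and
 * setup code typically tests and clears individual bits, for example:
 *
 *	if (!(pci_probe & PCI_PROBE_MMCONF) || (pci_probe & PCI_PROBE_NOEARLY))
 *		return;				// MMCONFIG probing disabled
 *	pci_probe &= ~PCI_PROBE_BIOS;		// drop the BIOS access method
 */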
extern unsigned long pirq_table_addr;
/*
 * Breadth-first sorting of the PCI device lists (added with
 * "PCI: optionally sort device lists breadth-first", Matt Domsch,
 * 2006-09-29): 2.6 kernels walk the device list in depth-first order
 * when binding drivers, which on some systems (e.g. Dell PowerEdge,
 * plus various Sun and HP servers) names the embedded NICs backwards
 * from the chassis labels and from what 2.4 kernels did.  Sorting the
 * lists breadth-first restores the 2.4 ordering.  The sort can be
 * forced on or off with "pci=bfsort" / "pci=nobfsort", and DMI quirks
 * enable it on known-affected machines.
 */
enum pci_bf_sort_state {
	pci_bf_sort_default,
	pci_force_nobf,
	pci_force_bf,
	pci_dmi_bf,
};
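/*
 * Illustrative sketch (not part of the original header): the states above
 * record how the breadth-first decision was reached.  Hypothetical
 * command-line handling would select the forced states (pci_bf_sort being
 * the state variable kept in the PCI setup code), while a DMI quirk table
 * can set pci_dmi_bf on known-affected machines:
 *
 *	if (!strcmp(str, "bfsort"))
 *		pci_bf_sort = pci_force_bf;
 *	else if (!strcmp(str, "nobfsort"))
 *		pci_bf_sort = pci_force_nobf;
 */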
/* pci-i386.c */
extern unsigned int pcibios_max_latency;
void pcibios_resource_survey(void);
/* pci-pc.c */
extern int pcibios_last_bus;
extern struct pci_bus *pci_root_bus;
extern struct pci_ops pci_root_ops;
/* pci-irq.c */
struct irq_info {
	u8 bus, devfn;			/* Bus, device and function */
	struct {
		u8 link;		/* IRQ line ID, chipset dependent,
					   0 = not routed */
		u16 bitmap;		/* Available IRQs */
	} __attribute__((packed)) irq[4];
	u8 slot;			/* Slot number, 0=onboard */
	u8 rfu;
} __attribute__((packed));
struct irq_routing_table {
	u32 signature;			/* PIRQ_SIGNATURE should be here */
	u16 version;			/* PIRQ_VERSION */
	u16 size;			/* Table size in bytes */
	u8 rtr_bus, rtr_devfn;		/* Where the interrupt router lies */
	u16 exclusive_irqs;		/* IRQs devoted exclusively to
					   PCI usage */
	u16 rtr_vendor, rtr_device;	/* Vendor and device ID of
					   interrupt router */
	u32 miniport_data;		/* Miniport driver data, unused by Linux */
	u8 rfu[11];
	u8 checksum;			/* Modulo 256 checksum must give 0 */
	struct irq_info slots[0];
} __attribute__((packed));
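/*
 * Illustrative sketch (not part of the original header): the number of
 * slots[] entries is not stored explicitly; it follows from the total
 * table size, so a walker over a table "rt" might look like:
 *
 *	int i, nr = (rt->size - sizeof(struct irq_routing_table)) /
 *		    sizeof(struct irq_info);
 *
 *	for (i = 0; i < nr; i++) {
 *		struct irq_info *info = &rt->slots[i];
 *		// info->bus/info->devfn identify the device; for pin n
 *		// (INTA#..INTD#), info->irq[n].link and .bitmap describe
 *		// how that pin may be routed.
 *	}
 */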
extern unsigned int pcibios_irq_mask;
extern spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
struct pci_raw_ops {
	int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
		    int reg, int len, u32 *val);
	int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
		     int reg, int len, u32 val);
};
extern struct pci_raw_ops *raw_pci_ops;
extern struct pci_raw_ops *raw_pci_ext_ops;
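/*
 * Illustrative sketch (not part of the original header): raw_pci_ops is
 * the low-level config access method selected at probe time (conf1,
 * conf2, BIOS, ...); raw_pci_ext_ops additionally reaches extended
 * (beyond 256 byte) config space.  Reading a vendor ID might look like:
 *
 *	u32 vendor;
 *
 *	if (raw_pci_ops->read(0, bus, devfn, PCI_VENDOR_ID, 2, &vendor) == 0 &&
 *	    vendor != 0xffff)
 *		pr_info("device present at %02x:%02x.%x\n",
 *			bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 */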
extern struct pci_raw_ops pci_direct_conf1;
extern bool port_cf9_safe;
/* arch_initcall level */
extern int pci_direct_probe(void);
extern void pci_direct_init(int type);
extern void pci_pcbios_init(void);
extern void __init dmi_check_pciprobe(void);
extern void __init dmi_check_skip_isa_align(void);
/* some commonly used subsys_initcalls */
extern int __init pci_acpi_init(void);
extern void __init pcibios_irq_init(void);
extern int __init pcibios_init(void);
extern int pci_legacy_init(void);
extern void pcibios_fixup_irqs(void);
/* pci-mmconfig.c */
/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
struct pci_mmcfg_region {
	struct list_head list;
	struct resource res;
	u64 address;
	char __iomem *virt;
	u16 segment;
	u8 start_bus;
	u8 end_bus;
	char name[PCI_MMCFG_RESOURCE_NAME_LEN];
};
extern int __init pci_mmcfg_arch_init(void);
extern void __init pci_mmcfg_arch_free(void);
extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
extern struct list_head pci_mmcfg_list;
#define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20)
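/*
 * Illustrative sketch (not part of the original header): an MMCONFIG
 * region covers buses start_bus..end_bus of one segment, with 1 MiB of
 * config space per bus (hence the << 20 above) and 4 KiB per function.
 * Assuming virt is mapped so that bus offsets can be added directly, the
 * config space of one function could be located as:
 *
 *	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
 *	char __iomem *addr = NULL;
 *
 *	if (cfg && cfg->virt)
 *		addr = cfg->virt + PCI_MMCFG_BUS_OFFSET(bus) + (devfn << 12);
 */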
/*
 * AMD Fam10h CPUs are buggy and cannot access MMIO config space
 * on their northbridge except through the %eax register.  As such, you MUST
 * NOT use normal IOMEM accesses; use only the magic mmio-config
 * accessor functions below.
 * In fact, just use pci_config_*, nothing else please.
 */
static inline unsigned char mmio_config_readb(void __iomem *pos)
{
	u8 val;
	asm volatile("movb (%1),%%al" : "=a" (val) : "r" (pos));
	return val;
}

static inline unsigned short mmio_config_readw(void __iomem *pos)
{
	u16 val;
	asm volatile("movw (%1),%%ax" : "=a" (val) : "r" (pos));
	return val;
}

static inline unsigned int mmio_config_readl(void __iomem *pos)
{
	u32 val;
	asm volatile("movl (%1),%%eax" : "=a" (val) : "r" (pos));
	return val;
}

static inline void mmio_config_writeb(void __iomem *pos, u8 val)
{
	asm volatile("movb %%al,(%1)" : : "a" (val), "r" (pos) : "memory");
}

static inline void mmio_config_writew(void __iomem *pos, u16 val)
{
	asm volatile("movw %%ax,(%1)" : : "a" (val), "r" (pos) : "memory");
}

static inline void mmio_config_writel(void __iomem *pos, u32 val)
{
	asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory");
}
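/*
 * Illustrative sketch (not part of the original header): accesses to a
 * mapped MMCONFIG region (such as "addr" from the sketch above) should go
 * through the accessors above rather than plain readl()/writel(), so the
 * AMD Fam10h constraint is honoured, e.g.:
 *
 *	u32 class_rev = mmio_config_readl(addr + PCI_CLASS_REVISION);
 *
 *	mmio_config_writew(addr + PCI_COMMAND,
 *			   mmio_config_readw(addr + PCI_COMMAND) | PCI_COMMAND_MEMORY);
 */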
#ifdef CONFIG_PCI
# ifdef CONFIG_ACPI
# define x86_default_pci_init pci_acpi_init
# else
# define x86_default_pci_init pci_legacy_init
# endif
# define x86_default_pci_init_irq pcibios_irq_init
# define x86_default_pci_fixup_irqs pcibios_fixup_irqs
#else
# define x86_default_pci_init NULL
# define x86_default_pci_init_irq NULL
# define x86_default_pci_fixup_irqs NULL
#endif
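/*
 * Illustrative sketch (not part of the original header): these defaults
 * are typically what generic x86 setup plugs into its platform init
 * table, so that a subarchitecture can override individual hooks:
 *
 *	x86_init.pci.init	= x86_default_pci_init;
 *	x86_init.pci.init_irq	= x86_default_pci_init_irq;
 *	x86_init.pci.fixup_irqs	= x86_default_pci_fixup_irqs;
 */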