Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next
Pull sparc updates from David Miller:

 1) Queued spinlocks and rwlocks for sparc64, from Babu Moger.

 2) Some const'ification, from Arvind Yadav.

 3) LDC/VIO driver infrastructure changes to facilitate upcoming
    drivers, from Jag Raman.

 4) Initialize sched_clock() et al. early so that the initial printk
    timestamps are all done while the implementation is available and
    functioning, from Pavel Tatashin.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (38 commits)
  sparc: kernel: pmc: make of_device_ids const.
  sparc64: fix typo in property
  sparc64: add port_id to VIO device metadata
  sparc64: Enhance search for VIO device in MDESC
  sparc64: enhance VIO device probing
  sparc64: check if a client is allowed to register for MDESC notifications
  sparc64: remove restriction on VIO device name size
  sparc64: refactor code to obtain cfg_handle property from MDESC
  sparc64: add MDESC node name property to VIO device metadata
  sparc64: mdesc: use __GFP_REPEAT action modifier for VM allocation
  sparc64: expand MDESC interface
  sparc64: skip handshake for LDC channels in RAW mode
  sparc64: specify the device class in VIO version info. packet
  sparc64: ensure VIO operations are defined while being used
  sparc: kernel: apc: make of_device_ids const
  sparc/time: make of_device_ids const
  sparc64: broken %tick frequency on spitfire cpus
  sparc64: use prom interface to get %stick frequency
  sparc64: optimize functions that access tick
  sparc64: add hot-patched and inlined get_tick()
  ...
commit fe1b518075
24 changed files with 888 additions and 490 deletions
@@ -83,6 +83,8 @@ config SPARC64
	select ARCH_SUPPORTS_ATOMIC_RMW
	select HAVE_NMI
	select HAVE_REGS_AND_STACK_ACCESS_API
	select ARCH_USE_QUEUED_RWLOCKS
	select ARCH_USE_QUEUED_SPINLOCKS

config ARCH_DEFCONFIG
	string
@@ -92,6 +94,9 @@ config ARCH_DEFCONFIG
config ARCH_PROC_KCORE_TEXT
	def_bool y

config CPU_BIG_ENDIAN
	def_bool y

config ARCH_ATU
	bool
	default y if SPARC64
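The queued-lock selections above pull in the generic qspinlock/qrwlock code, which performs 16-bit operations on the lock word; plain sparc64 only has a 32-bit `cas` instruction, so the cmpxchg changes further down synthesize a 16-bit xchg from it. Below is a rough, user-space sketch of that mask-and-shift trick using GCC's `__atomic` builtins instead of the kernel primitives; the helper name and the standalone form are ours, and a big-endian layout like sparc64's is assumed.

```c
/*
 * Illustrative sketch (not kernel code): emulate a 16-bit atomic
 * exchange with only a 32-bit compare-and-swap, the same idea the new
 * xchg16() uses around sparc64's "cas".  Big-endian byte order is
 * assumed, hence the "^ 2" when computing the shift.
 */
#include <stdint.h>

static uint16_t xchg16_emulated(volatile uint16_t *m, uint16_t val)
{
	uintptr_t maddr = (uintptr_t)m;
	/* Which half of the aligned 32-bit word holds *m (big-endian). */
	int bit_shift = (int)((maddr & 2) ^ 2) << 3;
	uint32_t mask = 0xffffu << bit_shift;
	volatile uint32_t *ptr = (volatile uint32_t *)(maddr & ~(uintptr_t)2);
	uint32_t old32, new32;

	old32 = *ptr;
	do {
		new32 = (old32 & ~mask) | ((uint32_t)val << bit_shift);
		/* On failure, old32 is refreshed with the current value. */
	} while (!__atomic_compare_exchange_n(ptr, &old32, new32, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return (uint16_t)((old32 & mask) >> bit_shift);
}
```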
|
|
@ -6,6 +6,17 @@
|
|||
#ifndef __ARCH_SPARC64_CMPXCHG__
|
||||
#define __ARCH_SPARC64_CMPXCHG__
|
||||
|
||||
static inline unsigned long
|
||||
__cmpxchg_u32(volatile int *m, int old, int new)
|
||||
{
|
||||
__asm__ __volatile__("cas [%2], %3, %0"
|
||||
: "=&r" (new)
|
||||
: "0" (new), "r" (m), "r" (old)
|
||||
: "memory");
|
||||
|
||||
return new;
|
||||
}
|
||||
|
||||
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
|
||||
{
|
||||
unsigned long tmp1, tmp2;
|
||||
|
@ -44,10 +55,38 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
|
|||
|
||||
void __xchg_called_with_bad_pointer(void);
|
||||
|
||||
/*
|
||||
* Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
|
||||
* here is to get the bit shift of the byte we are interested in.
|
||||
* The XOR is handy for reversing the bits for big-endian byte order.
|
||||
*/
|
||||
static inline unsigned long
|
||||
xchg16(__volatile__ unsigned short *m, unsigned short val)
|
||||
{
|
||||
unsigned long maddr = (unsigned long)m;
|
||||
int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
|
||||
unsigned int mask = 0xffff << bit_shift;
|
||||
unsigned int *ptr = (unsigned int *) (maddr & ~2);
|
||||
unsigned int old32, new32, load32;
|
||||
|
||||
/* Read the old value */
|
||||
load32 = *ptr;
|
||||
|
||||
do {
|
||||
old32 = load32;
|
||||
new32 = (load32 & (~mask)) | val << bit_shift;
|
||||
load32 = __cmpxchg_u32(ptr, old32, new32);
|
||||
} while (load32 != old32);
|
||||
|
||||
return (load32 & mask) >> bit_shift;
|
||||
}
|
||||
|
||||
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
|
||||
int size)
|
||||
{
|
||||
switch (size) {
|
||||
case 2:
|
||||
return xchg16(ptr, x);
|
||||
case 4:
|
||||
return xchg32(ptr, x);
|
||||
case 8:
|
||||
|
@ -65,16 +104,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
|
|||
|
||||
#include <asm-generic/cmpxchg-local.h>
|
||||
|
||||
static inline unsigned long
|
||||
__cmpxchg_u32(volatile int *m, int old, int new)
|
||||
{
|
||||
__asm__ __volatile__("cas [%2], %3, %0"
|
||||
: "=&r" (new)
|
||||
: "0" (new), "r" (m), "r" (old)
|
||||
: "memory");
|
||||
|
||||
return new;
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
|
||||
|
@ -87,6 +116,33 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
|
|||
return new;
|
||||
}
|
||||
|
||||
/*
|
||||
* Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
|
||||
* here is to get the bit shift of the byte we are interested in.
|
||||
* The XOR is handy for reversing the bits for big-endian byte order
|
||||
*/
|
||||
static inline unsigned long
|
||||
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
|
||||
{
|
||||
unsigned long maddr = (unsigned long)m;
|
||||
int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
|
||||
unsigned int mask = 0xff << bit_shift;
|
||||
unsigned int *ptr = (unsigned int *) (maddr & ~3);
|
||||
unsigned int old32, new32, load;
|
||||
unsigned int load32 = *ptr;
|
||||
|
||||
do {
|
||||
new32 = (load32 & ~mask) | (new << bit_shift);
|
||||
old32 = (load32 & ~mask) | (old << bit_shift);
|
||||
load32 = __cmpxchg_u32(ptr, old32, new32);
|
||||
if (load32 == old32)
|
||||
return old;
|
||||
load = (load32 & mask) >> bit_shift;
|
||||
} while (load == old);
|
||||
|
||||
return load;
|
||||
}
|
||||
|
||||
/* This function doesn't exist, so you'll get a linker error
|
||||
if something tries to do an invalid cmpxchg(). */
|
||||
void __cmpxchg_called_with_bad_pointer(void);
|
||||
|
@ -95,6 +151,8 @@ static inline unsigned long
|
|||
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
return __cmpxchg_u8(ptr, old, new);
|
||||
case 4:
|
||||
return __cmpxchg_u32(ptr, old, new);
|
||||
case 8:
|
||||
|
@@ -48,6 +48,8 @@ struct ldc_channel_config {
#define LDC_STATE_READY		0x03
#define LDC_STATE_CONNECTED	0x04

#define LDC_PACKET_SIZE		64

struct ldc_channel;

/* Allocate state for a channel. */
@@ -72,6 +74,12 @@ int ldc_connect(struct ldc_channel *lp);
int ldc_disconnect(struct ldc_channel *lp);

int ldc_state(struct ldc_channel *lp);
void ldc_set_state(struct ldc_channel *lp, u8 state);
int ldc_mode(struct ldc_channel *lp);
void __ldc_print(struct ldc_channel *lp, const char *caller);
int ldc_rx_reset(struct ldc_channel *lp);

#define ldc_print(chan)	__ldc_print(chan, __func__)

/* Read and write operations.  Only valid when the link is up. */
int ldc_write(struct ldc_channel *lp, const void *buf,
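The new accessors let LDC consumers inspect channel state and mode without reaching into `struct ldc_channel`, and the `ldc_print()` macro passes `__func__` so a state dump is attributed to its caller. A hypothetical consumer-side sketch (not part of this series; only the `ldc_*` calls and `LDC_STATE_CONNECTED` come from the header above):

```c
/* my_port_send() is invented for illustration. */
static int my_port_send(struct ldc_channel *lp, const void *buf, int len)
{
	if (ldc_state(lp) != LDC_STATE_CONNECTED) {
		ldc_print(lp);	/* expands to __ldc_print(lp, __func__) */
		return -ENOTCONN;
	}

	return ldc_write(lp, buf, len);
}
```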
@@ -16,6 +16,7 @@ struct mdesc_handle *mdesc_grab(void);
void mdesc_release(struct mdesc_handle *);

#define MDESC_NODE_NULL		(~(u64)0)
#define MDESC_MAX_STR_LEN	256

u64 mdesc_node_by_name(struct mdesc_handle *handle,
		       u64 from_node, const char *name);
@@ -62,15 +63,32 @@ u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
void mdesc_update(void);

struct mdesc_notifier_client {
	void (*add)(struct mdesc_handle *handle, u64 node);
	void (*remove)(struct mdesc_handle *handle, u64 node);
	void (*add)(struct mdesc_handle *handle, u64 node,
		    const char *node_name);
	void (*remove)(struct mdesc_handle *handle, u64 node,
		       const char *node_name);
	const char *node_name;
	struct mdesc_notifier_client *next;
};

void mdesc_register_notifier(struct mdesc_notifier_client *client);

union md_node_info {
	struct vdev_port {
		u64 id;			/* id */
		u64 parent_cfg_hdl;	/* parent config handle */
		const char *name;	/* name (property) */
	} vdev_port;
	struct ds_port {
		u64 id;			/* id */
	} ds_port;
};

u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
		   union md_node_info *node_info);
int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
			const char *node_name, union md_node_info *node_info);

void mdesc_fill_in_cpu_data(cpumask_t *mask);
void mdesc_populate_present_mask(cpumask_t *mask);
void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
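The expanded notifier interface now passes the MD node name into the add/remove callbacks, and mdesc.c (further down) rejects registration for node types not listed in its `md_node_ops_table` ("virtual-device-port" and "domain-services-port"). A hedged sketch of what a client registration might look like; the `my_*` names and callback bodies are invented for illustration, only the `mdesc_*` types and functions come from the header above:

```c
static void my_vdev_port_add(struct mdesc_handle *hp, u64 node,
			     const char *node_name)
{
	union md_node_info info;

	/* Cache the identifying properties of the new node. */
	if (mdesc_get_node_info(hp, node, node_name, &info))
		return;
	pr_info("new %s node, id=%llu\n", node_name,
		(unsigned long long)info.vdev_port.id);
}

static void my_vdev_port_remove(struct mdesc_handle *hp, u64 node,
				const char *node_name)
{
	pr_info("%s node went away\n", node_name);
}

static struct mdesc_notifier_client my_vdev_port_notifier = {
	.add		= my_vdev_port_add,
	.remove		= my_vdev_port_remove,
	.node_name	= "virtual-device-port",
};

static int __init my_driver_init(void)
{
	/* Replays .add() for already-present nodes, then tracks updates. */
	mdesc_register_notifier(&my_vdev_port_notifier);
	return 0;
}
```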
|
7
arch/sparc/include/asm/qrwlock.h
Normal file
7
arch/sparc/include/asm/qrwlock.h
Normal file
|
@ -0,0 +1,7 @@
|
|||
#ifndef _ASM_SPARC_QRWLOCK_H
|
||||
#define _ASM_SPARC_QRWLOCK_H
|
||||
|
||||
#include <asm-generic/qrwlock_types.h>
|
||||
#include <asm-generic/qrwlock.h>
|
||||
|
||||
#endif /* _ASM_SPARC_QRWLOCK_H */
|
7
arch/sparc/include/asm/qspinlock.h
Normal file
7
arch/sparc/include/asm/qspinlock.h
Normal file
|
@ -0,0 +1,7 @@
|
|||
#ifndef _ASM_SPARC_QSPINLOCK_H
|
||||
#define _ASM_SPARC_QSPINLOCK_H
|
||||
|
||||
#include <asm-generic/qspinlock_types.h>
|
||||
#include <asm-generic/qspinlock.h>
|
||||
|
||||
#endif /* _ASM_SPARC_QSPINLOCK_H */
@@ -1,5 +1,5 @@
/*
 * Just a place holder.
 * Just a place holder.
 */
#ifndef _SPARC_SETUP_H
#define _SPARC_SETUP_H
|
@ -10,216 +10,12 @@
|
|||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/barrier.h>
|
||||
|
||||
/* To get debugging spinlocks which detect and catch
|
||||
* deadlock situations, set CONFIG_DEBUG_SPINLOCK
|
||||
* and rebuild your kernel.
|
||||
*/
|
||||
|
||||
/* Because we play games to save cycles in the non-contention case, we
|
||||
* need to be extra careful about branch targets into the "spinning"
|
||||
* code. They live in their own section, but the newer V9 branches
|
||||
* have a shorter range than the traditional 32-bit sparc branch
|
||||
* variants. The rule is that the branches that go into and out of
|
||||
* the spinner sections must be pre-V9 branches.
|
||||
*/
|
||||
|
||||
#define arch_spin_is_locked(lp) ((lp)->lock != 0)
|
||||
|
||||
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
|
||||
{
|
||||
smp_cond_load_acquire(&lock->lock, !VAL);
|
||||
}
|
||||
|
||||
static inline void arch_spin_lock(arch_spinlock_t *lock)
|
||||
{
|
||||
unsigned long tmp;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1: ldstub [%1], %0\n"
|
||||
" brnz,pn %0, 2f\n"
|
||||
" nop\n"
|
||||
" .subsection 2\n"
|
||||
"2: ldub [%1], %0\n"
|
||||
" brnz,pt %0, 2b\n"
|
||||
" nop\n"
|
||||
" ba,a,pt %%xcc, 1b\n"
|
||||
" .previous"
|
||||
: "=&r" (tmp)
|
||||
: "r" (lock)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static inline int arch_spin_trylock(arch_spinlock_t *lock)
|
||||
{
|
||||
unsigned long result;
|
||||
|
||||
__asm__ __volatile__(
|
||||
" ldstub [%1], %0\n"
|
||||
: "=r" (result)
|
||||
: "r" (lock)
|
||||
: "memory");
|
||||
|
||||
return (result == 0UL);
|
||||
}
|
||||
|
||||
static inline void arch_spin_unlock(arch_spinlock_t *lock)
|
||||
{
|
||||
__asm__ __volatile__(
|
||||
" stb %%g0, [%0]"
|
||||
: /* No outputs */
|
||||
: "r" (lock)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
|
||||
{
|
||||
unsigned long tmp1, tmp2;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1: ldstub [%2], %0\n"
|
||||
" brnz,pn %0, 2f\n"
|
||||
" nop\n"
|
||||
" .subsection 2\n"
|
||||
"2: rdpr %%pil, %1\n"
|
||||
" wrpr %3, %%pil\n"
|
||||
"3: ldub [%2], %0\n"
|
||||
" brnz,pt %0, 3b\n"
|
||||
" nop\n"
|
||||
" ba,pt %%xcc, 1b\n"
|
||||
" wrpr %1, %%pil\n"
|
||||
" .previous"
|
||||
: "=&r" (tmp1), "=&r" (tmp2)
|
||||
: "r"(lock), "r"(flags)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
|
||||
|
||||
static inline void arch_read_lock(arch_rwlock_t *lock)
|
||||
{
|
||||
unsigned long tmp1, tmp2;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"1: ldsw [%2], %0\n"
|
||||
" brlz,pn %0, 2f\n"
|
||||
"4: add %0, 1, %1\n"
|
||||
" cas [%2], %0, %1\n"
|
||||
" cmp %0, %1\n"
|
||||
" bne,pn %%icc, 1b\n"
|
||||
" nop\n"
|
||||
" .subsection 2\n"
|
||||
"2: ldsw [%2], %0\n"
|
||||
" brlz,pt %0, 2b\n"
|
||||
" nop\n"
|
||||
" ba,a,pt %%xcc, 4b\n"
|
||||
" .previous"
|
||||
: "=&r" (tmp1), "=&r" (tmp2)
|
||||
: "r" (lock)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static inline int arch_read_trylock(arch_rwlock_t *lock)
|
||||
{
|
||||
int tmp1, tmp2;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"1: ldsw [%2], %0\n"
|
||||
" brlz,a,pn %0, 2f\n"
|
||||
" mov 0, %0\n"
|
||||
" add %0, 1, %1\n"
|
||||
" cas [%2], %0, %1\n"
|
||||
" cmp %0, %1\n"
|
||||
" bne,pn %%icc, 1b\n"
|
||||
" mov 1, %0\n"
|
||||
"2:"
|
||||
: "=&r" (tmp1), "=&r" (tmp2)
|
||||
: "r" (lock)
|
||||
: "memory");
|
||||
|
||||
return tmp1;
|
||||
}
|
||||
|
||||
static inline void arch_read_unlock(arch_rwlock_t *lock)
|
||||
{
|
||||
unsigned long tmp1, tmp2;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1: lduw [%2], %0\n"
|
||||
" sub %0, 1, %1\n"
|
||||
" cas [%2], %0, %1\n"
|
||||
" cmp %0, %1\n"
|
||||
" bne,pn %%xcc, 1b\n"
|
||||
" nop"
|
||||
: "=&r" (tmp1), "=&r" (tmp2)
|
||||
: "r" (lock)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static inline void arch_write_lock(arch_rwlock_t *lock)
|
||||
{
|
||||
unsigned long mask, tmp1, tmp2;
|
||||
|
||||
mask = 0x80000000UL;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1: lduw [%2], %0\n"
|
||||
" brnz,pn %0, 2f\n"
|
||||
"4: or %0, %3, %1\n"
|
||||
" cas [%2], %0, %1\n"
|
||||
" cmp %0, %1\n"
|
||||
" bne,pn %%icc, 1b\n"
|
||||
" nop\n"
|
||||
" .subsection 2\n"
|
||||
"2: lduw [%2], %0\n"
|
||||
" brnz,pt %0, 2b\n"
|
||||
" nop\n"
|
||||
" ba,a,pt %%xcc, 4b\n"
|
||||
" .previous"
|
||||
: "=&r" (tmp1), "=&r" (tmp2)
|
||||
: "r" (lock), "r" (mask)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static inline void arch_write_unlock(arch_rwlock_t *lock)
|
||||
{
|
||||
__asm__ __volatile__(
|
||||
" stw %%g0, [%0]"
|
||||
: /* no outputs */
|
||||
: "r" (lock)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static inline int arch_write_trylock(arch_rwlock_t *lock)
|
||||
{
|
||||
unsigned long mask, tmp1, tmp2, result;
|
||||
|
||||
mask = 0x80000000UL;
|
||||
|
||||
__asm__ __volatile__(
|
||||
" mov 0, %2\n"
|
||||
"1: lduw [%3], %0\n"
|
||||
" brnz,pn %0, 2f\n"
|
||||
" or %0, %4, %1\n"
|
||||
" cas [%3], %0, %1\n"
|
||||
" cmp %0, %1\n"
|
||||
" bne,pn %%icc, 1b\n"
|
||||
" nop\n"
|
||||
" mov 1, %2\n"
|
||||
"2:"
|
||||
: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
|
||||
: "r" (lock), "r" (mask)
|
||||
: "memory");
|
||||
|
||||
return result;
|
||||
}
|
||||
#include <asm/qrwlock.h>
|
||||
#include <asm/qspinlock.h>
|
||||
|
||||
#define arch_read_lock_flags(p, f) arch_read_lock(p)
|
||||
#define arch_write_lock_flags(p, f) arch_write_lock(p)
|
||||
|
||||
#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
|
||||
#define arch_write_can_lock(rw) (!(rw)->lock)
|
||||
|
||||
#define arch_spin_relax(lock) cpu_relax()
|
||||
#define arch_read_relax(lock) cpu_relax()
|
||||
#define arch_write_relax(lock) cpu_relax()
|
||||
|
|
@@ -1,20 +1,24 @@
#ifndef __SPARC_SPINLOCK_TYPES_H
#define __SPARC_SPINLOCK_TYPES_H

#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm-generic/qspinlock_types.h>
#else

typedef struct {
	volatile unsigned char lock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
#endif /* CONFIG_QUEUED_SPINLOCKS */

#ifdef CONFIG_QUEUED_RWLOCKS
#include <asm-generic/qrwlock_types.h>
#else
typedef struct {
	volatile unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

#endif /* CONFIG_QUEUED_RWLOCKS */
#endif
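When CONFIG_QUEUED_SPINLOCKS/RWLOCKS are enabled, the byte- and word-sized fallback types above are replaced by the asm-generic definitions. Roughly paraphrased (the exact headers are authoritative), those boil down to:

```c
/* Rough paraphrase of the asm-generic queued-lock types, for orientation. */
typedef struct qspinlock {
	atomic_t	val;		/* locked byte, pending byte, waiter tail */
} arch_spinlock_t;

typedef struct qrwlock {
	atomic_t	cnts;		/* reader count plus writer bits */
	arch_spinlock_t	wait_lock;	/* queues contending lockers */
} arch_rwlock_t;
```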
|
|
@ -9,7 +9,12 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
/* The most frequently accessed fields should be first,
|
||||
* to fit into the same cacheline.
|
||||
*/
|
||||
struct sparc64_tick_ops {
|
||||
unsigned long ticks_per_nsec_quotient;
|
||||
unsigned long offset;
|
||||
unsigned long long (*get_tick)(void);
|
||||
int (*add_compare)(unsigned long);
|
||||
unsigned long softint_mask;
|
||||
|
@ -17,6 +22,8 @@ struct sparc64_tick_ops {
|
|||
|
||||
void (*init_tick)(void);
|
||||
unsigned long (*add_tick)(unsigned long);
|
||||
unsigned long (*get_frequency)(void);
|
||||
unsigned long frequency;
|
||||
|
||||
char *name;
|
||||
};
|
||||
|
@ -27,4 +34,64 @@ unsigned long sparc64_get_clock_tick(unsigned int cpu);
|
|||
void setup_sparc64_timer(void);
|
||||
void __init time_init(void);
|
||||
|
||||
#define TICK_PRIV_BIT BIT(63)
|
||||
#define TICKCMP_IRQ_BIT BIT(63)
|
||||
|
||||
#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
|
||||
#define HBIRD_STICK_ADDR 0x1fe0000f070UL
|
||||
|
||||
#define GET_TICK_NINSTR 13
|
||||
struct get_tick_patch {
|
||||
unsigned int addr;
|
||||
unsigned int tick[GET_TICK_NINSTR];
|
||||
unsigned int stick[GET_TICK_NINSTR];
|
||||
};
|
||||
|
||||
extern struct get_tick_patch __get_tick_patch;
|
||||
extern struct get_tick_patch __get_tick_patch_end;
|
||||
|
||||
static inline unsigned long get_tick(void)
|
||||
{
|
||||
unsigned long tick, tmp1, tmp2;
|
||||
|
||||
__asm__ __volatile__(
|
||||
/* read hbtick 13 instructions */
|
||||
"661:\n"
|
||||
" mov 0x1fe, %1\n"
|
||||
" sllx %1, 0x20, %1\n"
|
||||
" sethi %%hi(0xf000), %2\n"
|
||||
" or %2, 0x70, %2\n"
|
||||
" or %1, %2, %1\n" /* %1 = HBIRD_STICK_ADDR */
|
||||
" add %1, 8, %2\n"
|
||||
" ldxa [%2]%3, %0\n"
|
||||
" ldxa [%1]%3, %1\n"
|
||||
" ldxa [%2]%3, %2\n"
|
||||
" sub %2, %0, %0\n" /* don't modify %xcc */
|
||||
" brnz,pn %0, 661b\n" /* restart to save one register */
|
||||
" sllx %2, 32, %2\n"
|
||||
" or %2, %1, %0\n"
|
||||
/* Common/not patched code */
|
||||
" sllx %0, 1, %0\n"
|
||||
" srlx %0, 1, %0\n" /* Clear TICK_PRIV_BIT */
|
||||
/* Beginning of patch section */
|
||||
" .section .get_tick_patch, \"ax\"\n"
|
||||
" .word 661b\n"
|
||||
/* read tick 2 instructions and 11 skipped */
|
||||
" ba 1f\n"
|
||||
" rd %%tick, %0\n"
|
||||
" .skip 4 * (%4 - 2)\n"
|
||||
"1:\n"
|
||||
/* read stick 2 instructions and 11 skipped */
|
||||
" ba 1f\n"
|
||||
" rd %%asr24, %0\n"
|
||||
" .skip 4 * (%4 - 2)\n"
|
||||
"1:\n"
|
||||
/* End of patch section */
|
||||
" .previous\n"
|
||||
: "=&r" (tick), "=&r" (tmp1), "=&r" (tmp2)
|
||||
: "i" (ASI_PHYS_BYPASS_EC_E), "i" (GET_TICK_NINSTR));
|
||||
|
||||
return tick;
|
||||
}
|
||||
|
||||
#endif /* _SPARC64_TIMER_H */
|
||||
|
|
|
@ -316,24 +316,33 @@ static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
|
|||
}
|
||||
|
||||
#define VIO_MAX_TYPE_LEN 32
|
||||
#define VIO_MAX_NAME_LEN 32
|
||||
#define VIO_MAX_COMPAT_LEN 64
|
||||
|
||||
struct vio_dev {
|
||||
u64 mp;
|
||||
struct device_node *dp;
|
||||
|
||||
char node_name[VIO_MAX_NAME_LEN];
|
||||
char type[VIO_MAX_TYPE_LEN];
|
||||
char compat[VIO_MAX_COMPAT_LEN];
|
||||
int compat_len;
|
||||
|
||||
u64 dev_no;
|
||||
u64 id;
|
||||
|
||||
unsigned long port_id;
|
||||
unsigned long channel_id;
|
||||
|
||||
unsigned int tx_irq;
|
||||
unsigned int rx_irq;
|
||||
u64 rx_ino;
|
||||
u64 tx_ino;
|
||||
|
||||
/* Handle to the root of "channel-devices" sub-tree in MDESC */
|
||||
u64 cdev_handle;
|
||||
|
||||
/* MD specific data used to identify the vdev in MD */
|
||||
union md_node_info md_node_info;
|
||||
|
||||
struct device dev;
|
||||
};
|
||||
|
@ -347,6 +356,7 @@ struct vio_driver {
|
|||
void (*shutdown)(struct vio_dev *dev);
|
||||
unsigned long driver_data;
|
||||
struct device_driver driver;
|
||||
bool no_irq;
|
||||
};
|
||||
|
||||
struct vio_version {
|
||||
|
@ -490,5 +500,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
|
|||
|
||||
void vio_port_up(struct vio_driver_state *vio);
|
||||
int vio_set_intr(unsigned long dev_ino, int state);
|
||||
u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev);
|
||||
|
||||
#endif /* _SPARC64_VIO_H */
|
||||
|
@@ -167,7 +167,7 @@ static int apc_probe(struct platform_device *op)
	return 0;
}

static struct of_device_id apc_match[] = {
static const struct of_device_id apc_match[] = {
	{
		.name = APC_OBPNAME,
	},
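The const'ification commits in this pull (apc, pmc, clock) all follow the same pattern: an `of_device_id` match table is only ever read by the OF core, so it can be marked `const` and placed in rodata. A generic sketch of the full pattern, with invented driver names; only the `of_device_id`/`of_match_table` API itself is standard:

```c
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static int example_probe(struct platform_device *op)  { return 0; }
static int example_remove(struct platform_device *op) { return 0; }

/* Read-only match table: const lets it live in rodata. */
static const struct of_device_id example_match[] = {
	{ .name = "example-device", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_match);

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example",
		.of_match_table	= example_match,
	},
	.probe	= example_probe,
	.remove	= example_remove,
};
```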
@@ -52,6 +52,9 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs);
void do_signal32(struct pt_regs * regs);
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);

/* time_64.c */
void __init time_init_early(void);

/* compat_audit.c */
extern unsigned int sparc32_dir_class[];
extern unsigned int sparc32_chattr_class[];
|
@ -34,7 +34,6 @@
|
|||
|
||||
static char version[] =
|
||||
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
|
||||
#define LDC_PACKET_SIZE 64
|
||||
|
||||
/* Packet header layout for unreliable and reliable mode frames.
|
||||
* When in RAW mode, packets are simply straight 64-byte payloads
|
||||
|
@ -178,6 +177,8 @@ do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
|
|||
printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
|
||||
} while (0)
|
||||
|
||||
#define LDC_ABORT(lp) ldc_abort((lp), __func__)
|
||||
|
||||
static const char *state_to_str(u8 state)
|
||||
{
|
||||
switch (state) {
|
||||
|
@ -196,15 +197,6 @@ static const char *state_to_str(u8 state)
|
|||
}
|
||||
}
|
||||
|
||||
static void ldc_set_state(struct ldc_channel *lp, u8 state)
|
||||
{
|
||||
ldcdbg(STATE, "STATE (%s) --> (%s)\n",
|
||||
state_to_str(lp->state),
|
||||
state_to_str(state));
|
||||
|
||||
lp->state = state;
|
||||
}
|
||||
|
||||
static unsigned long __advance(unsigned long off, unsigned long num_entries)
|
||||
{
|
||||
off += LDC_PACKET_SIZE;
|
||||
|
@ -516,11 +508,12 @@ static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int ldc_abort(struct ldc_channel *lp)
|
||||
static int ldc_abort(struct ldc_channel *lp, const char *msg)
|
||||
{
|
||||
unsigned long hv_err;
|
||||
|
||||
ldcdbg(STATE, "ABORT\n");
|
||||
ldcdbg(STATE, "ABORT[%s]\n", msg);
|
||||
ldc_print(lp);
|
||||
|
||||
/* We report but do not act upon the hypervisor errors because
|
||||
* there really isn't much we can do if they fail at this point.
|
||||
|
@ -605,7 +598,7 @@ static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
|
|||
}
|
||||
}
|
||||
if (err)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -618,13 +611,13 @@ static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
|
|||
if (lp->hs_state == LDC_HS_GOTVERS) {
|
||||
if (lp->ver.major != vp->major ||
|
||||
lp->ver.minor != vp->minor)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
} else {
|
||||
lp->ver = *vp;
|
||||
lp->hs_state = LDC_HS_GOTVERS;
|
||||
}
|
||||
if (send_rts(lp))
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -635,17 +628,17 @@ static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
|
|||
unsigned long new_tail;
|
||||
|
||||
if (vp->major == 0 && vp->minor == 0)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
vap = find_by_major(vp->major);
|
||||
if (!vap)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
|
||||
vap, sizeof(*vap),
|
||||
&new_tail);
|
||||
if (!p)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
return send_tx_packet(lp, p, new_tail);
|
||||
}
|
||||
|
@ -668,7 +661,7 @@ static int process_version(struct ldc_channel *lp,
|
|||
return process_ver_nack(lp, vp);
|
||||
|
||||
default:
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -681,13 +674,13 @@ static int process_rts(struct ldc_channel *lp,
|
|||
if (p->stype != LDC_INFO ||
|
||||
lp->hs_state != LDC_HS_GOTVERS ||
|
||||
p->env != lp->cfg.mode)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
lp->snd_nxt = p->seqid;
|
||||
lp->rcv_nxt = p->seqid;
|
||||
lp->hs_state = LDC_HS_SENTRTR;
|
||||
if (send_rtr(lp))
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -700,7 +693,7 @@ static int process_rtr(struct ldc_channel *lp,
|
|||
|
||||
if (p->stype != LDC_INFO ||
|
||||
p->env != lp->cfg.mode)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
lp->snd_nxt = p->seqid;
|
||||
lp->hs_state = LDC_HS_COMPLETE;
|
||||
|
@ -723,7 +716,7 @@ static int process_rdx(struct ldc_channel *lp,
|
|||
|
||||
if (p->stype != LDC_INFO ||
|
||||
!(rx_seq_ok(lp, p->seqid)))
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
lp->rcv_nxt = p->seqid;
|
||||
|
||||
|
@ -750,14 +743,14 @@ static int process_control_frame(struct ldc_channel *lp,
|
|||
return process_rdx(lp, p);
|
||||
|
||||
default:
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
}
|
||||
}
|
||||
|
||||
static int process_error_frame(struct ldc_channel *lp,
|
||||
struct ldc_packet *p)
|
||||
{
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
}
|
||||
|
||||
static int process_data_ack(struct ldc_channel *lp,
|
||||
|
@ -776,7 +769,7 @@ static int process_data_ack(struct ldc_channel *lp,
|
|||
return 0;
|
||||
}
|
||||
if (head == lp->tx_tail)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -820,16 +813,21 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
|
|||
lp->hs_state = LDC_HS_COMPLETE;
|
||||
ldc_set_state(lp, LDC_STATE_CONNECTED);
|
||||
|
||||
event_mask |= LDC_EVENT_UP;
|
||||
|
||||
orig_state = lp->chan_state;
|
||||
/*
|
||||
* Generate an LDC_EVENT_UP event if the channel
|
||||
* was not already up.
|
||||
*/
|
||||
if (orig_state != LDC_CHANNEL_UP) {
|
||||
event_mask |= LDC_EVENT_UP;
|
||||
orig_state = lp->chan_state;
|
||||
}
|
||||
}
|
||||
|
||||
/* If we are in reset state, flush the RX queue and ignore
|
||||
* everything.
|
||||
*/
|
||||
if (lp->flags & LDC_FLAG_RESET) {
|
||||
(void) __set_rx_head(lp, lp->rx_tail);
|
||||
(void) ldc_rx_reset(lp);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -880,7 +878,7 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
|
|||
break;
|
||||
|
||||
default:
|
||||
err = ldc_abort(lp);
|
||||
err = LDC_ABORT(lp);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -895,7 +893,7 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
|
|||
|
||||
err = __set_rx_head(lp, new);
|
||||
if (err < 0) {
|
||||
(void) ldc_abort(lp);
|
||||
(void) LDC_ABORT(lp);
|
||||
break;
|
||||
}
|
||||
if (lp->hs_state == LDC_HS_COMPLETE)
|
||||
|
@ -936,7 +934,14 @@ static irqreturn_t ldc_tx(int irq, void *dev_id)
|
|||
lp->hs_state = LDC_HS_COMPLETE;
|
||||
ldc_set_state(lp, LDC_STATE_CONNECTED);
|
||||
|
||||
event_mask |= LDC_EVENT_UP;
|
||||
/*
|
||||
* Generate an LDC_EVENT_UP event if the channel
|
||||
* was not already up.
|
||||
*/
|
||||
if (orig_state != LDC_CHANNEL_UP) {
|
||||
event_mask |= LDC_EVENT_UP;
|
||||
orig_state = lp->chan_state;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&lp->lock, flags);
|
||||
|
@ -1342,6 +1347,14 @@ int ldc_bind(struct ldc_channel *lp)
|
|||
lp->hs_state = LDC_HS_OPEN;
|
||||
ldc_set_state(lp, LDC_STATE_BOUND);
|
||||
|
||||
if (lp->cfg.mode == LDC_MODE_RAW) {
|
||||
/*
|
||||
* There is no handshake in RAW mode, so handshake
|
||||
* is completed.
|
||||
*/
|
||||
lp->hs_state = LDC_HS_COMPLETE;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&lp->lock, flags);
|
||||
|
||||
return 0;
|
||||
|
@ -1447,12 +1460,54 @@ int ldc_state(struct ldc_channel *lp)
|
|||
}
|
||||
EXPORT_SYMBOL(ldc_state);
|
||||
|
||||
void ldc_set_state(struct ldc_channel *lp, u8 state)
|
||||
{
|
||||
ldcdbg(STATE, "STATE (%s) --> (%s)\n",
|
||||
state_to_str(lp->state),
|
||||
state_to_str(state));
|
||||
|
||||
lp->state = state;
|
||||
}
|
||||
EXPORT_SYMBOL(ldc_set_state);
|
||||
|
||||
int ldc_mode(struct ldc_channel *lp)
|
||||
{
|
||||
return lp->cfg.mode;
|
||||
}
|
||||
EXPORT_SYMBOL(ldc_mode);
|
||||
|
||||
int ldc_rx_reset(struct ldc_channel *lp)
|
||||
{
|
||||
return __set_rx_head(lp, lp->rx_tail);
|
||||
}
|
||||
|
||||
void __ldc_print(struct ldc_channel *lp, const char *caller)
|
||||
{
|
||||
pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n"
|
||||
"\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n"
|
||||
"\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n"
|
||||
"\trcv_nxt=%u snd_nxt=%u\n",
|
||||
caller, lp->id, lp->flags, state_to_str(lp->state),
|
||||
lp->chan_state, lp->hs_state,
|
||||
lp->rx_head, lp->rx_tail, lp->rx_num_entries,
|
||||
lp->tx_head, lp->tx_tail, lp->tx_num_entries,
|
||||
lp->rcv_nxt, lp->snd_nxt);
|
||||
}
|
||||
|
||||
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
|
||||
{
|
||||
struct ldc_packet *p;
|
||||
unsigned long new_tail;
|
||||
unsigned long new_tail, hv_err;
|
||||
int err;
|
||||
|
||||
hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
|
||||
&lp->chan_state);
|
||||
if (unlikely(hv_err))
|
||||
return -EBUSY;
|
||||
|
||||
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
if (size > LDC_PACKET_SIZE)
|
||||
return -EMSGSIZE;
|
||||
|
||||
|
@ -1483,7 +1538,7 @@ static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
|
|||
&lp->rx_tail,
|
||||
&lp->chan_state);
|
||||
if (hv_err)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
if (lp->chan_state == LDC_CHANNEL_DOWN ||
|
||||
lp->chan_state == LDC_CHANNEL_RESETTING)
|
||||
|
@ -1526,7 +1581,7 @@ static int write_nonraw(struct ldc_channel *lp, const void *buf,
|
|||
return -EBUSY;
|
||||
|
||||
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
if (!tx_has_space_for(lp, size))
|
||||
return -EAGAIN;
|
||||
|
@ -1592,9 +1647,9 @@ static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
err = __set_rx_head(lp, lp->rx_tail);
|
||||
err = ldc_rx_reset(lp);
|
||||
if (err < 0)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1607,7 +1662,7 @@ static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
|
|||
return err;
|
||||
}
|
||||
if (p->stype & LDC_NACK)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1627,7 +1682,7 @@ static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
|
|||
&lp->rx_tail,
|
||||
&lp->chan_state);
|
||||
if (hv_err)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
if (lp->chan_state == LDC_CHANNEL_DOWN ||
|
||||
lp->chan_state == LDC_CHANNEL_RESETTING)
|
||||
|
@ -1650,7 +1705,7 @@ static int rx_set_head(struct ldc_channel *lp, unsigned long head)
|
|||
int err = __set_rx_head(lp, head);
|
||||
|
||||
if (err < 0)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
lp->rx_head = head;
|
||||
return 0;
|
||||
|
@ -1689,7 +1744,7 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
|
|||
&lp->rx_tail,
|
||||
&lp->chan_state);
|
||||
if (hv_err)
|
||||
return ldc_abort(lp);
|
||||
return LDC_ABORT(lp);
|
||||
|
||||
if (lp->chan_state == LDC_CHANNEL_DOWN ||
|
||||
lp->chan_state == LDC_CHANNEL_RESETTING)
|
||||
|
@ -1733,9 +1788,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
|
|||
|
||||
lp->rcv_nxt = p->seqid;
|
||||
|
||||
/*
|
||||
* If this is a control-only packet, there is nothing
|
||||
* else to do but advance the rx queue since the packet
|
||||
* was already processed above.
|
||||
*/
|
||||
if (!(p->type & LDC_DATA)) {
|
||||
new = rx_advance(lp, new);
|
||||
goto no_data;
|
||||
break;
|
||||
}
|
||||
if (p->stype & (LDC_ACK | LDC_NACK)) {
|
||||
err = data_ack_nack(lp, p);
|
||||
|
@ -1900,6 +1960,8 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
|
|||
unsigned long flags;
|
||||
int err;
|
||||
|
||||
ldcdbg(RX, "%s: entered size=%d\n", __func__, size);
|
||||
|
||||
if (!buf)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -1915,6 +1977,9 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
|
|||
|
||||
spin_unlock_irqrestore(&lp->lock, flags);
|
||||
|
||||
ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__,
|
||||
lp->cfg.mode, lp->rx_head, lp->rx_tail, err);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(ldc_read);
|
||||
|
|
|
@ -75,6 +75,74 @@ struct mdesc_handle {
|
|||
struct mdesc_hdr mdesc;
|
||||
};
|
||||
|
||||
typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64,
|
||||
union md_node_info *);
|
||||
typedef void (*mdesc_node_info_rel_f)(union md_node_info *);
|
||||
typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *);
|
||||
|
||||
struct md_node_ops {
|
||||
char *name;
|
||||
mdesc_node_info_get_f get_info;
|
||||
mdesc_node_info_rel_f rel_info;
|
||||
mdesc_node_match_f node_match;
|
||||
};
|
||||
|
||||
static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
|
||||
union md_node_info *node_info);
|
||||
static void rel_vdev_port_node_info(union md_node_info *node_info);
|
||||
static bool vdev_port_node_match(union md_node_info *a_node_info,
|
||||
union md_node_info *b_node_info);
|
||||
|
||||
static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
|
||||
union md_node_info *node_info);
|
||||
static void rel_ds_port_node_info(union md_node_info *node_info);
|
||||
static bool ds_port_node_match(union md_node_info *a_node_info,
|
||||
union md_node_info *b_node_info);
|
||||
|
||||
/* supported node types which can be registered */
|
||||
static struct md_node_ops md_node_ops_table[] = {
|
||||
{"virtual-device-port", get_vdev_port_node_info,
|
||||
rel_vdev_port_node_info, vdev_port_node_match},
|
||||
{"domain-services-port", get_ds_port_node_info,
|
||||
rel_ds_port_node_info, ds_port_node_match},
|
||||
{NULL, NULL, NULL, NULL}
|
||||
};
|
||||
|
||||
static void mdesc_get_node_ops(const char *node_name,
|
||||
mdesc_node_info_get_f *get_info_f,
|
||||
mdesc_node_info_rel_f *rel_info_f,
|
||||
mdesc_node_match_f *match_f)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (get_info_f)
|
||||
*get_info_f = NULL;
|
||||
|
||||
if (rel_info_f)
|
||||
*rel_info_f = NULL;
|
||||
|
||||
if (match_f)
|
||||
*match_f = NULL;
|
||||
|
||||
if (!node_name)
|
||||
return;
|
||||
|
||||
for (i = 0; md_node_ops_table[i].name != NULL; i++) {
|
||||
if (strcmp(md_node_ops_table[i].name, node_name) == 0) {
|
||||
if (get_info_f)
|
||||
*get_info_f = md_node_ops_table[i].get_info;
|
||||
|
||||
if (rel_info_f)
|
||||
*rel_info_f = md_node_ops_table[i].rel_info;
|
||||
|
||||
if (match_f)
|
||||
*match_f = md_node_ops_table[i].node_match;
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void mdesc_handle_init(struct mdesc_handle *hp,
|
||||
unsigned int handle_size,
|
||||
void *base)
|
||||
|
@ -137,12 +205,10 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
|
|||
handle_size = (sizeof(struct mdesc_handle) -
|
||||
sizeof(struct mdesc_hdr) +
|
||||
mdesc_size);
|
||||
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_REPEAT);
|
||||
if (!base)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Allocation has to succeed because mdesc update would be missed
|
||||
* and such events are not retransmitted.
|
||||
*/
|
||||
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
|
||||
addr = (unsigned long)base;
|
||||
addr = (addr + 15UL) & ~15UL;
|
||||
hp = (struct mdesc_handle *) addr;
|
||||
|
@ -218,14 +284,31 @@ static struct mdesc_notifier_client *client_list;
|
|||
|
||||
void mdesc_register_notifier(struct mdesc_notifier_client *client)
|
||||
{
|
||||
bool supported = false;
|
||||
u64 node;
|
||||
int i;
|
||||
|
||||
mutex_lock(&mdesc_mutex);
|
||||
|
||||
/* check to see if the node is supported for registration */
|
||||
for (i = 0; md_node_ops_table[i].name != NULL; i++) {
|
||||
if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) {
|
||||
supported = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!supported) {
|
||||
pr_err("MD: %s node not supported\n", client->node_name);
|
||||
mutex_unlock(&mdesc_mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
client->next = client_list;
|
||||
client_list = client;
|
||||
|
||||
mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
|
||||
client->add(cur_mdesc, node);
|
||||
client->add(cur_mdesc, node, client->node_name);
|
||||
|
||||
mutex_unlock(&mdesc_mutex);
|
||||
}
|
||||
|
@ -249,59 +332,145 @@ static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
|
|||
return id;
|
||||
}
|
||||
|
||||
static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
|
||||
union md_node_info *node_info)
|
||||
{
|
||||
const u64 *parent_cfg_hdlp;
|
||||
const char *name;
|
||||
const u64 *idp;
|
||||
|
||||
/*
|
||||
* Virtual device nodes are distinguished by:
|
||||
* 1. "id" property
|
||||
* 2. "name" property
|
||||
* 3. parent node "cfg-handle" property
|
||||
*/
|
||||
idp = mdesc_get_property(md, node, "id", NULL);
|
||||
name = mdesc_get_property(md, node, "name", NULL);
|
||||
parent_cfg_hdlp = parent_cfg_handle(md, node);
|
||||
|
||||
if (!idp || !name || !parent_cfg_hdlp)
|
||||
return -1;
|
||||
|
||||
node_info->vdev_port.id = *idp;
|
||||
node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
|
||||
node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void rel_vdev_port_node_info(union md_node_info *node_info)
|
||||
{
|
||||
if (node_info && node_info->vdev_port.name) {
|
||||
kfree_const(node_info->vdev_port.name);
|
||||
node_info->vdev_port.name = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static bool vdev_port_node_match(union md_node_info *a_node_info,
|
||||
union md_node_info *b_node_info)
|
||||
{
|
||||
if (a_node_info->vdev_port.id != b_node_info->vdev_port.id)
|
||||
return false;
|
||||
|
||||
if (a_node_info->vdev_port.parent_cfg_hdl !=
|
||||
b_node_info->vdev_port.parent_cfg_hdl)
|
||||
return false;
|
||||
|
||||
if (strncmp(a_node_info->vdev_port.name,
|
||||
b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
|
||||
union md_node_info *node_info)
|
||||
{
|
||||
const u64 *idp;
|
||||
|
||||
/* DS port nodes use the "id" property to distinguish them */
|
||||
idp = mdesc_get_property(md, node, "id", NULL);
|
||||
if (!idp)
|
||||
return -1;
|
||||
|
||||
node_info->ds_port.id = *idp;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void rel_ds_port_node_info(union md_node_info *node_info)
|
||||
{
|
||||
}
|
||||
|
||||
static bool ds_port_node_match(union md_node_info *a_node_info,
|
||||
union md_node_info *b_node_info)
|
||||
{
|
||||
if (a_node_info->ds_port.id != b_node_info->ds_port.id)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Run 'func' on nodes which are in A but not in B. */
|
||||
static void invoke_on_missing(const char *name,
|
||||
struct mdesc_handle *a,
|
||||
struct mdesc_handle *b,
|
||||
void (*func)(struct mdesc_handle *, u64))
|
||||
void (*func)(struct mdesc_handle *, u64,
|
||||
const char *node_name))
|
||||
{
|
||||
u64 node;
|
||||
mdesc_node_info_get_f get_info_func;
|
||||
mdesc_node_info_rel_f rel_info_func;
|
||||
mdesc_node_match_f node_match_func;
|
||||
union md_node_info a_node_info;
|
||||
union md_node_info b_node_info;
|
||||
bool found;
|
||||
u64 a_node;
|
||||
u64 b_node;
|
||||
int rv;
|
||||
|
||||
mdesc_for_each_node_by_name(a, node, name) {
|
||||
int found = 0, is_vdc_port = 0;
|
||||
const char *name_prop;
|
||||
const u64 *id;
|
||||
u64 fnode;
|
||||
/*
|
||||
* Find the get_info, rel_info and node_match ops for the given
|
||||
* node name
|
||||
*/
|
||||
mdesc_get_node_ops(name, &get_info_func, &rel_info_func,
|
||||
&node_match_func);
|
||||
|
||||
name_prop = mdesc_get_property(a, node, "name", NULL);
|
||||
if (name_prop && !strcmp(name_prop, "vdc-port")) {
|
||||
is_vdc_port = 1;
|
||||
id = parent_cfg_handle(a, node);
|
||||
} else
|
||||
id = mdesc_get_property(a, node, "id", NULL);
|
||||
/* If we didn't find a match, the node type is not supported */
|
||||
if (!get_info_func || !rel_info_func || !node_match_func) {
|
||||
pr_err("MD: %s node type is not supported\n", name);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!id) {
|
||||
printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
|
||||
(name_prop ? name_prop : name));
|
||||
mdesc_for_each_node_by_name(a, a_node, name) {
|
||||
found = false;
|
||||
|
||||
rv = get_info_func(a, a_node, &a_node_info);
|
||||
if (rv != 0) {
|
||||
pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
|
||||
name);
|
||||
continue;
|
||||
}
|
||||
|
||||
mdesc_for_each_node_by_name(b, fnode, name) {
|
||||
const u64 *fid;
|
||||
/* Check each node in B for node matching a_node */
|
||||
mdesc_for_each_node_by_name(b, b_node, name) {
|
||||
rv = get_info_func(b, b_node, &b_node_info);
|
||||
if (rv != 0)
|
||||
continue;
|
||||
|
||||
if (is_vdc_port) {
|
||||
name_prop = mdesc_get_property(b, fnode,
|
||||
"name", NULL);
|
||||
if (!name_prop ||
|
||||
strcmp(name_prop, "vdc-port"))
|
||||
continue;
|
||||
fid = parent_cfg_handle(b, fnode);
|
||||
if (!fid) {
|
||||
printk(KERN_ERR "MD: Cannot find ID "
|
||||
"for vdc-port node.\n");
|
||||
continue;
|
||||
}
|
||||
} else
|
||||
fid = mdesc_get_property(b, fnode,
|
||||
"id", NULL);
|
||||
|
||||
if (*id == *fid) {
|
||||
found = 1;
|
||||
if (node_match_func(&a_node_info, &b_node_info)) {
|
||||
found = true;
|
||||
rel_info_func(&b_node_info);
|
||||
break;
|
||||
}
|
||||
|
||||
rel_info_func(&b_node_info);
|
||||
}
|
||||
|
||||
rel_info_func(&a_node_info);
|
||||
|
||||
if (!found)
|
||||
func(a, node);
|
||||
func(a, a_node, name);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -367,6 +536,76 @@ void mdesc_update(void)
|
|||
mutex_unlock(&mdesc_mutex);
|
||||
}
|
||||
|
||||
u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
|
||||
union md_node_info *node_info)
|
||||
{
|
||||
mdesc_node_info_get_f get_info_func;
|
||||
mdesc_node_info_rel_f rel_info_func;
|
||||
mdesc_node_match_f node_match_func;
|
||||
union md_node_info hp_node_info;
|
||||
u64 hp_node;
|
||||
int rv;
|
||||
|
||||
if (hp == NULL || node_name == NULL || node_info == NULL)
|
||||
return MDESC_NODE_NULL;
|
||||
|
||||
/* Find the ops for the given node name */
|
||||
mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func,
|
||||
&node_match_func);
|
||||
|
||||
/* If we didn't find ops for the given node name, it is not supported */
|
||||
if (!get_info_func || !rel_info_func || !node_match_func) {
|
||||
pr_err("MD: %s node is not supported\n", node_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mdesc_for_each_node_by_name(hp, hp_node, node_name) {
|
||||
rv = get_info_func(hp, hp_node, &hp_node_info);
|
||||
if (rv != 0)
|
||||
continue;
|
||||
|
||||
if (node_match_func(node_info, &hp_node_info))
|
||||
break;
|
||||
|
||||
rel_info_func(&hp_node_info);
|
||||
}
|
||||
|
||||
rel_info_func(&hp_node_info);
|
||||
|
||||
return hp_node;
|
||||
}
|
||||
EXPORT_SYMBOL(mdesc_get_node);
|
||||
|
||||
int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
|
||||
const char *node_name, union md_node_info *node_info)
|
||||
{
|
||||
mdesc_node_info_get_f get_info_func;
|
||||
int rv;
|
||||
|
||||
if (hp == NULL || node == MDESC_NODE_NULL ||
|
||||
node_name == NULL || node_info == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
/* Find the get_info op for the given node name */
|
||||
mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL);
|
||||
|
||||
/* If we didn't find a get_info_func, the node name is not supported */
|
||||
if (get_info_func == NULL) {
|
||||
pr_err("MD: %s node is not supported\n", node_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rv = get_info_func(hp, node, node_info);
|
||||
if (rv != 0) {
|
||||
pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
|
||||
node_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(mdesc_get_node_info);
|
||||
|
||||
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
|
||||
{
|
||||
return (struct mdesc_elem *) (mdesc + 1);
|
||||
|
@@ -71,7 +71,7 @@ static int pmc_probe(struct platform_device *op)
	return 0;
}

static struct of_device_id pmc_match[] = {
static const struct of_device_id pmc_match[] = {
	{
		.name = PMC_OBPNAME,
	},
@@ -381,7 +381,7 @@ bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
	int this_cpu_id;

	/* On hypervisor based platforms we interrogate the 'reg'
	 * property.  On everything else we look for a 'upa-portis',
	 * property.  On everything else we look for a 'upa-portid',
	 * 'portid', or 'cpuid' property.
	 */
@@ -95,6 +95,7 @@ static struct console prom_early_console = {
	.index =	-1,
};

/*
/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
@@ -365,6 +365,7 @@ void __init start_early_boot(void)
	}
	current_thread_info()->cpu = cpu;

	time_init_early();
	prom_init_report();
	start_kernel();
}
@@ -639,7 +640,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	task_thread_info(&init_task)->kregs = &fake_swapper_regs;
@@ -648,7 +649,7 @@ void __init setup_arch(char **cmdline_p)
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault (chosen, "client-ip", 0);
		sv = prom_getintdefault (chosen, "server-ip", 0);
		gw = prom_getintdefault (chosen, "gateway-ip", 0);
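start_early_boot() now calls time_init_early() before start_kernel(), so sched_clock() already has working tick ops when the very first printk timestamps are produced. The conversion it precomputes is the usual mult/shift trick. A standalone sketch of that arithmetic; the constant value and the `main()` wrapper are ours, while the helper mirrors what `clocksource_hz2mult()` computes and the final expression matches the new `sched_clock()` (minus the boot offset):

```c
#include <stdint.h>
#include <stdio.h>

#define SPARC64_NSEC_PER_CYC_SHIFT	10		/* assumed value for this sketch */
#define NSEC_PER_SEC			1000000000ULL

/* Same idea as clocksource_hz2mult(): mult = (NSEC_PER_SEC << shift) / hz */
static unsigned long hz2mult(unsigned long hz, unsigned int shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest */
	return (unsigned long)(tmp / hz);
}

int main(void)
{
	unsigned long freq = 1000000000UL;		/* e.g. a 1 GHz %stick */
	unsigned long quotient = hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
	unsigned long long ticks = 123456789ULL;	/* value read via get_tick() */

	/* ns = (ticks * quotient) >> shift, as in the reworked sched_clock(). */
	unsigned long long ns = (ticks * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;

	printf("mult=%lu ns=%llu\n", quotient, ns);
	return 0;
}
```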
@@ -298,7 +298,7 @@ static int clock_probe(struct platform_device *op)
	return 0;
}

static struct of_device_id clock_match[] = {
static const struct of_device_id clock_match[] = {
	{
		.name = "eeprom",
	},
|
@ -32,7 +32,6 @@
|
|||
#include <linux/kernel_stat.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/ftrace.h>
|
||||
|
||||
|
@ -47,14 +46,13 @@
|
|||
#include <asm/cpudata.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/irq_regs.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
#include "entry.h"
|
||||
#include "kernel.h"
|
||||
|
||||
DEFINE_SPINLOCK(rtc_lock);
|
||||
|
||||
#define TICK_PRIV_BIT (1UL << 63)
|
||||
#define TICKCMP_IRQ_BIT (1UL << 63)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned long profile_pc(struct pt_regs *regs)
|
||||
{
|
||||
|
@ -164,13 +162,44 @@ static unsigned long tick_add_tick(unsigned long adj)
|
|||
return new_tick;
|
||||
}
|
||||
|
||||
static struct sparc64_tick_ops tick_operations __read_mostly = {
|
||||
/* Searches for cpu clock frequency with given cpuid in OpenBoot tree */
|
||||
static unsigned long cpuid_to_freq(phandle node, int cpuid)
|
||||
{
|
||||
bool is_cpu_node = false;
|
||||
unsigned long freq = 0;
|
||||
char type[128];
|
||||
|
||||
if (!node)
|
||||
return freq;
|
||||
|
||||
if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1)
|
||||
is_cpu_node = (strcmp(type, "cpu") == 0);
|
||||
|
||||
/* try upa-portid then cpuid to get cpuid, see prom_64.c */
|
||||
if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid ||
|
||||
prom_getint(node, "cpuid") == cpuid))
|
||||
freq = prom_getintdefault(node, "clock-frequency", 0);
|
||||
if (!freq)
|
||||
freq = cpuid_to_freq(prom_getchild(node), cpuid);
|
||||
if (!freq)
|
||||
freq = cpuid_to_freq(prom_getsibling(node), cpuid);
|
||||
|
||||
return freq;
|
||||
}
|
||||
|
||||
static unsigned long tick_get_frequency(void)
|
||||
{
|
||||
return cpuid_to_freq(prom_root_node, hard_smp_processor_id());
|
||||
}
|
||||
|
||||
static struct sparc64_tick_ops tick_operations __cacheline_aligned = {
|
||||
.name = "tick",
|
||||
.init_tick = tick_init_tick,
|
||||
.disable_irq = tick_disable_irq,
|
||||
.get_tick = tick_get_tick,
|
||||
.add_tick = tick_add_tick,
|
||||
.add_compare = tick_add_compare,
|
||||
.get_frequency = tick_get_frequency,
|
||||
.softint_mask = 1UL << 0,
|
||||
};
|
||||
|
||||
|
@ -250,6 +279,11 @@ static int stick_add_compare(unsigned long adj)
|
|||
return ((long)(new_tick - (orig_tick+adj))) > 0L;
|
||||
}
|
||||
|
||||
static unsigned long stick_get_frequency(void)
|
||||
{
|
||||
return prom_getintdefault(prom_root_node, "stick-frequency", 0);
|
||||
}
|
||||
|
||||
static struct sparc64_tick_ops stick_operations __read_mostly = {
|
||||
.name = "stick",
|
||||
.init_tick = stick_init_tick,
|
||||
|
@ -257,6 +291,7 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
|
|||
.get_tick = stick_get_tick,
|
||||
.add_tick = stick_add_tick,
|
||||
.add_compare = stick_add_compare,
|
||||
.get_frequency = stick_get_frequency,
|
||||
.softint_mask = 1UL << 16,
|
||||
};
|
||||
|
||||
|
@ -277,9 +312,6 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
|
|||
* 2) write high
|
||||
* 3) write low
|
||||
*/
|
||||
#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
|
||||
#define HBIRD_STICK_ADDR 0x1fe0000f070UL
|
||||
|
||||
static unsigned long __hbird_read_stick(void)
|
||||
{
|
||||
unsigned long ret, tmp1, tmp2, tmp3;
|
||||
|
@ -381,6 +413,11 @@ static int hbtick_add_compare(unsigned long adj)
|
|||
return ((long)(val2 - val)) > 0L;
|
||||
}
|
||||
|
||||
static unsigned long hbtick_get_frequency(void)
|
||||
{
|
||||
return prom_getintdefault(prom_root_node, "stick-frequency", 0);
|
||||
}
|
||||
|
||||
static struct sparc64_tick_ops hbtick_operations __read_mostly = {
|
||||
.name = "hbtick",
|
||||
.init_tick = hbtick_init_tick,
|
||||
|
@ -388,11 +425,10 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
|
|||
.get_tick = hbtick_get_tick,
|
||||
.add_tick = hbtick_add_tick,
|
||||
.add_compare = hbtick_add_compare,
|
||||
.get_frequency = hbtick_get_frequency,
|
||||
.softint_mask = 1UL << 0,
|
||||
};
|
||||
|
||||
static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
|
||||
|
||||
unsigned long cmos_regs;
|
||||
EXPORT_SYMBOL(cmos_regs);
|
||||
|
||||
|
@ -582,34 +618,17 @@ static int __init clock_init(void)
|
|||
*/
|
||||
fs_initcall(clock_init);
|
||||
|
||||
/* This is gets the master TICK_INT timer going. */
|
||||
static unsigned long sparc64_init_timers(void)
|
||||
/* Return true if this is Hummingbird, aka Ultra-IIe */
|
||||
static bool is_hummingbird(void)
|
||||
{
|
||||
struct device_node *dp;
|
||||
unsigned long freq;
|
||||
unsigned long ver, manuf, impl;
|
||||
|
||||
dp = of_find_node_by_path("/");
|
||||
if (tlb_type == spitfire) {
|
||||
unsigned long ver, manuf, impl;
|
||||
__asm__ __volatile__ ("rdpr %%ver, %0"
|
||||
: "=&r" (ver));
|
||||
manuf = ((ver >> 48) & 0xffff);
|
||||
impl = ((ver >> 32) & 0xffff);
|
||||
|
||||
__asm__ __volatile__ ("rdpr %%ver, %0"
|
||||
: "=&r" (ver));
|
||||
manuf = ((ver >> 48) & 0xffff);
|
||||
impl = ((ver >> 32) & 0xffff);
|
||||
if (manuf == 0x17 && impl == 0x13) {
|
||||
/* Hummingbird, aka Ultra-IIe */
|
||||
tick_ops = &hbtick_operations;
|
||||
freq = of_getintprop_default(dp, "stick-frequency", 0);
|
||||
} else {
|
||||
tick_ops = &tick_operations;
|
||||
freq = local_cpu_data().clock_tick;
|
||||
}
|
||||
} else {
|
||||
tick_ops = &stick_operations;
|
||||
freq = of_getintprop_default(dp, "stick-frequency", 0);
|
||||
}
|
||||
|
||||
return freq;
|
||||
return (manuf == 0x17 && impl == 0x13);
|
||||
}
|
||||
|
||||
struct freq_table {
|
||||
|
@ -671,12 +690,12 @@ core_initcall(register_sparc64_cpufreq_notifier);
|
|||
static int sparc64_next_event(unsigned long delta,
|
||||
struct clock_event_device *evt)
|
||||
{
|
||||
return tick_ops->add_compare(delta) ? -ETIME : 0;
|
||||
return tick_operations.add_compare(delta) ? -ETIME : 0;
|
||||
}
|
||||
|
||||
static int sparc64_timer_shutdown(struct clock_event_device *evt)
|
||||
{
|
||||
tick_ops->disable_irq();
|
||||
tick_operations.disable_irq();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -693,7 +712,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
|
|||
void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
unsigned long tick_mask = tick_ops->softint_mask;
|
||||
unsigned long tick_mask = tick_operations.softint_mask;
|
||||
int cpu = smp_processor_id();
|
||||
struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
|
||||
|
||||
|
@ -728,7 +747,7 @@ void setup_sparc64_timer(void)
|
|||
: "=r" (pstate)
|
||||
: "i" (PSTATE_IE));
|
||||
|
||||
tick_ops->init_tick();
|
||||
tick_operations.init_tick();
|
||||
|
||||
/* Restore PSTATE_IE. */
|
||||
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
|
||||
|
@ -755,12 +774,10 @@ static unsigned long tb_ticks_per_usec __read_mostly;
|
|||
|
||||
void __delay(unsigned long loops)
|
||||
{
|
||||
unsigned long bclock, now;
|
||||
unsigned long bclock = get_tick();
|
||||
|
||||
bclock = tick_ops->get_tick();
|
||||
do {
|
||||
now = tick_ops->get_tick();
|
||||
} while ((now-bclock) < loops);
|
||||
while ((get_tick() - bclock) < loops)
|
||||
;
|
||||
}
|
||||
EXPORT_SYMBOL(__delay);
|
||||
|
||||
|
@ -772,26 +789,71 @@ EXPORT_SYMBOL(udelay);

static u64 clocksource_tick_read(struct clocksource *cs)
{
	return tick_ops->get_tick();
	return get_tick();
}

static void __init get_tick_patch(void)
{
	unsigned int *addr, *instr, i;
	struct get_tick_patch *p;

	if (tlb_type == spitfire && is_hummingbird())
		return;

	for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) {
		instr = (tlb_type == spitfire) ? p->tick : p->stick;
		addr = (unsigned int *)(unsigned long)p->addr;
		for (i = 0; i < GET_TICK_NINSTR; i++) {
			addr[i] = instr[i];
			/* ensure that address is modified before flush */
			wmb();
			flushi(&addr[i]);
		}
	}
}

static void init_tick_ops(struct sparc64_tick_ops *ops)
{
	unsigned long freq, quotient, tick;

	freq = ops->get_frequency();
	quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
	tick = ops->get_tick();

	ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;
	ops->ticks_per_nsec_quotient = quotient;
	ops->frequency = freq;
	tick_operations = *ops;
	get_tick_patch();
}

void __init time_init_early(void)
{
	if (tlb_type == spitfire) {
		if (is_hummingbird())
			init_tick_ops(&hbtick_operations);
		else
			init_tick_ops(&tick_operations);
	} else {
		init_tick_ops(&stick_operations);
	}
}

void __init time_init(void)
{
	unsigned long freq = sparc64_init_timers();
	unsigned long freq;

	freq = tick_operations.frequency;
	tb_ticks_per_usec = freq / USEC_PER_SEC;

	timer_ticks_per_nsec_quotient =
		clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);

	clocksource_tick.name = tick_ops->name;
	clocksource_tick.name = tick_operations.name;
	clocksource_tick.read = clocksource_tick_read;

	clocksource_register_hz(&clocksource_tick, freq);
	printk("clocksource: mult[%x] shift[%d]\n",
	       clocksource_tick.mult, clocksource_tick.shift);

	sparc64_clockevent.name = tick_ops->name;
	sparc64_clockevent.name = tick_operations.name;
	clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);

	sparc64_clockevent.max_delta_ns =
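init_tick_ops() above precomputes a fixed-point multiplier so that later tick-to-nanosecond conversions need only a multiply and a shift. A small userspace sketch of that arithmetic, equivalent in spirit to clocksource_hz2mult(); the shift of 10 and the 1 GHz frequency are illustrative values, not taken from the kernel:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ull
#define CYC_SHIFT	10	/* illustrative stand-in for SPARC64_NSEC_PER_CYC_SHIFT */

/* mult = round(NSEC_PER_SEC * 2^shift / hz), the clocksource_hz2mult() idea */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;		/* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t freq = 1000000000;	/* pretend %stick runs at 1 GHz */
	uint32_t quotient = hz2mult(freq, CYC_SHIFT);
	uint64_t ticks = 5000000;	/* 5 ms worth of ticks at that rate */

	/* ticks -> ns with one multiply and one shift, as in the code above */
	printf("quotient=%u, %llu ticks = %llu ns\n", quotient,
	       (unsigned long long)ticks,
	       (unsigned long long)((ticks * quotient) >> CYC_SHIFT));
	return 0;
}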
@ -809,14 +871,21 @@ void __init time_init(void)

unsigned long long sched_clock(void)
{
	unsigned long ticks = tick_ops->get_tick();
	unsigned long quotient = tick_operations.ticks_per_nsec_quotient;
	unsigned long offset = tick_operations.offset;

	return (ticks * timer_ticks_per_nsec_quotient)
		>> SPARC64_NSEC_PER_CYC_SHIFT;
	/* Use barrier so the compiler emits the loads first and overlaps load
	 * latency with reading tick, because reading %tick/%stick is a
	 * post-sync instruction that will flush and restart subsequent
	 * instructions after it commits.
	 */
	barrier();

	return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
}

int read_current_timer(unsigned long *timer_val)
{
	*timer_val = tick_ops->get_tick();
	*timer_val = get_tick();
	return 0;
}
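The offset recorded by init_tick_ops() is the scaled counter value at initialization time, so sched_clock() above starts counting from roughly zero instead of from whatever %tick happened to hold at power-on. A tiny sketch of that subtraction with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define SHIFT	10	/* must match the shift used to compute the quotient */

int main(void)
{
	uint64_t quotient = 1024;		/* e.g. hz2mult(1 GHz, 10) from the sketch above */
	uint64_t tick_at_init = 123456789;	/* counter value when the ops were installed */
	uint64_t offset = (tick_at_init * quotient) >> SHIFT;
	uint64_t tick_now = tick_at_init + 2000000;	/* 2 ms later at 1 GHz */

	/* same formula as sched_clock(): scale, then subtract the init-time offset */
	printf("sched_clock = %llu ns\n",
	       (unsigned long long)(((tick_now * quotient) >> SHIFT) - offset));
	return 0;
}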
@ -70,15 +70,26 @@ static int vio_device_probe(struct device *dev)
	struct vio_dev *vdev = to_vio_dev(dev);
	struct vio_driver *drv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (drv->probe) {
		id = vio_match_device(drv->id_table, vdev);
		if (id)
			error = drv->probe(vdev, id);
	if (!drv->probe)
		return -ENODEV;

	id = vio_match_device(drv->id_table, vdev);
	if (!id)
		return -ENODEV;

	/* alloc irqs (unless the driver specified not to) */
	if (!drv->no_irq) {
		if (vdev->tx_irq == 0 && vdev->tx_ino != ~0UL)
			vdev->tx_irq = sun4v_build_virq(vdev->cdev_handle,
							vdev->tx_ino);

		if (vdev->rx_irq == 0 && vdev->rx_ino != ~0UL)
			vdev->rx_irq = sun4v_build_virq(vdev->cdev_handle,
							vdev->rx_ino);
	}

	return error;
	return drv->probe(vdev, id);
}

static int vio_device_remove(struct device *dev)
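With the reworked vio_device_probe() above, the bus core builds the tx/rx virqs from the MDESC ino values before a driver's probe() runs, unless the driver sets no_irq. A hedged sketch of a minimal client driver as it would sit on this bus; the "example" names are hypothetical and error handling is trimmed:

#include <linux/module.h>
#include <asm/vio.h>

static const struct vio_device_id example_match[] = {
	{ .type = "example-device" },	/* matched against the MDESC device-type */
	{},
};
MODULE_DEVICE_TABLE(vio, example_match);

static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	/* tx_irq/rx_irq were already built from tx_ino/rx_ino by the bus core */
	dev_info(&vdev->dev, "bound, channel id %lu\n", vdev->channel_id);
	return 0;
}

static int example_remove(struct vio_dev *vdev)
{
	return 0;
}

static struct vio_driver example_driver = {
	.id_table	= example_match,
	.probe		= example_probe,
	.remove		= example_remove,
	.name		= "example_vio",
};

static int __init example_init(void)
{
	return vio_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	vio_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");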
@ -86,8 +97,15 @@ static int vio_device_remove(struct device *dev)
	struct vio_dev *vdev = to_vio_dev(dev);
	struct vio_driver *drv = to_vio_driver(dev->driver);

	if (drv->remove)
	if (drv->remove) {
		/*
		 * Ideally, we would remove/deallocate tx/rx virqs
		 * here - however, there are currently no support
		 * routines to do so at the moment. TBD
		 */

		return drv->remove(vdev);
	}

	return 1;
}

@ -185,11 +203,58 @@ static struct device_node *cdev_node;
static struct vio_dev *root_vdev;
static u64 cdev_cfg_handle;

static const u64 *vio_cfg_handle(struct mdesc_handle *hp, u64 node)
{
	const u64 *cfg_handle = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		cfg_handle = mdesc_get_property(hp, target,
						"cfg-handle", NULL);
		if (cfg_handle)
			break;
	}

	return cfg_handle;
}

/**
 * vio_vdev_node() - Find VDEV node in MD
 * @hp: Handle to the MD
 * @vdev: Pointer to VDEV
 *
 * Find the node in the current MD which matches the given vio_dev. This
 * must be done dynamically since the node value can change if the MD
 * is updated.
 *
 * NOTE: the MD must be locked, using mdesc_grab(), when calling this routine
 *
 * Return: The VDEV node in MDESC
 */
u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev)
{
	u64 node;

	if (vdev == NULL)
		return MDESC_NODE_NULL;

	node = mdesc_get_node(hp, (const char *)vdev->node_name,
			      &vdev->md_node_info);

	return node;
}

static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
				  struct vio_dev *vdev)
{
	u64 a;

	vdev->tx_ino = ~0UL;
	vdev->rx_ino = ~0UL;
	vdev->channel_id = ~0UL;
	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
		const u64 *chan_id;
		const u64 *irq;

@ -199,18 +264,18 @@ static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,

		irq = mdesc_get_property(hp, target, "tx-ino", NULL);
		if (irq)
			vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
			vdev->tx_ino = *irq;

		irq = mdesc_get_property(hp, target, "rx-ino", NULL);
		if (irq) {
			vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
		if (irq)
			vdev->rx_ino = *irq;
		}

		chan_id = mdesc_get_property(hp, target, "id", NULL);
		if (chan_id)
			vdev->channel_id = *chan_id;
	}

	vdev->cdev_handle = cdev_cfg_handle;
}

int vio_set_intr(unsigned long dev_ino, int state)
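The vio_vdev_node() helper above lets callers re-resolve a device's MD node after a machine-description update instead of caching a node index that may have moved. A hedged sketch of how it might be used together with mdesc_grab()/mdesc_release(); the helper name and the property read below are illustrative, not code from this series:

#include <asm/mdesc.h>
#include <asm/vio.h>

/* Hypothetical helper: re-read the "id" property of a vio_dev's MD node. */
static u64 example_read_port_id(struct vio_dev *vdev)
{
	struct mdesc_handle *hp;
	const u64 *id;
	u64 node, port_id = ~0UL;

	hp = mdesc_grab();		/* pin the current machine description */
	if (!hp)
		return port_id;

	node = vio_vdev_node(hp, vdev);	/* may differ from any node seen earlier */
	if (node != MDESC_NODE_NULL) {
		id = mdesc_get_property(hp, node, "id", NULL);
		if (id)
			port_id = *id;
	}

	mdesc_release(hp);
	return port_id;
}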
@ -223,14 +288,14 @@ int vio_set_intr(unsigned long dev_ino, int state)
EXPORT_SYMBOL(vio_set_intr);

static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
				      const char *node_name,
				      struct device *parent)
{
	const char *type, *compat, *bus_id_name;
	const char *type, *compat;
	struct device_node *dp;
	struct vio_dev *vdev;
	int err, tlen, clen;
	const u64 *id, *cfg_handle;
	u64 a;

	type = mdesc_get_property(hp, mp, "device-type", &tlen);
	if (!type) {

@ -240,7 +305,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
			tlen = strlen(type) + 1;
		}
	}
	if (tlen > VIO_MAX_TYPE_LEN) {
	if (tlen > VIO_MAX_TYPE_LEN || strlen(type) >= VIO_MAX_TYPE_LEN) {
		printk(KERN_ERR "VIO: Type string [%s] is too long.\n",
		       type);
		return NULL;

@ -248,31 +313,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,

	id = mdesc_get_property(hp, mp, "id", NULL);

	cfg_handle = NULL;
	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		cfg_handle = mdesc_get_property(hp, target,
						"cfg-handle", NULL);
		if (cfg_handle)
			break;
	}

	bus_id_name = type;
	if (!strcmp(type, "domain-services-port"))
		bus_id_name = "ds";

	/*
	 * 20 char is the old driver-core name size limit, which is no more.
	 * This check can probably be removed after review and possible
	 * adaption of the vio users name length handling.
	 */
	if (strlen(bus_id_name) >= 20 - 4) {
		printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n",
		       bus_id_name);
		return NULL;
	}
	cfg_handle = vio_cfg_handle(hp, mp);

	compat = mdesc_get_property(hp, mp, "device-type", &clen);
	if (!compat) {

@ -297,25 +338,23 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
	memset(vdev->compat, 0, sizeof(vdev->compat));
	vdev->compat_len = clen;

	vdev->channel_id = ~0UL;
	vdev->tx_irq = ~0;
	vdev->rx_irq = ~0;
	vdev->port_id = ~0UL;
	vdev->tx_irq = 0;
	vdev->rx_irq = 0;

	vio_fill_channel_info(hp, mp, vdev);

	if (!id) {
		dev_set_name(&vdev->dev, "%s", bus_id_name);
		dev_set_name(&vdev->dev, "%s", type);
		vdev->dev_no = ~(u64)0;
		vdev->id = ~(u64)0;
	} else if (!cfg_handle) {
		dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
		dev_set_name(&vdev->dev, "%s-%llu", type, *id);
		vdev->dev_no = *id;
		vdev->id = ~(u64)0;
	} else {
		dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
		dev_set_name(&vdev->dev, "%s-%llu-%llu", type,
			     *cfg_handle, *id);
		vdev->dev_no = *cfg_handle;
		vdev->id = *id;
		vdev->port_id = *id;
	}

	vdev->dev.parent = parent;
@ -337,7 +376,26 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
	}
	vdev->dp = dp;

	printk(KERN_INFO "VIO: Adding device %s\n", dev_name(&vdev->dev));
	/*
	 * node_name is NULL for the parent/channel-devices node and
	 * the parent doesn't require the MD node info.
	 */
	if (node_name != NULL) {
		(void) snprintf(vdev->node_name, VIO_MAX_NAME_LEN, "%s",
				node_name);

		err = mdesc_get_node_info(hp, mp, node_name,
					  &vdev->md_node_info);
		if (err) {
			pr_err("VIO: Could not get MD node info %s, err=%d\n",
			       dev_name(&vdev->dev), err);
			kfree(vdev);
			return NULL;
		}
	}

	pr_info("VIO: Adding device %s (tx_ino = %llx, rx_ino = %llx)\n",
		dev_name(&vdev->dev), vdev->tx_ino, vdev->rx_ino);

	err = device_register(&vdev->dev);
	if (err) {

@ -353,73 +411,42 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
	return vdev;
}

static void vio_add(struct mdesc_handle *hp, u64 node)
static void vio_add(struct mdesc_handle *hp, u64 node,
		    const char *node_name)
{
	(void) vio_create_one(hp, node, &root_vdev->dev);
	(void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}

struct vio_md_node_query {
	const char *type;
	u64 dev_no;
	u64 id;
struct vio_remove_node_data {
	struct mdesc_handle *hp;
	u64 node;
};

static int vio_md_node_match(struct device *dev, void *arg)
{
	struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct vio_remove_node_data *node_data;
	u64 node;

	if (vdev->dev_no != query->dev_no)
		return 0;
	if (vdev->id != query->id)
		return 0;
	if (strcmp(vdev->type, query->type))
		return 0;
	node_data = (struct vio_remove_node_data *)arg;

	return 1;
	node = vio_vdev_node(node_data->hp, vdev);

	if (node == node_data->node)
		return 1;
	else
		return 0;
}

static void vio_remove(struct mdesc_handle *hp, u64 node)
static void vio_remove(struct mdesc_handle *hp, u64 node, const char *node_name)
{
	const char *type;
	const u64 *id, *cfg_handle;
	u64 a;
	struct vio_md_node_query query;
	struct vio_remove_node_data node_data;
	struct device *dev;

	type = mdesc_get_property(hp, node, "device-type", NULL);
	if (!type) {
		type = mdesc_get_property(hp, node, "name", NULL);
		if (!type)
			type = mdesc_node_name(hp, node);
	}
	node_data.hp = hp;
	node_data.node = node;

	query.type = type;

	id = mdesc_get_property(hp, node, "id", NULL);
	cfg_handle = NULL;
	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		cfg_handle = mdesc_get_property(hp, target,
						"cfg-handle", NULL);
		if (cfg_handle)
			break;
	}

	if (!id) {
		query.dev_no = ~(u64)0;
		query.id = ~(u64)0;
	} else if (!cfg_handle) {
		query.dev_no = *id;
		query.id = ~(u64)0;
	} else {
		query.dev_no = *cfg_handle;
		query.id = *id;
	}

	dev = device_find_child(&root_vdev->dev, &query,
	dev = device_find_child(&root_vdev->dev, (void *)&node_data,
				vio_md_node_match);
	if (dev) {
		printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));

@ -427,15 +454,7 @@ static void vio_remove(struct mdesc_handle *hp, u64 node)
		device_unregister(dev);
		put_device(dev);
	} else {
		if (!id)
			printk(KERN_ERR "VIO: Removed unknown %s node.\n",
			       type);
		else if (!cfg_handle)
			printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
			       type, *id);
		else
			printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
			       type, *cfg_handle, *id);
		pr_err("VIO: %s node not found in MDESC\n", node_name);
	}
}

@ -450,7 +469,8 @@ static struct mdesc_notifier_client vio_device_notifier = {
 * under "openboot" that we should not mess with as aparently that is
 * reserved exclusively for OBP use.
 */
static void vio_add_ds(struct mdesc_handle *hp, u64 node)
static void vio_add_ds(struct mdesc_handle *hp, u64 node,
		       const char *node_name)
{
	int found;
	u64 a;

@ -467,7 +487,7 @@ static void vio_add_ds(struct mdesc_handle *hp, u64 node)
	}

	if (found)
		(void) vio_create_one(hp, node, &root_vdev->dev);
		(void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}

static struct mdesc_notifier_client vio_ds_notifier = {

@ -534,7 +554,7 @@ static int __init vio_init(void)

	cdev_cfg_handle = *cfg_handle;

	root_vdev = vio_create_one(hp, root, NULL);
	root_vdev = vio_create_one(hp, root, NULL, NULL);
	err = -ENODEV;
	if (!root_vdev) {
		printk(KERN_ERR "VIO: Could not create root device.\n");
@ -223,6 +223,9 @@ static int send_rdx(struct vio_driver_state *vio)

static int send_attr(struct vio_driver_state *vio)
{
	if (!vio->ops)
		return -EINVAL;

	return vio->ops->send_attr(vio);
}

@ -283,6 +286,7 @@ static int process_ver_info(struct vio_driver_state *vio,
	ver.minor = vap->minor;
	pkt->minor = ver.minor;
	pkt->tag.stype = VIO_SUBTYPE_ACK;
	pkt->dev_class = vio->dev_class;
	viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
	       pkt->major, pkt->minor);
	err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));

@ -374,6 +378,9 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	if (!vio->ops)
		return 0;

	err = vio->ops->handle_attr(vio, pkt);
	if (err < 0) {
		return handshake_failure(vio);

@ -388,6 +395,7 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
			vio->hs_state |= VIO_HS_SENT_DREG;
		}
	}

	return 0;
}

@ -647,10 +655,13 @@ int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
		err = process_unknown(vio, pkt);
		break;
	}

	if (!err &&
	    vio->hs_state != prev_state &&
	    (vio->hs_state & VIO_HS_COMPLETE))
		vio->ops->handshake_complete(vio);
	    (vio->hs_state & VIO_HS_COMPLETE)) {
		if (vio->ops)
			vio->ops->handshake_complete(vio);
	}

	return err;
}

@ -765,7 +776,11 @@ void vio_port_up(struct vio_driver_state *vio)
	}

	if (!err) {
		err = ldc_connect(vio->lp);
		if (ldc_mode(vio->lp) == LDC_MODE_RAW)
			ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
		else
			err = ldc_connect(vio->lp);

		if (err)
			printk(KERN_WARNING "%s: Port %lu connect failed, "
			       "err=%d\n",

@ -805,8 +820,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
		return -EINVAL;
	}

	if (!ops->send_attr ||
	    !ops->handle_attr ||
	if (!ops || !ops->send_attr || !ops->handle_attr ||
	    !ops->handshake_complete)
		return -EINVAL;

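vio_driver_init() now tolerates a NULL ops table, and the handshake paths above check vio->ops before dereferencing it, so devices that do not take part in attribute exchange can still register. For drivers that do supply ops, the contract is the three callbacks checked above; a hedged sketch with hypothetical names:

#include <asm/vio.h>

static int example_send_attr(struct vio_driver_state *vio)
{
	/* build and send the device-specific ATTR INFO packet */
	return 0;
}

static int example_handle_attr(struct vio_driver_state *vio, void *pkt)
{
	/* validate the peer's attributes; a negative return fails the handshake */
	return 0;
}

static void example_handshake_complete(struct vio_driver_state *vio)
{
	/* start the data path once VIO_HS_COMPLETE is reached */
}

static struct vio_driver_ops example_ops = {
	.send_attr		= example_send_attr,
	.handle_attr		= example_handle_attr,
	.handshake_complete	= example_handshake_complete,
};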
@ -149,6 +149,11 @@
		*(.sun_m7_2insn_patch)
		__sun_m7_2insn_patch_end = .;
	}
	.get_tick_patch : {
		__get_tick_patch = .;
		*(.get_tick_patch)
		__get_tick_patch_end = .;
	}
	PERCPU_SECTION(SMP_CACHE_BYTES)

#ifdef CONFIG_JUMP_LABEL
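The .get_tick_patch output section above collects the records that get_tick_patch() walks between __get_tick_patch and __get_tick_patch_end. The general mechanism is easy to reproduce in userspace: objects are emitted into a named ELF section and iterated between linker-provided start/stop symbols. Section and symbol names below are illustrative, not the kernel's:

#include <stdio.h>

struct patch_entry {
	const char *site;
	unsigned int insn;
};

/* Drop one record into the "patch_table" section for each use of the macro. */
#define PATCH_ENTRY(name, val)						\
	static const struct patch_entry name				\
	__attribute__((used, section("patch_table"))) = { #name, (val) }

PATCH_ENTRY(site_a, 0x01000000);
PATCH_ENTRY(site_b, 0x02000000);

/* GNU toolchains synthesize these symbols for sections named like C identifiers. */
extern const struct patch_entry __start_patch_table[];
extern const struct patch_entry __stop_patch_table[];

int main(void)
{
	const struct patch_entry *p;

	for (p = __start_patch_table; p < __stop_patch_table; p++)
		printf("%s -> %#x\n", p->site, p->insn);
	return 0;
}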
@ -20,6 +20,7 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>

/*