Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch/tile: convert a BUG_ON to BUILD_BUG_ON
  arch/tile: make ptrace() work properly for TILE-Gx COMPAT mode
  arch/tile: support new info op generated by compiler
  arch/tile: minor whitespace/naming changes for string support files
  arch/tile: enable single-step support for TILE-Gx
  arch/tile: parameterize system PLs to support KVM port
  arch/tile: add Tilera's <arch/sim.h> header as an open-source header
  arch/tile: Bomb C99 comments to C89 comments in tile's <arch/sim_def.h>
  arch/tile: prevent corrupt top frame from causing backtracer runaway
  arch/tile: various top-level Makefile cleanups
  arch/tile: change lower bound on syscall error return to -4095
  arch/tile: properly export __mb_incoherent for modules
  arch/tile: provide a definition of MAP_STACK
  kmemleak: add TILE to the list of supported architectures.
  char: hvc: check for error case
  arch/tile: Add a warning if we try to allocate too much vmalloc memory.
  arch/tile: update some comments to clarify register usage.
  arch/tile: use better "punctuation" for VMSPLIT_3_5G and friends
  arch/tile: Use <asm-generic/syscalls.h>
  tile: replace some BUG_ON checks with BUILD_BUG_ON checks
commit e404f91ed2
48 changed files with 1609 additions and 732 deletions
arch/tile/Kconfig

@@ -96,6 +96,7 @@ config HVC_TILE
 
 config TILE
 	def_bool y
+	select HAVE_KVM if !TILEGX
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_FIND_NEXT_BIT
 	select USE_GENERIC_SMP_HELPERS
@@ -236,9 +237,9 @@ choice
 	  If you are not absolutely sure what you are doing, leave this
 	  option alone!
 
-	config VMSPLIT_375G
+	config VMSPLIT_3_75G
 		bool "3.75G/0.25G user/kernel split (no kernel networking)"
-	config VMSPLIT_35G
+	config VMSPLIT_3_5G
 		bool "3.5G/0.5G user/kernel split"
 	config VMSPLIT_3G
 		bool "3G/1G user/kernel split"
@@ -252,8 +253,8 @@ endchoice
 
 config PAGE_OFFSET
 	hex
-	default 0xF0000000 if VMSPLIT_375G
-	default 0xE0000000 if VMSPLIT_35G
+	default 0xF0000000 if VMSPLIT_3_75G
+	default 0xE0000000 if VMSPLIT_3_5G
 	default 0xB0000000 if VMSPLIT_3G_OPT
 	default 0x80000000 if VMSPLIT_2G
 	default 0x40000000 if VMSPLIT_1G
@@ -314,6 +315,15 @@ config HARDWALL
 	bool "Hardwall support to allow access to user dynamic network"
 	default y
 
+config KERNEL_PL
+	int "Processor protection level for kernel"
+	range 1 2
+	default "1"
+	---help---
+	  This setting determines the processor protection level the
+	  kernel will be built to run at.  Generally you should use
+	  the default value here.
+
 endmenu  # Tilera-specific configuration
 
 menu "Bus options"
@@ -354,3 +364,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/tile/kvm/Kconfig"
arch/tile/Makefile

@@ -26,8 +26,9 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
   endif
 endif
 
+
 ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
 KBUILD_CFLAGS   += $(CONFIG_DEBUG_EXTRA_FLAGS)
 endif
 
 LIBGCC_PATH     := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)

@@ -49,6 +50,20 @@ head-y := arch/tile/kernel/head_$(BITS).o
 libs-y += arch/tile/lib/
 libs-y += $(LIBGCC_PATH)
 
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y += arch/tile/
 
+core-$(CONFIG_KVM) += arch/tile/kvm/
+
+ifdef TILERA_ROOT
+INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
+endif
+
+install:
+	install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+	install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+	install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+define archhelp
+	echo '  install         - install kernel into $(INSTALL_PATH)'
+endef
arch/tile/include/arch/sim.h (new file, 619 lines)

@@ -0,0 +1,619 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/**
 * @file
 *
 * Provides an API for controlling the simulator at runtime.
 */

/**
 * @addtogroup arch_sim
 * @{
 *
 * An API for controlling the simulator at runtime.
 *
 * The simulator's behavior can be modified while it is running.
 * For example, human-readable trace output can be enabled and disabled
 * around code of interest.
 *
 * There are two ways to modify simulator behavior:
 * programmatically, by calling various sim_* functions, and
 * interactively, by entering commands like "sim set functional true"
 * at the tile-monitor prompt. Typing "sim help" at that prompt provides
 * a list of interactive commands.
 *
 * All interactive commands can also be executed programmatically by
 * passing a string to the sim_command function.
 */

#ifndef __ARCH_SIM_H__
#define __ARCH_SIM_H__

#include <arch/sim_def.h>
#include <arch/abi.h>

#ifndef __ASSEMBLER__

#include <arch/spr_def.h>


/**
 * Return true if the current program is running under a simulator,
 * rather than on real hardware. If running on hardware, other "sim_xxx()"
 * calls have no useful effect.
 */
static inline int
sim_is_simulator(void)
{
  return __insn_mfspr(SPR_SIM_CONTROL) != 0;
}


/**
 * Checkpoint the simulator state to a checkpoint file.
 *
 * The checkpoint file name is either the default or the name specified
 * on the command line with "--checkpoint-file".
 */
static __inline void
sim_checkpoint(void)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_CHECKPOINT);
}


/**
 * Report whether or not various kinds of simulator tracing are enabled.
 *
 * @return The bitwise OR of these values:
 *
 * SIM_TRACE_CYCLES (--trace-cycles),
 * SIM_TRACE_ROUTER (--trace-router),
 * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
 * SIM_TRACE_DISASM (--trace-disasm),
 * SIM_TRACE_STALL_INFO (--trace-stall-info)
 * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
 * SIM_TRACE_L2_CACHE (--trace-l2)
 * SIM_TRACE_LINES (--trace-lines)
 */
static __inline unsigned int
sim_get_tracing(void)
{
  return __insn_mfspr(SPR_SIM_CONTROL) & SIM_TRACE_FLAG_MASK;
}


/**
 * Turn on or off different kinds of simulator tracing.
 *
 * @param mask Either one of these special values:
 *
 * SIM_TRACE_NONE (turns off tracing),
 * SIM_TRACE_ALL (turns on all possible tracing).
 *
 * or the bitwise OR of these values:
 *
 * SIM_TRACE_CYCLES (--trace-cycles),
 * SIM_TRACE_ROUTER (--trace-router),
 * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
 * SIM_TRACE_DISASM (--trace-disasm),
 * SIM_TRACE_STALL_INFO (--trace-stall-info)
 * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
 * SIM_TRACE_L2_CACHE (--trace-l2)
 * SIM_TRACE_LINES (--trace-lines)
 */
static __inline void
sim_set_tracing(unsigned int mask)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_TRACE_SPR_ARG(mask));
}


/**
 * Request dumping of different kinds of simulator state.
 *
 * @param mask Either this special value:
 *
 * SIM_DUMP_ALL (dump all known state)
 *
 * or the bitwise OR of these values:
 *
 * SIM_DUMP_REGS (the register file),
 * SIM_DUMP_SPRS (the SPRs),
 * SIM_DUMP_ITLB (the iTLB),
 * SIM_DUMP_DTLB (the dTLB),
 * SIM_DUMP_L1I (the L1 I-cache),
 * SIM_DUMP_L1D (the L1 D-cache),
 * SIM_DUMP_L2 (the L2 cache),
 * SIM_DUMP_SNREGS (the switch register file),
 * SIM_DUMP_SNITLB (the switch iTLB),
 * SIM_DUMP_SNL1I (the switch L1 I-cache),
 * SIM_DUMP_BACKTRACE (the current backtrace)
 */
static __inline void
sim_dump(unsigned int mask)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_DUMP_SPR_ARG(mask));
}


/**
 * Print a string to the simulator stdout.
 *
 * @param str The string to be written; a newline is automatically added.
 */
static __inline void
sim_print_string(const char* str)
{
  int i;
  for (i = 0; str[i] != 0; i++)
  {
    __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
                 (str[i] << _SIM_CONTROL_OPERATOR_BITS));
  }
  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
               (SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
}


/**
 * Execute a simulator command string.
 *
 * Type 'sim help' at the tile-monitor prompt to learn what commands
 * are available. Note the use of the tile-monitor "sim" command to
 * pass commands to the simulator.
 *
 * The argument to sim_command() does not include the leading "sim"
 * prefix used at the tile-monitor prompt; for example, you might call
 * sim_command("trace disasm").
 */
static __inline void
sim_command(const char* str)
{
  int c;
  do
  {
    c = *str++;
    __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_COMMAND |
                 (c << _SIM_CONTROL_OPERATOR_BITS));
  }
  while (c);
}
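

/*
 * Editor's usage sketch (not part of the original header): the calls
 * above compose naturally, and guarding them with sim_is_simulator()
 * keeps the same binary harmless on real hardware, where SIM_CONTROL
 * writes have no useful effect:
 *
 *   if (sim_is_simulator()) {
 *     sim_set_tracing(SIM_TRACE_DISASM | SIM_TRACE_CYCLES);
 *     ...code of interest...
 *     sim_dump(SIM_DUMP_BACKTRACE);
 *     sim_set_tracing(SIM_TRACE_NONE);
 *   }
 */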


#ifndef __DOXYGEN__

/**
 * The underlying implementation of "_sim_syscall()".
 *
 * We use extra "and" instructions to ensure that all the values
 * we are passing to the simulator are actually valid in the registers
 * (i.e. returned from memory) prior to the SIM_CONTROL spr.
 */
static __inline int _sim_syscall0(int val)
{
  long result;
  __asm__ __volatile__ ("mtspr SIM_CONTROL, r0"
                        : "=R00" (result) : "R00" (val));
  return result;
}

static __inline int _sim_syscall1(int val, long arg1)
{
  long result;
  __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
                        : "=R00" (result) : "R00" (val), "R01" (arg1));
  return result;
}

static __inline int _sim_syscall2(int val, long arg1, long arg2)
{
  long result;
  __asm__ __volatile__ ("{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
                        : "=R00" (result)
                        : "R00" (val), "R01" (arg1), "R02" (arg2));
  return result;
}

/* Note that _sim_syscall3() and higher are technically at risk of
   receiving an interrupt right before the mtspr bundle, in which case
   the register values for arguments 3 and up may still be in flight
   to the core from a stack frame reload. */

static __inline int _sim_syscall3(int val, long arg1, long arg2, long arg3)
{
  long result;
  __asm__ __volatile__ ("{ and zero, r3, r3 };"
                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
                        : "=R00" (result)
                        : "R00" (val), "R01" (arg1), "R02" (arg2),
                          "R03" (arg3));
  return result;
}

static __inline int _sim_syscall4(int val, long arg1, long arg2, long arg3,
                                  long arg4)
{
  long result;
  __asm__ __volatile__ ("{ and zero, r3, r4 };"
                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
                        : "=R00" (result)
                        : "R00" (val), "R01" (arg1), "R02" (arg2),
                          "R03" (arg3), "R04" (arg4));
  return result;
}

static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
                                  long arg4, long arg5)
{
  long result;
  __asm__ __volatile__ ("{ and zero, r3, r4; and zero, r5, r5 };"
                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
                        : "=R00" (result)
                        : "R00" (val), "R01" (arg1), "R02" (arg2),
                          "R03" (arg3), "R04" (arg4), "R05" (arg5));
  return result;
}


/**
 * Make a special syscall to the simulator itself, if running under
 * simulation. This is used as the implementation of other functions
 * and should not be used outside this file.
 *
 * @param syscall_num The simulator syscall number.
 * @param nr The number of additional arguments provided.
 *
 * @return Varies by syscall.
 */
#define _sim_syscall(syscall_num, nr, args...) \
  _sim_syscall##nr( \
    ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, args)
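

/*
 * Editor's note (an illustrative expansion, not original text): a call
 * such as _sim_syscall(SIM_SYSCALL_ADD_WATCHPOINT, 5, pid, addr, size,
 * mask, data) token-pastes to _sim_syscall5(), passing
 * (SIM_SYSCALL_ADD_WATCHPOINT << _SIM_CONTROL_OPERATOR_BITS) |
 * SIM_CONTROL_SYSCALL in r0 while the five operands travel in r1..r5.
 */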


/* Values for the "access_mask" parameters below. */
#define SIM_WATCHPOINT_READ    1
#define SIM_WATCHPOINT_WRITE   2
#define SIM_WATCHPOINT_EXECUTE 4


static __inline int
sim_add_watchpoint(unsigned int process_id,
                   unsigned long address,
                   unsigned long size,
                   unsigned int access_mask,
                   unsigned long user_data)
{
  return _sim_syscall(SIM_SYSCALL_ADD_WATCHPOINT, 5, process_id,
                      address, size, access_mask, user_data);
}


static __inline int
sim_remove_watchpoint(unsigned int process_id,
                      unsigned long address,
                      unsigned long size,
                      unsigned int access_mask,
                      unsigned long user_data)
{
  return _sim_syscall(SIM_SYSCALL_REMOVE_WATCHPOINT, 5, process_id,
                      address, size, access_mask, user_data);
}


/**
 * Return value from sim_query_watchpoint.
 */
struct SimQueryWatchpointStatus
{
  /**
   * 0 if a watchpoint fired, 1 if no watchpoint fired, or -1 for
   * error (meaning a bad process_id).
   */
  int syscall_status;

  /**
   * The address of the watchpoint that fired (this is the address
   * passed to sim_add_watchpoint, not an address within that range
   * that actually triggered the watchpoint).
   */
  unsigned long address;

  /** The arbitrary user_data installed by sim_add_watchpoint. */
  unsigned long user_data;
};


static __inline struct SimQueryWatchpointStatus
sim_query_watchpoint(unsigned int process_id)
{
  struct SimQueryWatchpointStatus status;
  long val = SIM_CONTROL_SYSCALL |
    (SIM_SYSCALL_QUERY_WATCHPOINT << _SIM_CONTROL_OPERATOR_BITS);
  __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
                        : "=R00" (status.syscall_status),
                          "=R01" (status.address),
                          "=R02" (status.user_data)
                        : "R00" (val), "R01" (process_id));
  return status;
}
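

/*
 * Editor's usage sketch (not part of the original header; the pid
 * variable is assumed to be a process id previously passed to
 * sim_add_watchpoint): poll for a fired watchpoint:
 *
 *   struct SimQueryWatchpointStatus s = sim_query_watchpoint(pid);
 *   if (s.syscall_status == 0)
 *     sim_print_string("watchpoint fired");
 */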


/* On the simulator, confirm lines have been evicted everywhere. */
static __inline void
sim_validate_lines_evicted(unsigned long long pa, unsigned long length)
{
#ifdef __LP64__
  _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 2, pa, length);
#else
  _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 4,
               0 /* dummy */, (long)(pa), (long)(pa >> 32), length);
#endif
}


#endif /* !__DOXYGEN__ */


/**
 * Modify the shaping parameters of a shim.
 *
 * @param shim The shim to modify. One of:
 *   SIM_CONTROL_SHAPING_GBE_0
 *   SIM_CONTROL_SHAPING_GBE_1
 *   SIM_CONTROL_SHAPING_GBE_2
 *   SIM_CONTROL_SHAPING_GBE_3
 *   SIM_CONTROL_SHAPING_XGBE_0
 *   SIM_CONTROL_SHAPING_XGBE_1
 *
 * @param type The type of shaping. This should be the same type of
 * shaping that is already in place on the shim. One of:
 *   SIM_CONTROL_SHAPING_MULTIPLIER
 *   SIM_CONTROL_SHAPING_PPS
 *   SIM_CONTROL_SHAPING_BPS
 *
 * @param units The magnitude of the rate. One of:
 *   SIM_CONTROL_SHAPING_UNITS_SINGLE
 *   SIM_CONTROL_SHAPING_UNITS_KILO
 *   SIM_CONTROL_SHAPING_UNITS_MEGA
 *   SIM_CONTROL_SHAPING_UNITS_GIGA
 *
 * @param rate The rate to which to change it. This must fit in
 * SIM_CONTROL_SHAPING_RATE_BITS bits or a warning is issued and
 * the shaping is not changed.
 *
 * @return 0 if no problems were detected in the arguments to sim_set_shaping
 * or 1 if problems were detected (for example, rate does not fit in 17 bits).
 */
static __inline int
sim_set_shaping(unsigned shim,
                unsigned type,
                unsigned units,
                unsigned rate)
{
  if ((rate & ~((1 << SIM_CONTROL_SHAPING_RATE_BITS) - 1)) != 0)
    return 1;

  __insn_mtspr(SPR_SIM_CONTROL, SIM_SHAPING_SPR_ARG(shim, type, units, rate));
  return 0;
}
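

/*
 * Editor's usage sketch (not part of the original header): cap the
 * simulated gbe 0 shim at 100 megabits per second; the call returns 1
 * if the rate would not fit in SIM_CONTROL_SHAPING_RATE_BITS bits:
 *
 *   int rc = sim_set_shaping(SIM_CONTROL_SHAPING_GBE_0,
 *                            SIM_CONTROL_SHAPING_BPS,
 *                            SIM_CONTROL_SHAPING_UNITS_MEGA, 100);
 */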

#ifdef __tilegx__

/** Enable a set of mPIPE links. Pass a -1 link_mask to enable all links. */
static __inline void
sim_enable_mpipe_links(unsigned mpipe, unsigned long link_mask)
{
  __insn_mtspr(SPR_SIM_CONTROL,
               (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
                (mpipe << 8) | (1 << 16) | ((uint_reg_t)link_mask << 32)));
}

/** Disable a set of mPIPE links. Pass a -1 link_mask to disable all links. */
static __inline void
sim_disable_mpipe_links(unsigned mpipe, unsigned long link_mask)
{
  __insn_mtspr(SPR_SIM_CONTROL,
               (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
                (mpipe << 8) | (0 << 16) | ((uint_reg_t)link_mask << 32)));
}

#endif /* __tilegx__ */


/*
 * An API for changing "functional" mode.
 */

#ifndef __DOXYGEN__

#define sim_enable_functional() \
  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_ENABLE_FUNCTIONAL)

#define sim_disable_functional() \
  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_DISABLE_FUNCTIONAL)

#endif /* __DOXYGEN__ */


/*
 * Profiler support.
 */

/**
 * Turn profiling on for the current task.
 *
 * Note that this has no effect if run in an environment without
 * profiling support (thus, the proper flags to the simulator must
 * be supplied).
 */
static __inline void
sim_profiler_enable(void)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_ENABLE);
}


/** Turn profiling off for the current task. */
static __inline void
sim_profiler_disable(void)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_DISABLE);
}


/**
 * Turn profiling on or off for the current task.
 *
 * @param enabled If true, turns on profiling. If false, turns it off.
 *
 * Note that this has no effect if run in an environment without
 * profiling support (thus, the proper flags to the simulator must
 * be supplied).
 */
static __inline void
sim_profiler_set_enabled(int enabled)
{
  int val =
    enabled ? SIM_CONTROL_PROFILER_ENABLE : SIM_CONTROL_PROFILER_DISABLE;
  __insn_mtspr(SPR_SIM_CONTROL, val);
}


/**
 * Return true if and only if profiling is currently enabled
 * for the current task.
 *
 * This returns false even if sim_profiler_enable() was called
 * if the current execution environment does not support profiling.
 */
static __inline int
sim_profiler_is_enabled(void)
{
  return ((__insn_mfspr(SPR_SIM_CONTROL) & SIM_PROFILER_ENABLED_MASK) != 0);
}


/**
 * Reset profiling counters to zero for the current task.
 *
 * Resetting can be done while profiling is enabled. It does not affect
 * the chip-wide profiling counters.
 */
static __inline void
sim_profiler_clear(void)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_CLEAR);
}


/**
 * Enable specified chip-level profiling counters.
 *
 * Does not affect the per-task profiling counters.
 *
 * @param mask Either this special value:
 *
 * SIM_CHIP_ALL (enables all chip-level components).
 *
 * or the bitwise OR of these values:
 *
 * SIM_CHIP_MEMCTL (enable all memory controllers)
 * SIM_CHIP_XAUI (enable all XAUI controllers)
 * SIM_CHIP_MPIPE (enable all MPIPE controllers)
 */
static __inline void
sim_profiler_chip_enable(unsigned int mask)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask));
}


/**
 * Disable specified chip-level profiling counters.
 *
 * Does not affect the per-task profiling counters.
 *
 * @param mask Either this special value:
 *
 * SIM_CHIP_ALL (disables all chip-level components).
 *
 * or the bitwise OR of these values:
 *
 * SIM_CHIP_MEMCTL (disable all memory controllers)
 * SIM_CHIP_XAUI (disable all XAUI controllers)
 * SIM_CHIP_MPIPE (disable all MPIPE controllers)
 */
static __inline void
sim_profiler_chip_disable(unsigned int mask)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask));
}


/**
 * Reset specified chip-level profiling counters to zero.
 *
 * Does not affect the per-task profiling counters.
 *
 * @param mask Either this special value:
 *
 * SIM_CHIP_ALL (clears all chip-level components).
 *
 * or the bitwise OR of these values:
 *
 * SIM_CHIP_MEMCTL (clear all memory controllers)
 * SIM_CHIP_XAUI (clear all XAUI controllers)
 * SIM_CHIP_MPIPE (clear all MPIPE controllers)
 */
static __inline void
sim_profiler_chip_clear(unsigned int mask)
{
  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask));
}


/*
 * Event support.
 */

#ifndef __DOXYGEN__

static __inline void
sim_event_begin(unsigned int x)
{
#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
  __insn_mtspr(SPR_EVENT_BEGIN, x);
#endif
}

static __inline void
sim_event_end(unsigned int x)
{
#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
  __insn_mtspr(SPR_EVENT_END, x);
#endif
}

#endif /* !__DOXYGEN__ */

#endif /* !__ASSEMBLER__ */

#endif /* !__ARCH_SIM_H__ */

/** @} */

arch/tile/include/arch/sim_def.h

@@ -1,477 +1,461 @@
-// Copyright 2010 Tilera Corporation. All Rights Reserved.
-//
-// This program is free software; you can redistribute it and/or
-// modify it under the terms of the GNU General Public License
-// as published by the Free Software Foundation, version 2.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
-// NON INFRINGEMENT. See the GNU General Public License for
-// more details.
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
 
-//! @file
-//!
-//! Some low-level simulator definitions.
-//!
+/**
+ * @file
+ *
+ * Some low-level simulator definitions.
+ */
 
 #ifndef __ARCH_SIM_DEF_H__
 #define __ARCH_SIM_DEF_H__
 
 
-//! Internal: the low bits of the SIM_CONTROL_* SPR values specify
-//! the operation to perform, and the remaining bits are
-//! an operation-specific parameter (often unused).
-//!
+/**
+ * Internal: the low bits of the SIM_CONTROL_* SPR values specify
+ * the operation to perform, and the remaining bits are
+ * an operation-specific parameter (often unused).
+ */
 #define _SIM_CONTROL_OPERATOR_BITS 8
 
 
-//== Values which can be written to SPR_SIM_CONTROL.
+/*
+ * Values which can be written to SPR_SIM_CONTROL.
+ */
 
-//! If written to SPR_SIM_CONTROL, stops profiling.
-//!
+/** If written to SPR_SIM_CONTROL, stops profiling. */
 #define SIM_CONTROL_PROFILER_DISABLE 0
 
-//! If written to SPR_SIM_CONTROL, starts profiling.
-//!
+/** If written to SPR_SIM_CONTROL, starts profiling. */
 #define SIM_CONTROL_PROFILER_ENABLE 1
 
-//! If written to SPR_SIM_CONTROL, clears profiling counters.
-//!
+/** If written to SPR_SIM_CONTROL, clears profiling counters. */
 #define SIM_CONTROL_PROFILER_CLEAR 2
 
-//! If written to SPR_SIM_CONTROL, checkpoints the simulator.
-//!
+/** If written to SPR_SIM_CONTROL, checkpoints the simulator. */
 #define SIM_CONTROL_CHECKPOINT 3
 
-//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
-//! sets the tracing mask to the given mask. See "sim_set_tracing()".
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
+ * sets the tracing mask to the given mask. See "sim_set_tracing()".
+ */
 #define SIM_CONTROL_SET_TRACING 4
 
-//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
-//! dumps the requested items of machine state to the log.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
+ * dumps the requested items of machine state to the log.
+ */
 #define SIM_CONTROL_DUMP 5
 
-//! If written to SPR_SIM_CONTROL, clears chip-level profiling counters.
-//!
+/** If written to SPR_SIM_CONTROL, clears chip-level profiling counters. */
 #define SIM_CONTROL_PROFILER_CHIP_CLEAR 6
 
-//! If written to SPR_SIM_CONTROL, disables chip-level profiling.
-//!
+/** If written to SPR_SIM_CONTROL, disables chip-level profiling. */
 #define SIM_CONTROL_PROFILER_CHIP_DISABLE 7
 
-//! If written to SPR_SIM_CONTROL, enables chip-level profiling.
-//!
+/** If written to SPR_SIM_CONTROL, enables chip-level profiling. */
 #define SIM_CONTROL_PROFILER_CHIP_ENABLE 8
 
-//! If written to SPR_SIM_CONTROL, enables chip-level functional mode
-//!
+/** If written to SPR_SIM_CONTROL, enables chip-level functional mode */
 #define SIM_CONTROL_ENABLE_FUNCTIONAL 9
 
-//! If written to SPR_SIM_CONTROL, disables chip-level functional mode.
-//!
+/** If written to SPR_SIM_CONTROL, disables chip-level functional mode. */
 #define SIM_CONTROL_DISABLE_FUNCTIONAL 10
 
-//! If written to SPR_SIM_CONTROL, enables chip-level functional mode.
-//! All tiles must perform this write for functional mode to be enabled.
-//! Ignored in naked boot mode unless --functional is specified.
-//! WARNING: Only the hypervisor startup code should use this!
-//!
+/**
+ * If written to SPR_SIM_CONTROL, enables chip-level functional mode.
+ * All tiles must perform this write for functional mode to be enabled.
+ * Ignored in naked boot mode unless --functional is specified.
+ * WARNING: Only the hypervisor startup code should use this!
+ */
 #define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! writes a string directly to the simulator output. Written to once for
-//! each character in the string, plus a final NUL. Instead of NUL,
-//! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
-//!
-// ISSUE: Document the meaning of "newline", and the handling of NUL.
-//
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * writes a string directly to the simulator output. Written to once for
+ * each character in the string, plus a final NUL. Instead of NUL,
+ * you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
+ */
+/* ISSUE: Document the meaning of "newline", and the handling of NUL. */
 #define SIM_CONTROL_PUTC 12
 
-//! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
-//! this core. This is intended to be used before a loop that will
-//! invalidate the cache by loading new data and evicting all current data.
-//! Generally speaking, this API should only be used by system code.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
+ * this core. This is intended to be used before a loop that will
+ * invalidate the cache by loading new data and evicting all current data.
+ * Generally speaking, this API should only be used by system code.
+ */
 #define SIM_CONTROL_GRINDER_CLEAR 13
 
-//! If written to SPR_SIM_CONTROL, shuts down the simulator.
-//!
+/** If written to SPR_SIM_CONTROL, shuts down the simulator. */
 #define SIM_CONTROL_SHUTDOWN 14
 
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that a fork syscall just created the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that a fork syscall just created the given process.
+ */
 #define SIM_CONTROL_OS_FORK 15
 
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that an exit syscall was just executed by the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that an exit syscall was just executed by the given process.
+ */
 #define SIM_CONTROL_OS_EXIT 16
 
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that the OS just switched to the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that the OS just switched to the given process.
+ */
 #define SIM_CONTROL_OS_SWITCH 17
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that an exec syscall was just executed. Written to once for
-//! each character in the executable name, plus a final NUL.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that an exec syscall was just executed. Written to once for
+ * each character in the executable name, plus a final NUL.
+ */
 #define SIM_CONTROL_OS_EXEC 18
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that an interpreter (PT_INTERP) was loaded. Written to once
-//! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
-//! hex load address starting with "0x", and "PATH" is the executable name.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that an interpreter (PT_INTERP) was loaded. Written to once
+ * for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
+ * hex load address starting with "0x", and "PATH" is the executable name.
+ */
 #define SIM_CONTROL_OS_INTERP 19
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that a dll was loaded. Written to once for each character
-//! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
-//! address starting with "0x", and "PATH" is the executable name.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that a dll was loaded. Written to once for each character
+ * in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
+ * address starting with "0x", and "PATH" is the executable name.
+ */
 #define SIM_CONTROL_DLOPEN 20
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that a dll was unloaded. Written to once for each character
-//! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
-//! address starting with "0x".
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that a dll was unloaded. Written to once for each character
+ * in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
+ * address starting with "0x".
+ */
 #define SIM_CONTROL_DLCLOSE 21
 
-//! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
-//! indicates whether to allow data reads to remotely-cached
-//! dirty cache lines to be cached locally without grinder warnings or
-//! assertions (used by Linux kernel fast memcpy).
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
+ * indicates whether to allow data reads to remotely-cached
+ * dirty cache lines to be cached locally without grinder warnings or
+ * assertions (used by Linux kernel fast memcpy).
+ */
 #define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22
 
-//! If written to SPR_SIM_CONTROL, enables memory tracing.
-//!
+/** If written to SPR_SIM_CONTROL, enables memory tracing. */
 #define SIM_CONTROL_ENABLE_MEM_LOGGING 23
 
-//! If written to SPR_SIM_CONTROL, disables memory tracing.
-//!
+/** If written to SPR_SIM_CONTROL, disables memory tracing. */
 #define SIM_CONTROL_DISABLE_MEM_LOGGING 24
 
-//! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
-//! the gbe or xgbe shims. Must specify the shim id, the type, the units, and
-//! the rate, as defined in SIM_SHAPING_SPR_ARG.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
+ * the gbe or xgbe shims. Must specify the shim id, the type, the units, and
+ * the rate, as defined in SIM_SHAPING_SPR_ARG.
+ */
 #define SIM_CONTROL_SHAPING 25
 
-//! If written to SPR_SIM_CONTROL, combined with character (shifted by 8),
-//! requests that a simulator command be executed. Written to once for each
-//! character in the command, plus a final NUL.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with character (shifted by 8),
+ * requests that a simulator command be executed. Written to once for each
+ * character in the command, plus a final NUL.
+ */
 #define SIM_CONTROL_COMMAND 26
 
-//! If written to SPR_SIM_CONTROL, indicates that the simulated system
-//! is panicking, to allow debugging via --debug-on-panic.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, indicates that the simulated system
+ * is panicking, to allow debugging via --debug-on-panic.
+ */
 #define SIM_CONTROL_PANIC 27
 
-//! If written to SPR_SIM_CONTROL, triggers a simulator syscall.
-//! See "sim_syscall()" for more info.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, triggers a simulator syscall.
+ * See "sim_syscall()" for more info.
+ */
 #define SIM_CONTROL_SYSCALL 32
 
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
-//! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
+ * use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
+ */
 #define SIM_CONTROL_OS_FORK_PARENT 33
 
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8), clears the pending magic data section. The cleared
-//! pending magic data section and any subsequently appended magic bytes
-//! will only take effect when the classifier blast programmer is run.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8), clears the pending magic data section. The cleared
+ * pending magic data section and any subsequently appended magic bytes
+ * will only take effect when the classifier blast programmer is run.
+ */
 #define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34
 
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8) and a byte of data (shifted by 16), appends that byte
-//! to the shim's pending magic data section. The pending magic data
-//! section takes effect when the classifier blast programmer is run.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8) and a byte of data (shifted by 16), appends that byte
+ * to the shim's pending magic data section. The pending magic data
+ * section takes effect when the classifier blast programmer is run.
+ */
 #define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35
 
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
-//! mask of links (shifted by 32), enable or disable the corresponding
-//! mPIPE links.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
+ * mask of links (shifted by 32), enable or disable the corresponding
+ * mPIPE links.
+ */
 #define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36
 
-//== Syscall numbers for use with "sim_syscall()".
-
-//! Syscall number for sim_add_watchpoint().
-//!
+/*
+ * Syscall numbers for use with "sim_syscall()".
+ */
+
+/** Syscall number for sim_add_watchpoint(). */
 #define SIM_SYSCALL_ADD_WATCHPOINT 2
 
-//! Syscall number for sim_remove_watchpoint().
-//!
+/** Syscall number for sim_remove_watchpoint(). */
 #define SIM_SYSCALL_REMOVE_WATCHPOINT 3
 
-//! Syscall number for sim_query_watchpoint().
-//!
+/** Syscall number for sim_query_watchpoint(). */
 #define SIM_SYSCALL_QUERY_WATCHPOINT 4
 
-//! Syscall number that asserts that the cache lines whose 64-bit PA
-//! is passed as the second argument to sim_syscall(), and over a
-//! range passed as the third argument, are no longer in cache.
-//! The simulator raises an error if this is not the case.
-//!
+/**
+ * Syscall number that asserts that the cache lines whose 64-bit PA
+ * is passed as the second argument to sim_syscall(), and over a
+ * range passed as the third argument, are no longer in cache.
+ * The simulator raises an error if this is not the case.
+ */
 #define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5
 
 
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
+ */
 
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
 
-//! Enable --trace-cycle when passed to simulator_set_tracing().
-//!
+/** Enable --trace-cycle when passed to simulator_set_tracing(). */
 #define SIM_TRACE_CYCLES 0x01
 
-//! Enable --trace-router when passed to simulator_set_tracing().
-//!
+/** Enable --trace-router when passed to simulator_set_tracing(). */
 #define SIM_TRACE_ROUTER 0x02
 
-//! Enable --trace-register-writes when passed to simulator_set_tracing().
-//!
+/** Enable --trace-register-writes when passed to simulator_set_tracing(). */
 #define SIM_TRACE_REGISTER_WRITES 0x04
 
-//! Enable --trace-disasm when passed to simulator_set_tracing().
-//!
+/** Enable --trace-disasm when passed to simulator_set_tracing(). */
 #define SIM_TRACE_DISASM 0x08
 
-//! Enable --trace-stall-info when passed to simulator_set_tracing().
-//!
+/** Enable --trace-stall-info when passed to simulator_set_tracing(). */
 #define SIM_TRACE_STALL_INFO 0x10
 
-//! Enable --trace-memory-controller when passed to simulator_set_tracing().
-//!
+/** Enable --trace-memory-controller when passed to simulator_set_tracing(). */
 #define SIM_TRACE_MEMORY_CONTROLLER 0x20
 
-//! Enable --trace-l2 when passed to simulator_set_tracing().
-//!
+/** Enable --trace-l2 when passed to simulator_set_tracing(). */
 #define SIM_TRACE_L2_CACHE 0x40
 
-//! Enable --trace-lines when passed to simulator_set_tracing().
-//!
+/** Enable --trace-lines when passed to simulator_set_tracing(). */
 #define SIM_TRACE_LINES 0x80
 
-//! Turn off all tracing when passed to simulator_set_tracing().
-//!
+/** Turn off all tracing when passed to simulator_set_tracing(). */
 #define SIM_TRACE_NONE 0
 
-//! Turn on all tracing when passed to simulator_set_tracing().
-//!
+/** Turn on all tracing when passed to simulator_set_tracing(). */
 #define SIM_TRACE_ALL (-1)
 
-//! @}
+/** @} */
 
-//! Computes the value to write to SPR_SIM_CONTROL to set tracing flags.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to set tracing flags. */
 #define SIM_TRACE_SPR_ARG(mask) \
   (SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
 
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
+ */
 
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
 
-//! Dump the general-purpose registers.
-//!
+/** Dump the general-purpose registers. */
 #define SIM_DUMP_REGS 0x001
 
-//! Dump the SPRs.
-//!
+/** Dump the SPRs. */
 #define SIM_DUMP_SPRS 0x002
 
-//! Dump the ITLB.
-//!
+/** Dump the ITLB. */
 #define SIM_DUMP_ITLB 0x004
 
-//! Dump the DTLB.
-//!
+/** Dump the DTLB. */
 #define SIM_DUMP_DTLB 0x008
 
-//! Dump the L1 I-cache.
-//!
+/** Dump the L1 I-cache. */
 #define SIM_DUMP_L1I 0x010
 
-//! Dump the L1 D-cache.
-//!
+/** Dump the L1 D-cache. */
 #define SIM_DUMP_L1D 0x020
 
-//! Dump the L2 cache.
-//!
+/** Dump the L2 cache. */
 #define SIM_DUMP_L2 0x040
 
-//! Dump the switch registers.
-//!
+/** Dump the switch registers. */
 #define SIM_DUMP_SNREGS 0x080
 
-//! Dump the switch ITLB.
-//!
+/** Dump the switch ITLB. */
 #define SIM_DUMP_SNITLB 0x100
 
-//! Dump the switch L1 I-cache.
-//!
+/** Dump the switch L1 I-cache. */
 #define SIM_DUMP_SNL1I 0x200
 
-//! Dump the current backtrace.
-//!
+/** Dump the current backtrace. */
 #define SIM_DUMP_BACKTRACE 0x400
 
-//! Only dump valid lines in caches.
-//!
+/** Only dump valid lines in caches. */
 #define SIM_DUMP_VALID_LINES 0x800
 
-//! Dump everything that is dumpable.
-//!
+/** Dump everything that is dumpable. */
 #define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES)
 
-// @}
+/** @} */
 
-//! Computes the value to write to SPR_SIM_CONTROL to dump machine state.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to dump machine state. */
 #define SIM_DUMP_SPR_ARG(mask) \
   (SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
 
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
+ */
 
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
 
-//! Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers.
-//!
+/** Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
 #define SIM_CHIP_MEMCTL 0x001
 
-//! Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface.
-//!
+/** Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
 #define SIM_CHIP_XAUI 0x002
 
-//! Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface.
-//!
+/** Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
 #define SIM_CHIP_PCIE 0x004
 
-//! Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface.
-//!
+/** Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
 #define SIM_CHIP_MPIPE 0x008
 
-//! Reference all chip devices.
-//!
+/** Use with with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
+#define SIM_CHIP_TRIO 0x010
+
+/** Reference all chip devices. */
 #define SIM_CHIP_ALL (-1)
 
-//! @}
+/** @} */
 
-//! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. */
 #define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \
   (SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
-//! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.*/
 #define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \
   (SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
-//! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. */
 #define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \
   (SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
 
-// Shim bitrate controls.
+/* Shim bitrate controls. */
 
-//! The number of bits used to store the shim id.
-//!
+/** The number of bits used to store the shim id. */
 #define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3
 
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
 
-//! Change the gbe 0 bitrate.
-//!
+/** Change the gbe 0 bitrate. */
 #define SIM_CONTROL_SHAPING_GBE_0 0x0
 
-//! Change the gbe 1 bitrate.
-//!
+/** Change the gbe 1 bitrate. */
 #define SIM_CONTROL_SHAPING_GBE_1 0x1
 
-//! Change the gbe 2 bitrate.
-//!
+/** Change the gbe 2 bitrate. */
 #define SIM_CONTROL_SHAPING_GBE_2 0x2
 
-//! Change the gbe 3 bitrate.
-//!
+/** Change the gbe 3 bitrate. */
 #define SIM_CONTROL_SHAPING_GBE_3 0x3
 
-//! Change the xgbe 0 bitrate.
-//!
+/** Change the xgbe 0 bitrate. */
 #define SIM_CONTROL_SHAPING_XGBE_0 0x4
 
-//! Change the xgbe 1 bitrate.
-//!
+/** Change the xgbe 1 bitrate. */
 #define SIM_CONTROL_SHAPING_XGBE_1 0x5
 
-//! The type of shaping to do.
-//!
+/** The type of shaping to do. */
 #define SIM_CONTROL_SHAPING_TYPE_BITS 2
 
-//! Control the multiplier.
-//!
+/** Control the multiplier. */
 #define SIM_CONTROL_SHAPING_MULTIPLIER 0
 
-//! Control the PPS.
-//!
+/** Control the PPS. */
 #define SIM_CONTROL_SHAPING_PPS 1
 
-//! Control the BPS.
-//!
+/** Control the BPS. */
 #define SIM_CONTROL_SHAPING_BPS 2
 
-//! The number of bits for the units for the shaping parameter.
-//!
+/** The number of bits for the units for the shaping parameter. */
 #define SIM_CONTROL_SHAPING_UNITS_BITS 2
 
-//! Provide a number in single units.
-//!
+/** Provide a number in single units. */
 #define SIM_CONTROL_SHAPING_UNITS_SINGLE 0
 
-//! Provide a number in kilo units.
-//!
+/** Provide a number in kilo units. */
 #define SIM_CONTROL_SHAPING_UNITS_KILO 1
 
-//! Provide a number in mega units.
-//!
+/** Provide a number in mega units. */
 #define SIM_CONTROL_SHAPING_UNITS_MEGA 2
 
-//! Provide a number in giga units.
-//!
+/** Provide a number in giga units. */
 #define SIM_CONTROL_SHAPING_UNITS_GIGA 3
 
-// @}
+/** @} */
 
-//! How many bits are available for the rate.
-//!
+/** How many bits are available for the rate. */
 #define SIM_CONTROL_SHAPING_RATE_BITS \
   (32 - (_SIM_CONTROL_OPERATOR_BITS + \
          SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
         SIM_CONTROL_SHAPING_TYPE_BITS + \
         SIM_CONTROL_SHAPING_UNITS_BITS))
 
-//! Computes the value to write to SPR_SIM_CONTROL to change a bitrate.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to change a bitrate. */
 #define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \
   (SIM_CONTROL_SHAPING | \
    ((shim) | \
@@ -483,30 +467,36 @@
 	SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS)
 
 
-//== Values returned when reading SPR_SIM_CONTROL.
-// ISSUE: These names should share a longer common prefix.
+/*
+ * Values returned when reading SPR_SIM_CONTROL.
+ * ISSUE: These names should share a longer common prefix.
+ */
 
-//! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
-//! (SIM_TRACE_xxx values).
-//!
+/**
+ * When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
+ * (SIM_TRACE_xxx values).
+ */
 #define SIM_TRACE_FLAG_MASK 0xFFFF
 
-//! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled.
-//!
+/** When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. */
 #define SIM_PROFILER_ENABLED_MASK 0x10000
 
 
-//== Special arguments for "SIM_CONTROL_PUTC".
+/*
+ * Special arguments for "SIM_CONTROL_PUTC".
+ */
 
-//! Flag value for forcing a PUTC string-flush, including
-//! coordinate/cycle prefix and newline.
-//!
+/**
+ * Flag value for forcing a PUTC string-flush, including
+ * coordinate/cycle prefix and newline.
+ */
 #define SIM_PUTC_FLUSH_STRING 0x100
 
-//! Flag value for forcing a PUTC binary-data-flush, which skips the
-//! prefix and does not append a newline.
-//!
+/**
+ * Flag value for forcing a PUTC binary-data-flush, which skips the
+ * prefix and does not append a newline.
+ */
 #define SIM_PUTC_FLUSH_BINARY 0x101
 
 
-#endif //__ARCH_SIM_DEF_H__
+#endif /* __ARCH_SIM_DEF_H__ */
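A worked example of how the operator/argument encoding composes (an editor's sketch; the arithmetic simply restates the macros above):

/* SIM_TRACE_SPR_ARG(SIM_TRACE_DISASM | SIM_TRACE_CYCLES)
 *   == SIM_CONTROL_SET_TRACING | ((0x08 | 0x01) << _SIM_CONTROL_OPERATOR_BITS)
 *   == 4 | 0x900
 *   == 0x904
 * so a single __insn_mtspr(SPR_SIM_CONTROL, 0x904) enables disassembly
 * and cycle tracing together. */
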
arch/tile/include/arch/spr_def.h

@@ -12,8 +12,93 @@
  * more details.
  */
 
+/*
+ * In addition to including the proper base SPR definition file, depending
+ * on machine architecture, this file defines several macros which allow
+ * kernel code to use protection-level dependent SPRs without worrying
+ * about which PL it's running at.  In these macros, the PL that the SPR
+ * or interrupt number applies to is replaced by K.
+ */
+
+#if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2
+#error CONFIG_KERNEL_PL must be 1 or 2
+#endif
+
+/* Concatenate 4 strings. */
+#define __concat4(a, b, c, d) a ## b ## c ## d
+#define _concat4(a, b, c, d)  __concat4(a, b, c, d)
+
 #ifdef __tilegx__
 #include <arch/spr_def_64.h>
+
+/* TILE-Gx dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_SET_K \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_RESET_K \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_VECTOR_BASE_K \
+	_concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,)
+
+#define SPR_IPI_MASK_K \
+	_concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_RESET_K \
+	_concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_K \
+	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_RESET_K \
+	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define INT_IPI_K \
+	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
+
+#define SPR_SINGLE_STEP_CONTROL_K \
+	_concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,)
+#define SPR_SINGLE_STEP_EN_K_K \
+	_concat4(SPR_SINGLE_STEP_EN_, CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL)
+#define INT_SINGLE_STEP_K \
+	_concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
+
 #else
 #include <arch/spr_def_32.h>
+
+/* TILEPro dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_SET_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_SET_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_RESET_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_RESET_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,)
+
 #endif
+
+/* Generic protection-level dependent SPRs. */
+
+#define SPR_SYSTEM_SAVE_K_0 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
+#define SPR_SYSTEM_SAVE_K_1 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)
+#define SPR_SYSTEM_SAVE_K_2 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,)
+#define SPR_SYSTEM_SAVE_K_3 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,)
+#define SPR_EX_CONTEXT_K_0 \
+	_concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
+#define SPR_EX_CONTEXT_K_1 \
+	_concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTCTRL_K_STATUS \
+	_concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
+#define INT_INTCTRL_K \
+	_concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
|
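A note on the `_concat4` idiom above: the two-level definition is what forces CONFIG_KERNEL_PL to be macro-expanded *before* token pasting; pasting directly in `__concat4` would yield the literal name SPR_SYSTEM_SAVE_CONFIG_KERNEL_PL_0. A minimal standalone sketch of the expansion (userspace, illustrative only; the 0x4700 value is copied from spr_def_32.h below):

#include <stdio.h>

#define CONFIG_KERNEL_PL 2

/* Two-level paste: the outer macro expands its arguments first. */
#define __concat4(a, b, c, d) a ## b ## c ## d
#define _concat4(a, b, c, d) __concat4(a, b, c, d)

#define SPR_SYSTEM_SAVE_2_0 0x4700	/* value from spr_def_32.h */
#define SPR_SYSTEM_SAVE_K_0 \
	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)

int main(void)
{
	/* Pastes to SPR_SYSTEM_SAVE_2_0, i.e. prints 0x4700. */
	printf("%#x\n", SPR_SYSTEM_SAVE_K_0);
	return 0;
}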
@@ -56,58 +56,93 @@
#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
#define SPR_EX_CONTEXT_2_0 0x4605
#define SPR_EX_CONTEXT_2_1 0x4606
#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
#define SPR_FAIL 0x4e09
#define SPR_INTCTRL_0_STATUS 0x4a07
#define SPR_INTCTRL_1_STATUS 0x4807
#define SPR_INTCTRL_2_STATUS 0x4607
#define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
#define SPR_INTERRUPT_MASK_0_0 0x4a08
#define SPR_INTERRUPT_MASK_0_1 0x4a09
#define SPR_INTERRUPT_MASK_1_0 0x4809
#define SPR_INTERRUPT_MASK_1_1 0x480a
#define SPR_INTERRUPT_MASK_2_0 0x4608
#define SPR_INTERRUPT_MASK_2_1 0x4609
#define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
#define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
#define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
#define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
#define SPR_INTERRUPT_MASK_RESET_2_0 0x460a
#define SPR_INTERRUPT_MASK_RESET_2_1 0x460b
#define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
#define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
#define SPR_INTERRUPT_MASK_SET_1_0 0x480d
#define SPR_INTERRUPT_MASK_SET_1_1 0x480e
#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
#define SPR_MPL_DMA_CPL_SET_0 0x5800
#define SPR_MPL_DMA_CPL_SET_1 0x5801
#define SPR_MPL_DMA_CPL_SET_2 0x5802
#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
#define SPR_MPL_INTCTRL_1_SET_0 0x4800
#define SPR_MPL_INTCTRL_1_SET_1 0x4801
#define SPR_MPL_INTCTRL_1_SET_2 0x4802
#define SPR_MPL_INTCTRL_2_SET_0 0x4600
#define SPR_MPL_INTCTRL_2_SET_1 0x4601
#define SPR_MPL_INTCTRL_2_SET_2 0x4602
#define SPR_MPL_SN_ACCESS_SET_0 0x0800
#define SPR_MPL_SN_ACCESS_SET_1 0x0801
#define SPR_MPL_SN_ACCESS_SET_2 0x0802
#define SPR_MPL_SN_CPL_SET_0 0x5a00
#define SPR_MPL_SN_CPL_SET_1 0x5a01
#define SPR_MPL_SN_CPL_SET_2 0x5a02
#define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
#define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
#define SPR_MPL_SN_FIREWALL_SET_2 0x2c02
#define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
#define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
#define SPR_MPL_SN_NOTIFY_SET_2 0x2a02
#define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
#define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
#define SPR_MPL_UDN_ACCESS_SET_2 0x0c02
#define SPR_MPL_UDN_AVAIL_SET_0 0x4000
#define SPR_MPL_UDN_AVAIL_SET_1 0x4001
#define SPR_MPL_UDN_AVAIL_SET_2 0x4002
#define SPR_MPL_UDN_CA_SET_0 0x3c00
#define SPR_MPL_UDN_CA_SET_1 0x3c01
#define SPR_MPL_UDN_CA_SET_2 0x3c02
#define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
#define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
#define SPR_MPL_UDN_COMPLETE_SET_2 0x1402
#define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
#define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
#define SPR_MPL_UDN_FIREWALL_SET_2 0x3002
#define SPR_MPL_UDN_REFILL_SET_0 0x1000
#define SPR_MPL_UDN_REFILL_SET_1 0x1001
#define SPR_MPL_UDN_REFILL_SET_2 0x1002
#define SPR_MPL_UDN_TIMER_SET_0 0x3600
#define SPR_MPL_UDN_TIMER_SET_1 0x3601
#define SPR_MPL_UDN_TIMER_SET_2 0x3602
#define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
#define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
#define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02
#define SPR_PASS 0x4e0b
#define SPR_PERF_COUNT_0 0x4205
#define SPR_PERF_COUNT_1 0x4206
#define SPR_PERF_COUNT_CTL 0x4207
#define SPR_PERF_COUNT_DN_CTL 0x4210
#define SPR_PERF_COUNT_STS 0x4208
#define SPR_PROC_STATUS 0x4f00
#define SPR_SIM_CONTROL 0x4e0c

@@ -124,6 +159,10 @@
#define SPR_SYSTEM_SAVE_1_1 0x4901
#define SPR_SYSTEM_SAVE_1_2 0x4902
#define SPR_SYSTEM_SAVE_1_3 0x4903
#define SPR_SYSTEM_SAVE_2_0 0x4700
#define SPR_SYSTEM_SAVE_2_1 0x4701
#define SPR_SYSTEM_SAVE_2_2 0x4702
#define SPR_SYSTEM_SAVE_2_3 0x4703
#define SPR_TILE_COORD 0x4c17
#define SPR_TILE_RTF_HWM 0x4e10
#define SPR_TILE_TIMER_CONTROL 0x3205
@@ -146,7 +146,10 @@ enum {

	CALLER_SP_IN_R52_BASE = 4,

	CALLER_SP_OFFSET_BASE = 8
	CALLER_SP_OFFSET_BASE = 8,

	/* Marks the entry point of certain functions. */
	ENTRY_POINT_INFO_OP = 16
};
@@ -216,15 +216,16 @@ struct compat_siginfo;
struct compat_sigaltstack;
long compat_sys_execve(const char __user *path,
		       const compat_uptr_t __user *argv,
		       const compat_uptr_t __user *envp);
		       const compat_uptr_t __user *envp, struct pt_regs *);
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
			     struct compat_sigaction __user *oact,
			     size_t sigsetsize);
long compat_sys_rt_sigqueueinfo(int pid, int sig,
				struct compat_siginfo __user *uinfo);
long compat_sys_rt_sigreturn(void);
long compat_sys_rt_sigreturn(struct pt_regs *);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			    struct compat_sigaltstack __user *uoss_ptr);
			    struct compat_sigaltstack __user *uoss_ptr,
			    struct pt_regs *);
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,

@@ -255,4 +256,12 @@ long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
/* Tilera Linux syscalls that don't have "compat" versions. */
#define compat_sys_flush_cache sys_flush_cache

/* These are the intvec_64.S trampolines. */
long _compat_sys_execve(const char __user *path,
			const compat_uptr_t __user *argv,
			const compat_uptr_t __user *envp);
long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			     struct compat_sigaltstack __user *uoss_ptr);
long _compat_sys_rt_sigreturn(void);

#endif /* _ASM_TILE_COMPAT_H */
@@ -47,53 +47,53 @@
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
	int __n = (n); \
	(((__n < 32) ? \
		__insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
		__insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
		__insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
		__insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
	  >> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
	((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#endif

/*
 * The set of interrupts we want active if irqs are enabled.
 * Note that in particular, the tile timer interrupt comes and goes
 * from this set, since we have no other way to turn off the timer.
 * Likewise, INTCTRL_1 is removed and re-added during device
 * Likewise, INTCTRL_K is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
 * is always claimed as an "active interrupt" so we can query that bit

@@ -170,14 +170,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);

/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp)					\
	mfspr   tmp, INTERRUPT_MASK_1;				\
	mfspr   tmp, SPR_INTERRUPT_MASK_K;			\
	andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
	moveli reg, hw2_last(interrupts_enabled_mask); \
	shl16insli reg, reg, hw1(interrupts_enabled_mask); \
	shl16insli reg, reg, hw0(interrupts_enabled_mask); \
	moveli reg, hw2_last(interrupts_enabled_mask);		\
	shl16insli reg, reg, hw1(interrupts_enabled_mask);	\
	shl16insli reg, reg, hw0(interrupts_enabled_mask);	\
	add     reg, reg, tp

/* Disable interrupts. */

@@ -185,18 +185,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
	moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS);	\
	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS);	\
	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS);	\
	mtspr   INTERRUPT_MASK_SET_1, tmp0
	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0

/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp)					\
	movei   tmp, -1;					\
	mtspr   INTERRUPT_MASK_SET_1, tmp
	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp

/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1)					\
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
	ld      tmp0, tmp0;					\
	mtspr   INTERRUPT_MASK_RESET_1, tmp0
	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0

#else /* !__tilegx__ */

@@ -210,14 +210,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 * (making the original code's write of the "high" mask word idempotent).
 */
#define IRQS_DISABLED(tmp)					\
	mfspr   tmp, INTERRUPT_MASK_1_0;			\
	mfspr   tmp, SPR_INTERRUPT_MASK_K_0;			\
	shri    tmp, tmp, INT_MEM_ERROR;			\
	andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
	moveli  reg, lo16(interrupts_enabled_mask);	\
	auli    reg, reg, ha16(interrupts_enabled_mask);\
	moveli  reg, lo16(interrupts_enabled_mask);		\
	auli    reg, reg, ha16(interrupts_enabled_mask);	\
	add     reg, reg, tp

/* Disable interrupts. */

@@ -227,16 +227,16 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS)		\
	};							\
	{							\
	 mtspr  INTERRUPT_MASK_SET_1_0, tmp0;			\
	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0;		\
	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS)	\
	};							\
	mtspr   INTERRUPT_MASK_SET_1_1, tmp1
	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1

/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp)					\
	movei   tmp, -1;					\
	mtspr   INTERRUPT_MASK_SET_1_0, tmp;			\
	mtspr   INTERRUPT_MASK_SET_1_1, tmp
	mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp;		\
	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1)					\

@@ -246,8 +246,8 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
	 addi   tmp1, tmp0, 4					\
	};							\
	lw      tmp1, tmp1;					\
	mtspr   INTERRUPT_MASK_RESET_1_0, tmp0;			\
	mtspr   INTERRUPT_MASK_RESET_1_1, tmp1
	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0;		\
	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif

/*
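The TILEPro variants above split one logical 64-bit interrupt mask across two 32-bit SPR words, indexing the word by the interrupt number. A minimal standalone sketch of just that word-selection logic (the mask_word array and model_interrupt_mask_set name are stand-ins, not kernel symbols):

#include <stdio.h>

/* Stand-in state for the two 32-bit mask words. */
static unsigned int mask_word[2];

/* Mirrors the TILEPro interrupt_mask_set() shape: pick the word by n,
 * then set bit (n % 32) within it. */
static void model_interrupt_mask_set(int n)
{
	mask_word[n < 32 ? 0 : 1] |= 1u << (n & 0x1f);
}

int main(void)
{
	model_interrupt_mask_set(5);	/* low word, bit 5 */
	model_interrupt_mask_set(37);	/* high word, bit 5 */
	printf("%#x %#x\n", mask_word[0], mask_word[1]);	/* 0x20 0x20 */
	return 0;
}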
@@ -23,6 +23,7 @@
#define MAP_POPULATE	0x0040		/* populate (prefault) pagetables */
#define MAP_NONBLOCK	0x0080		/* do not block on IO */
#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
#define MAP_STACK	MAP_GROWSDOWN	/* provide convenience alias */
#define MAP_LOCKED	0x0200		/* pages are locked */
#define MAP_NORESERVE	0x0400		/* don't check for reservations */
#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
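Since MAP_STACK is defined here simply as an alias for MAP_GROWSDOWN, userspace that passes it (glibc's NPTL does, for thread stacks) gets an ordinary anonymous mapping. A small portable usage sketch, not kernel code:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 1024;
	/* MAP_STACK hints that this region will be used as a stack. */
	void *stk = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
	if (stk == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("stack at %p\n", stk);
	munmap(stk, len);
	return 0;
}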
@@ -199,17 +199,17 @@ static inline __attribute_const__ int get_order(unsigned long size)
 * If you want more physical memory than this then see the CONFIG_HIGHMEM
 * option in the kernel configuration.
 *
 * The top two 16MB chunks in the table below (VIRT and HV) are
 * unavailable to Linux.  Since the kernel interrupt vectors must live
 * at 0xfd000000, we map all of the bottom of RAM at this address with
 * a huge page table entry to minimize its ITLB footprint (as well as
 * at PAGE_OFFSET).  The last architected requirement is that user
 * interrupt vectors live at 0xfc000000, so we make that range of
 * memory available to user processes.  The remaining regions are sized
 * as shown; after the first four addresses, we show "typical" values,
 * since the actual addresses depend on kernel #defines.
 * The top 16MB chunk in the table below is unavailable to Linux.  Since
 * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
 * (depending on whether the kernel is at PL2 or PL1), we map all of the
 * bottom of RAM at this address with a huge page table entry to minimize
 * its ITLB footprint (as well as at PAGE_OFFSET).  The last architected
 * requirement is that user interrupt vectors live at 0xfc000000, so we
 * make that range of memory available to user processes.  The remaining
 * regions are sized as shown; the first four addresses use the PL 1
 * values, and after that, we show "typical" values, since the actual
 * addresses depend on kernel #defines.
 *
 * MEM_VIRT_INTRPT                 0xff000000
 * MEM_HV_INTRPT                   0xfe000000
 * MEM_SV_INTRPT (kernel code)     0xfd000000
 * MEM_USER_INTRPT (user vector)   0xfc000000

@@ -221,9 +221,14 @@ static inline __attribute_const__ int get_order(unsigned long size)
 */

#define MEM_USER_INTRPT		_AC(0xfc000000, UL)
#if CONFIG_KERNEL_PL == 1
#define MEM_SV_INTRPT		_AC(0xfd000000, UL)
#define MEM_HV_INTRPT		_AC(0xfe000000, UL)
#define MEM_VIRT_INTRPT		_AC(0xff000000, UL)
#else
#define MEM_GUEST_INTRPT	_AC(0xfd000000, UL)
#define MEM_SV_INTRPT		_AC(0xfe000000, UL)
#define MEM_HV_INTRPT		_AC(0xff000000, UL)
#endif

#define INTRPT_SIZE 0x4000
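The #if above selects one of two top-of-VA layouts, shifting each 16MB vector region up by one slot when the kernel runs at PL2 and inserting a guest region at 0xfd000000. A throwaway sketch that prints both layouts side by side (illustrative only; values are copied from the #defines above):

#include <stdio.h>

int main(void)
{
	for (int kernel_pl = 1; kernel_pl <= 2; kernel_pl++) {
		unsigned long user = 0xfc000000UL;
		unsigned long sv = (kernel_pl == 1) ? 0xfd000000UL
						    : 0xfe000000UL;
		unsigned long hv = (kernel_pl == 1) ? 0xfe000000UL
						    : 0xff000000UL;
		printf("PL%d: user %#lx, kernel(SV) %#lx, HV %#lx\n",
		       kernel_pl, user, sv, hv);
	}
	return 0;
}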
@@ -328,18 +328,21 @@ extern int kdata_huge;
 * Note that assembly code assumes that USER_PL is zero.
 */
#define USER_PL 0
#define KERNEL_PL 1
#if CONFIG_KERNEL_PL == 2
#define GUEST_PL 1
#endif
#define KERNEL_PL CONFIG_KERNEL_PL

/* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */
/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
#if CONFIG_NR_CPUS > CPU_MASK_VALUE
# error Too many cpus!
#endif
#define raw_smp_processor_id() \
	((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE)
	((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
#define get_current_ksp0() \
	(__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE)
	(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
#define next_current_ksp0(task) ({ \
	unsigned long __ksp0 = task_ksp0(task); \
	int __cpu = raw_smp_processor_id(); \
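The packing trick here relies on ksp0 being aligned so its low CPU_LOG_MASK_VALUE bits are zero, leaving room to OR in the cpu number. A standalone model of the pack/unpack round trip (the 0x...3000 ksp0 value is a made-up 4K-aligned example):

#include <stdio.h>

#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)

int main(void)
{
	/* ksp0 is assumed at least 4K-aligned, so its low 12 bits are free. */
	unsigned long ksp0 = 0xfd123000UL;
	int cpu = 37;
	unsigned long packed = ksp0 | cpu;	/* what SYSTEM_SAVE_K_0 holds */

	printf("cpu=%d ksp0=%#lx\n",
	       (int)(packed & CPU_MASK_VALUE),		 /* raw_smp_processor_id() */
	       packed & ~(unsigned long)CPU_MASK_VALUE); /* get_current_ksp0() */
	return 0;
}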
@@ -62,8 +62,8 @@ struct pt_regs {
	pt_reg_t lr;		/* aliases regs[TREG_LR] */

	/* Saved special registers. */
	pt_reg_t pc;		/* stored in EX_CONTEXT_1_0 */
	pt_reg_t ex1;		/* stored in EX_CONTEXT_1_1 (PL and ICS bit) */
	pt_reg_t pc;		/* stored in EX_CONTEXT_K_0 */
	pt_reg_t ex1;		/* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
	pt_reg_t faultnum;	/* fault number (INT_SWINT_1 for syscall) */
	pt_reg_t orig_r0;	/* r0 at syscall entry, else zero */
	pt_reg_t flags;		/* flags (see below) */
@@ -32,8 +32,9 @@ extern void *compat_sys_call_table[];

/*
 * Note that by convention, any syscall which requires the current
 * register set takes an additional "struct pt_regs *" pointer; the
 * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx().
 * register set takes an additional "struct pt_regs *" pointer; a
 * _sys_xxx() trampoline in intvec*.S just sets up the pointer and
 * jumps to sys_xxx().
 */

/* kernel/sys.c */

@@ -43,66 +44,17 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
		       u32 len_lo, u32 len_hi, int advice);
long sys_flush_cache(void);
long sys_mmap2(unsigned long addr, unsigned long len,
	       unsigned long prot, unsigned long flags,
	       unsigned long fd, unsigned long pgoff);
#ifdef __tilegx__
long sys_mmap(unsigned long addr, unsigned long len,
	      unsigned long prot, unsigned long flags,
	      unsigned long fd, off_t pgoff);
#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
#define sys_mmap sys_mmap
#endif

/* kernel/process.c */
long sys_clone(unsigned long clone_flags, unsigned long newsp,
	       void __user *parent_tid, void __user *child_tid);
long _sys_clone(unsigned long clone_flags, unsigned long newsp,
		void __user *parent_tid, void __user *child_tid,
		struct pt_regs *regs);
long sys_fork(void);
long _sys_fork(struct pt_regs *regs);
long sys_vfork(void);
long _sys_vfork(struct pt_regs *regs);
long sys_execve(const char __user *filename,
		const char __user *const __user *argv,
		const char __user *const __user *envp);
long _sys_execve(const char __user *filename,
		 const char __user *const __user *argv,
		 const char __user *const __user *envp, struct pt_regs *regs);

/* kernel/signal.c */
long sys_sigaltstack(const stack_t __user *, stack_t __user *);
long _sys_sigaltstack(const stack_t __user *, stack_t __user *,
		      struct pt_regs *);
long sys_rt_sigreturn(void);
long _sys_rt_sigreturn(struct pt_regs *regs);

/* platform-independent functions */
long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
long sys_rt_sigaction(int sig, const struct sigaction __user *act,
		      struct sigaction __user *oact, size_t sigsetsize);

#ifndef __tilegx__
/* mm/fault.c */
int sys_cmpxchg_badaddr(unsigned long address);
int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
long sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
long _sys_cmpxchg_badaddr(unsigned long address);
#endif

#ifdef CONFIG_COMPAT
long compat_sys_execve(const char __user *path,
		       const compat_uptr_t __user *argv,
		       const compat_uptr_t __user *envp);
long _compat_sys_execve(const char __user *path,
			const compat_uptr_t __user *argv,
			const compat_uptr_t __user *envp,
			struct pt_regs *regs);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			    struct compat_sigaltstack __user *uoss_ptr);
long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			     struct compat_sigaltstack __user *uoss_ptr,
			     struct pt_regs *regs);
long compat_sys_rt_sigreturn(void);
long _compat_sys_rt_sigreturn(struct pt_regs *regs);

/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);

@@ -110,4 +62,15 @@ long sys_truncate64(const char __user *path, loff_t length);
long sys_ftruncate64(unsigned int fd, loff_t length);
#endif

/* These are the intvec*.S trampolines. */
long _sys_sigaltstack(const stack_t __user *, stack_t __user *);
long _sys_rt_sigreturn(void);
long _sys_clone(unsigned long clone_flags, unsigned long newsp,
		void __user *parent_tid, void __user *child_tid);
long _sys_execve(const char __user *filename,
		 const char __user *const __user *argv,
		 const char __user *const __user *envp);

#include <asm-generic/syscalls.h>

#endif /* _ASM_TILE_SYSCALLS_H */
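The inverted convention described in the comment above (the _sys_xxx stub supplies the pt_regs pointer and transfers to sys_xxx, rather than the other way around) can be modeled in plain C. A minimal sketch with stand-in names; in the real kernel the stub is assembly in intvec*.S and the pt_regs address is computed from the stack pointer:

#include <stdio.h>

struct pt_regs { long sp; };			/* stand-in */

static struct pt_regs fake_regs = { 0x1000 };	/* stand-in register state */

/* The "real" handler wants the register state as a trailing argument. */
static long sys_example(int arg, struct pt_regs *regs)
{
	printf("arg=%d sp=%#lx\n", arg, regs->sp);
	return 0;
}

/* Model of a _sys_example trampoline: supply the pt_regs pointer, then
 * transfer to sys_example. */
static long _sys_example(int arg)
{
	return sys_example(arg, &fake_regs);
}

int main(void)
{
	return (int)_sys_example(7);
}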
@@ -89,6 +89,10 @@
#define get_cycles_low() __insn_mfspr(SPR_CYCLE)	/* just get all 64 bits */
#endif

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
int __mb_incoherent(void);	/* Helper routine for mb_incoherent(). */
#endif

/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)

@@ -97,7 +101,6 @@ mb_incoherent(void)

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
{
	int __mb_incoherent(void);
#if CHIP_HAS_TILE_WRITE_PENDING()
	const unsigned long WRITE_TIMEOUT_CYCLES = 400;
	unsigned long start = get_cycles_low();

@@ -161,7 +164,7 @@ extern struct task_struct *_switch_to(struct task_struct *prev,
/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       unsigned long new_system_save_1_0);
				       unsigned long new_system_save_k_0);

/* Address that switched-away from tasks are at. */
extern unsigned long get_switch_to_pc(void);

@@ -214,13 +217,6 @@ int hardwall_deactivate(struct task_struct *task);
} while (0)
#endif

/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
extern int _sim_syscall(int syscall_num, ...);
#define sim_syscall(syscall_num, ...) \
	_sim_syscall(SIM_CONTROL_SYSCALL + \
		((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
		## __VA_ARGS__)

/*
 * Kernel threads can check to see if they need to migrate their
 * stack whenever they return from a context switch; for user
@@ -59,4 +59,8 @@ void do_hardwall_trap(struct pt_regs *, int fault_num);
void do_breakpoint(struct pt_regs *, int fault_num);

#ifdef __tilegx__
void gx_singlestep_handle(struct pt_regs *, int fault_num);
#endif

#endif /* _ASM_TILE_SYSCALLS_H */
@@ -1003,37 +1003,37 @@ int hv_console_write(HV_VirtAddr bytes, int len);
 * when these occur in a client's interrupt critical section, they must
 * be delivered through the downcall mechanism.
 *
 * A downcall is initially delivered to the client as an INTCTRL_1
 * interrupt.  Upon entry to the INTCTRL_1 vector, the client must
 * immediately invoke the hv_downcall_dispatch service.  This service
 * will not return; instead it will cause one of the client's actual
 * downcall-handling interrupt vectors to be entered.  The EX_CONTEXT
 * registers in the client will be set so that when the client irets,
 * it will return to the code which was interrupted by the INTCTRL_1
 * interrupt.
 * A downcall is initially delivered to the client as an INTCTRL_CL
 * interrupt, where CL is the client's PL.  Upon entry to the INTCTRL_CL
 * vector, the client must immediately invoke the hv_downcall_dispatch
 * service.  This service will not return; instead it will cause one of
 * the client's actual downcall-handling interrupt vectors to be entered.
 * The EX_CONTEXT registers in the client will be set so that when the
 * client irets, it will return to the code which was interrupted by the
 * INTCTRL_CL interrupt.
 *
 * Under some circumstances, the firing of INTCTRL_1 can race with
 * Under some circumstances, the firing of INTCTRL_CL can race with
 * the lowering of a device interrupt.  In such a case, the
 * hv_downcall_dispatch service may issue an iret instruction instead
 * of entering one of the client's actual downcall-handling interrupt
 * vectors.  This will return execution to the location that was
 * interrupted by INTCTRL_1.
 * interrupted by INTCTRL_CL.
 *
 * Any saving of registers should be done by the actual handling
 * vectors; no registers should be changed by the INTCTRL_1 handler.
 * vectors; no registers should be changed by the INTCTRL_CL handler.
 * In particular, the client should not use a jal instruction to invoke
 * the hv_downcall_dispatch service, as that would overwrite the client's
 * lr register.  Note that the hv_downcall_dispatch service may overwrite
 * one or more of the client's system save registers.
 *
 * The client must not modify the INTCTRL_1_STATUS SPR.  The hypervisor
 * The client must not modify the INTCTRL_CL_STATUS SPR.  The hypervisor
 * will set this register to cause a downcall to happen, and will clear
 * it when no further downcalls are pending.
 *
 * When a downcall vector is entered, the INTCTRL_1 interrupt will be
 * When a downcall vector is entered, the INTCTRL_CL interrupt will be
 * masked.  When the client is done processing a downcall, and is ready
 * to accept another, it must unmask this interrupt; if more downcalls
 * are pending, this will cause the INTCTRL_1 vector to be reentered.
 * are pending, this will cause the INTCTRL_CL vector to be reentered.
 * Currently the following interrupt vectors can be entered through a
 * downcall:
 *
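The mask/unmask re-entry loop that the spec above describes can be summarized in a toy state machine. Everything here is a stand-in (intctrl_pending, handle_one_downcall); it only models the "unmask, and if more are pending you get reentered" behavior, not the hypervisor interface:

#include <stdio.h>

static int intctrl_pending;	/* models INTCTRL_CL_STATUS */
static int intctrl_masked;	/* models the INTCTRL_CL mask bit */

static void handle_one_downcall(void)
{
	/* Entered with INTCTRL_CL masked, like a downcall vector. */
	intctrl_masked = 1;
	printf("handling downcall, %d still pending\n", --intctrl_pending);
	/* Done: unmask; if more are pending we are "reentered". */
	intctrl_masked = 0;
}

int main(void)
{
	intctrl_pending = 3;
	while (intctrl_pending > 0 && !intctrl_masked)
		handle_one_downcall();
	return 0;
}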
@@ -369,6 +369,10 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
				/* Weird; reserved value, ignore it. */
				continue;
			}
			if (info_operand & ENTRY_POINT_INFO_OP) {
				/* This info op is ignored by the backtracer. */
				continue;
			}

			/* Skip info ops which are not in the
			 * "one_ago" mode we want right now.
@@ -154,8 +154,14 @@ long tile_compat_sys_msgrcv(int msqid,
#define compat_sys_fstat64 sys_newfstat
#define compat_sys_fstatat64 sys_newfstatat

/* Pass full 64-bit values through ptrace. */
#define compat_sys_ptrace tile_compat_sys_ptrace
/* The native sys_ptrace dynamically handles compat binaries. */
#define compat_sys_ptrace sys_ptrace

/* Call the trampolines to manage pt_regs where necessary. */
#define compat_sys_execve _compat_sys_execve
#define compat_sys_sigaltstack _compat_sys_sigaltstack
#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
#define sys_clone _sys_clone

/*
 * Note that we can't include <linux/unistd.h> here since the header
@@ -256,9 +256,9 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
	return err;
}

long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			     struct compat_sigaltstack __user *uoss_ptr,
			     struct pt_regs *regs)
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			    struct compat_sigaltstack __user *uoss_ptr,
			    struct pt_regs *regs)
{
	stack_t uss, uoss;
	int ret;

@@ -291,7 +291,7 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
	return ret;
}

long _compat_sys_rt_sigreturn(struct pt_regs *regs)
long compat_sys_rt_sigreturn(struct pt_regs *regs)
{
	struct compat_rt_sigframe __user *frame =
		(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);

@@ -312,7 +312,7 @@ long _compat_sys_rt_sigreturn(struct pt_regs *regs)
	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
		goto badframe;

	if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
	if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
		goto badframe;

	return r0;
@@ -15,7 +15,9 @@
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/irqflags.h>
#include <asm/processor.h>
#include <arch/abi.h>
#include <arch/spr_def.h>

#ifdef __tilegx__
#define bnzt bnezt

@@ -25,28 +27,6 @@ STD_ENTRY(current_text_addr)
	{ move r0, lr; jrp lr }
	STD_ENDPROC(current_text_addr)

STD_ENTRY(_sim_syscall)
	/*
	 * Wait for r0-r9 to be ready (and lr on the off chance we
	 * want the syscall to locate its caller), then make a magic
	 * simulator syscall.
	 *
	 * We carefully stall until the registers are readable in case they
	 * are the target of a slow load, etc. so that tile-sim will
	 * definitely be able to read all of them inside the magic syscall.
	 *
	 * Technically this is wrong for r3-r9 and lr, since an interrupt
	 * could come in and restore the registers with a slow load right
	 * before executing the mtspr.  We may need to modify tile-sim to
	 * explicitly stall for this case, but we do not yet have
	 * a way to implement such a stall.
	 */
	{ and zero, lr, r9 ; and zero, r8, r7 }
	{ and zero, r6, r5 ; and zero, r4, r3 }
	{ and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
	{ jrp lr }
	STD_ENDPROC(_sim_syscall)

/*
 * Implement execve().  The i386 code has a note that forking from kernel
 * space results in no copy on write until the execve, so we should be

@@ -102,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
STD_ENTRY(cpu_idle_on_new_stack)
	{
	 move sp, r1
	 mtspr SYSTEM_SAVE_1_0, r2
	 mtspr SPR_SYSTEM_SAVE_K_0, r2
	}
	jal free_thread_info
	j cpu_idle

@@ -124,15 +104,15 @@ STD_ENTRY(smp_nap)
STD_ENTRY(_cpu_idle)
	{
	 lnk r0
	 movei r1, 1
	 movei r1, KERNEL_PL
	}
	{
	 addli r0, r0, _cpu_idle_nap - .
	 mtspr INTERRUPT_CRITICAL_SECTION, r1
	}
	IRQ_ENABLE(r2, r3)		/* unmask, but still with ICS set */
	mtspr EX_CONTEXT_1_1, r1	/* PL1, ICS clear */
	mtspr EX_CONTEXT_1_0, r0
	IRQ_ENABLE(r2, r3)		/* unmask, but still with ICS set */
	mtspr SPR_EX_CONTEXT_K_1, r1	/* Kernel PL, ICS clear */
	mtspr SPR_EX_CONTEXT_K_0, r0
	iret
	.global _cpu_idle_nap
_cpu_idle_nap:
@@ -23,6 +23,7 @@
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
#include <arch/spr_def.h>

/*
 * This module contains the entry code for kernel images. It performs the

@@ -76,7 +77,7 @@ ENTRY(_start)
	}
1:

	/* Get our processor number and save it away in SAVE_1_0. */
	/* Get our processor number and save it away in SAVE_K_0. */
	jal hv_inquire_topology
	mulll_uu r4, r1, r2	/* r1 == y, r2 == width */
	add r4, r4, r0		/* r0 == x, so r4 == cpu == y*width + x */

@@ -124,7 +125,7 @@ ENTRY(_start)
	lw r0, r0
	lw sp, r1
	or r4, sp, r4
	mtspr SYSTEM_SAVE_1_0, r4	/* save ksp0 + cpu */
	mtspr SPR_SYSTEM_SAVE_K_0, r4	/* save ksp0 + cpu */
	addi sp, sp, -STACK_TOP_DELTA
	{
	 move lr, zero		/* stop backtraces in the called function */
@@ -32,8 +32,8 @@
# error "No support for kernel preemption currently"
#endif

#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
# error INT_INTCTRL_1 coded to set high interrupt mask
#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
# error INT_INTCTRL_K coded to set high interrupt mask
#endif

#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

@@ -132,8 +132,8 @@ intvec_\vecname:

	/* Temporarily save a register so we have somewhere to work. */

	mtspr   SYSTEM_SAVE_1_1, r0
	mfspr   r0, EX_CONTEXT_1_1
	mtspr   SPR_SYSTEM_SAVE_K_1, r0
	mfspr   r0, SPR_EX_CONTEXT_K_1

	/* The cmpxchg code clears sp to force us to reset it here on fault. */
	{

@@ -167,18 +167,18 @@ intvec_\vecname:
	 * The page_fault handler may be downcalled directly by the
	 * hypervisor even when Linux is running and has ICS set.
	 *
	 * In this case the contents of EX_CONTEXT_1_1 reflect the
	 * In this case the contents of EX_CONTEXT_K_1 reflect the
	 * previous fault and can't be relied on to choose whether or
	 * not to reinitialize the stack pointer.  So we add a test
	 * to see whether SYSTEM_SAVE_1_2 has the high bit set,
	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
	 * and if so we don't reinitialize sp, since we must be coming
	 * from Linux.  (In fact the precise case is !(val & ~1),
	 * but any Linux PC has to have the high bit set.)
	 *
	 * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
	 * any path that turns into a downcall to one of our TLB handlers.
	 */
	mfspr   r0, SYSTEM_SAVE_1_2
	mfspr   r0, SPR_SYSTEM_SAVE_K_2
	{
	 blz    r0, 0f		/* high bit in S_S_1_2 is for a PC to use */
	 move   r0, sp

@@ -187,12 +187,12 @@ intvec_\vecname:

2:
	/*
	 * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
	 * the current stack top in the higher bits.  So we recover
	 * our stack top by just masking off the low bits, then
	 * point sp at the top aligned address on the actual stack page.
	 */
	mfspr   r0, SYSTEM_SAVE_1_0
	mfspr   r0, SPR_SYSTEM_SAVE_K_0
	mm      r0, r0, zero, LOG2_THREAD_SIZE, 31

0:

@@ -254,7 +254,7 @@ intvec_\vecname:
	 sw     sp, r3
	 addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
	}
	mfspr   r0, EX_CONTEXT_1_0
	mfspr   r0, SPR_EX_CONTEXT_K_0
	.ifc \processing,handle_syscall
	/*
	 * Bump the saved PC by one bundle so that when we return, we won't

@@ -267,7 +267,7 @@ intvec_\vecname:
	 sw     sp, r0
	 addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	}
	mfspr   r0, EX_CONTEXT_1_1
	mfspr   r0, SPR_EX_CONTEXT_K_1
	{
	 sw     sp, r0
	 addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1

@@ -289,7 +289,7 @@ intvec_\vecname:
	.endif
	 addli  sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
	}
	mfspr   r0, SYSTEM_SAVE_1_1	/* Original r0 */
	mfspr   r0, SPR_SYSTEM_SAVE_K_1	/* Original r0 */
	{
	 sw     sp, r0
	 addi   sp, sp, -PTREGS_OFFSET_REG(0) - 4

@@ -309,12 +309,12 @@ intvec_\vecname:
	 * See discussion below at "finish_interrupt_save".
	 */
	.ifc \c_routine, do_page_fault
	mfspr   r2, SYSTEM_SAVE_1_3	/* address of page fault */
	mfspr   r3, SYSTEM_SAVE_1_2	/* info about page fault */
	mfspr   r2, SPR_SYSTEM_SAVE_K_3	/* address of page fault */
	mfspr   r3, SPR_SYSTEM_SAVE_K_2	/* info about page fault */
	.else
	.ifc \vecnum, INT_DOUBLE_FAULT
	{
	 mfspr  r2, SYSTEM_SAVE_1_2	/* double fault info from HV */
	 mfspr  r2, SPR_SYSTEM_SAVE_K_2	/* double fault info from HV */
	 movei  r3, 0
	}
	.else

@@ -467,7 +467,7 @@ intvec_\vecname:
	/* Load tp with our per-cpu offset. */
#ifdef CONFIG_SMP
	{
	 mfspr  r20, SYSTEM_SAVE_1_0
	 mfspr  r20, SPR_SYSTEM_SAVE_K_0
	 moveli r21, lo16(__per_cpu_offset)
	}
	{

@@ -487,7 +487,7 @@ intvec_\vecname:
	 * We load flags in r32 here so we can jump to .Lrestore_regs
	 * directly after do_page_fault_ics() if necessary.
	 */
	mfspr   r32, EX_CONTEXT_1_1
	mfspr   r32, SPR_EX_CONTEXT_K_1
	{
	 andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)

@@ -957,11 +957,11 @@ STD_ENTRY(interrupt_return)
	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
	{
	 mtspr  EX_CONTEXT_1_0, r21
	 mtspr  SPR_EX_CONTEXT_K_0, r21
	 move   r5, zero
	}
	{
	 mtspr  EX_CONTEXT_1_1, lr
	 mtspr  SPR_EX_CONTEXT_K_1, lr
	 andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	}

@@ -1020,7 +1020,7 @@ STD_ENTRY(interrupt_return)

	/* Set r1 to errno if we are returning an error, otherwise zero. */
	{
	 moveli r29, 1024
	 moveli r29, 4096
	 sub    r1, zero, r0
	}
	slt_u   r29, r1, r29

@@ -1199,7 +1199,7 @@ STD_ENTRY(interrupt_return)
	STD_ENDPROC(interrupt_return)

	/*
	 * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
	 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
	 * before returning, so we can properly get more downcalls.
	 */
	.pushsection .text.handle_interrupt_downcall,"ax"

@@ -1208,11 +1208,11 @@ handle_interrupt_downcall:
	check_single_stepping normal, .Ldispatch_downcall
.Ldispatch_downcall:

	/* Clear INTCTRL_1 from the set of interrupts we ever enable. */
	/* Clear INTCTRL_K from the set of interrupts we ever enable. */
	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
	{
	 addi   r30, r30, 4
	 movei  r31, INT_MASK(INT_INTCTRL_1)
	 movei  r31, INT_MASK(INT_INTCTRL_K)
	}
	{
	 lw     r20, r30

@@ -1227,7 +1227,7 @@ handle_interrupt_downcall:
	}
	FEEDBACK_REENTER(handle_interrupt_downcall)

	/* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
	/* Allow INTCTRL_K to be enabled next time we enable interrupts. */
	lw      r20, r30
	or      r20, r20, r31
	sw      r30, r20

@@ -1472,7 +1472,12 @@ handle_ill:
	lw      r26, r24
	sw      r28, r26

	/* Clear TIF_SINGLESTEP */
	/*
	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
	 * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
	 * need to clear it here and can't really impose on all other arches.
	 * So what's another write between friends?
	 */
	GET_THREAD_INFO(r0)

	addi    r1, r0, THREAD_INFO_FLAGS_OFFSET

@@ -1509,7 +1514,7 @@ handle_ill:
/* Various stub interrupt handlers and syscall handlers */

STD_ENTRY_LOCAL(_kernel_double_fault)
	mfspr   r1, EX_CONTEXT_1_0
	mfspr   r1, SPR_EX_CONTEXT_K_0
	move    r2, lr
	move    r3, sp
	move    r4, r52

@@ -1518,34 +1523,29 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
	STD_ENDPROC(_kernel_double_fault)

STD_ENTRY_LOCAL(bad_intr)
	mfspr   r2, EX_CONTEXT_1_0
	mfspr   r2, SPR_EX_CONTEXT_K_0
	panic   "Unhandled interrupt %#x: PC %#lx"
	STD_ENDPROC(bad_intr)

/* Put address of pt_regs in reg and jump. */
#define PTREGS_SYSCALL(x, reg) \
	STD_ENTRY(x); \
	STD_ENTRY(_##x); \
	{ \
	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
	 j _##x \
	 j x \
	}; \
	STD_ENDPROC(x)
	STD_ENDPROC(_##x)

PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL(sys_rt_sigreturn, r0)
PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)

/* Save additional callee-saves to pt_regs, put address in reg and jump. */
#define PTREGS_SYSCALL_ALL_REGS(x, reg) \
	STD_ENTRY(x); \
	push_extra_callee_saves reg; \
	j _##x; \
	STD_ENDPROC(x)

PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
STD_ENTRY(_sys_clone)
	push_extra_callee_saves r4
	j sys_clone
	STD_ENDPROC(_sys_clone)

/*
 * This entrypoint is taken for the cmpxchg and atomic_update fast

@@ -1558,12 +1558,14 @@ PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
 * to be available to it on entry.  It does not modify any callee-save
 * registers (including "lr").  It does not check what PL it is being
 * called at, so you'd better not call it other than at PL0.
 * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
 * it ever is necessary to use more registers, be aware.
 *
 * It does not use the stack, but since it might be re-interrupted by
 * a page fault which would assume the stack was valid, it does
 * save/restore the stack pointer and zero it out to make sure it gets reset.
 * Since we always keep interrupts disabled, the hypervisor won't
 * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
 * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
 * (other than to advance the PC on return).
 *
 * We have to manually validate the user vs kernel address range

@@ -1769,7 +1771,7 @@ ENTRY(sys_cmpxchg)
	/* Do slow mtspr here so the following "mf" waits less. */
	{
	 move   sp, r27
	 mtspr  EX_CONTEXT_1_0, r28
	 mtspr  SPR_EX_CONTEXT_K_0, r28
	}
	mf

@@ -1788,7 +1790,7 @@ ENTRY(sys_cmpxchg)
	}
	{
	 move   sp, r27
	 mtspr  EX_CONTEXT_1_0, r28
	 mtspr  SPR_EX_CONTEXT_K_0, r28
	}
	iret

@@ -1816,7 +1818,7 @@ ENTRY(sys_cmpxchg)
#endif

	/* Issue the slow SPR here while the tns result is in flight. */
	mfspr   r28, EX_CONTEXT_1_0
	mfspr   r28, SPR_EX_CONTEXT_K_0

	{
	 addi   r28, r28, 8	/* return to the instruction after the swint1 */

@@ -1904,7 +1906,7 @@ ENTRY(sys_cmpxchg)
.Lcmpxchg64_mismatch:
	{
	 move   sp, r27
	 mtspr  EX_CONTEXT_1_0, r28
	 mtspr  SPR_EX_CONTEXT_K_0, r28
	}
	mf
	{

@@ -1985,8 +1987,13 @@ int_unalign:
	int_hand     INT_PERF_COUNT, PERF_COUNT, \
		     op_handle_perf_interrupt, handle_nmi
	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
#else
	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
#endif
	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
		     hv_message_intr, handle_interrupt_downcall
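The `moveli r29, 1024` → `4096` change above is the "-4095 lower bound" fix from the merge summary: a raw syscall return value is treated as an errno only if it falls in [-4095, -1], so large negative values remain valid results. A userspace sketch of the same test (the helper name is a stand-in):

#include <stdio.h>

/* A raw syscall return r0 is an error iff (unsigned long)r0 >= -4095UL. */
static long is_syscall_error(long r0)
{
	return (unsigned long)r0 >= (unsigned long)-4095L;
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       is_syscall_error(-2),	/* -ENOENT: error */
	       is_syscall_error(-5000),	/* large negative: valid result */
	       is_syscall_error(3));	/* small positive, e.g. an fd: success */
	return 0;
}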
@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)

@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	raw_local_irq_unmask(INT_IPI_1);
	raw_local_irq_unmask(INT_IPI_K);
#endif
}
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
		panic("hv_register_message_state: error %d", rc);

	/* Make sure downcall interrupts will be enabled. */
	raw_local_irq_unmask(INT_INTCTRL_1);
	raw_local_irq_unmask(INT_INTCTRL_K);
}

void hv_message_intr(struct pt_regs *regs, int intnum)
|
@ -214,9 +214,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
|
|||
/*
|
||||
* Copy the callee-saved registers from the passed pt_regs struct
|
||||
* into the context-switch callee-saved registers area.
|
||||
* We have to restore the callee-saved registers since we may
|
||||
* be cloning a userspace task with userspace register state,
|
||||
* and we won't be unwinding the same kernel frames to restore them.
|
||||
* This way when we start the interrupt-return sequence, the
|
||||
* callee-save registers will be correctly in registers, which
|
||||
* is how we assume the compiler leaves them as we start doing
|
||||
* the normal return-from-interrupt path after calling C code.
|
||||
* Zero out the C ABI save area to mark the top of the stack.
|
||||
*/
|
||||
ksp = (unsigned long) childregs;
|
||||
|
@ -304,15 +305,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
|
|||
/* Allow user processes to access the DMA SPRs */
|
||||
void grant_dma_mpls(void)
|
||||
{
|
||||
#if CONFIG_KERNEL_PL == 2
|
||||
__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
|
||||
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
|
||||
#else
|
||||
__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
|
||||
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Forbid user processes from accessing the DMA SPRs */
|
||||
void restrict_dma_mpls(void)
|
||||
{
|
||||
#if CONFIG_KERNEL_PL == 2
|
||||
__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
|
||||
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
|
||||
#else
|
||||
__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
|
||||
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Pause the DMA engine, then save off its state registers. */
|
||||
|
@ -523,19 +534,14 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
|
|||
* Switch kernel SP, PC, and callee-saved registers.
|
||||
* In the context of the new task, return the old task pointer
|
||||
* (i.e. the task that actually called __switch_to).
|
||||
* Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
|
||||
* Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
|
||||
*/
|
||||
return __switch_to(prev, next, next_current_ksp0(next));
|
||||
}
|
||||
|
||||
long _sys_fork(struct pt_regs *regs)
|
||||
{
|
||||
return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
|
||||
}
|
||||
|
||||
long _sys_clone(unsigned long clone_flags, unsigned long newsp,
|
||||
void __user *parent_tidptr, void __user *child_tidptr,
|
||||
struct pt_regs *regs)
|
||||
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
|
||||
void __user *, parent_tidptr, void __user *, child_tidptr,
|
||||
struct pt_regs *, regs)
|
||||
{
|
||||
if (!newsp)
|
||||
newsp = regs->sp;
|
||||
|
@ -543,18 +549,13 @@ long _sys_clone(unsigned long clone_flags, unsigned long newsp,
|
|||
parent_tidptr, child_tidptr);
|
||||
}
|
||||
|
||||
long _sys_vfork(struct pt_regs *regs)
|
||||
{
|
||||
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
|
||||
regs, 0, NULL, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* sys_execve() executes a new program.
|
||||
*/
|
||||
long _sys_execve(const char __user *path,
|
||||
const char __user *const __user *argv,
|
||||
const char __user *const __user *envp, struct pt_regs *regs)
|
||||
SYSCALL_DEFINE4(execve, const char __user *, path,
|
||||
const char __user *const __user *, argv,
|
||||
const char __user *const __user *, envp,
|
||||
struct pt_regs *, regs)
|
||||
{
|
||||
long error;
|
||||
char *filename;
|
||||
|
@ -570,9 +571,10 @@ long _sys_execve(const char __user *path,
|
|||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
long _compat_sys_execve(const char __user *path,
|
||||
const compat_uptr_t __user *argv,
|
||||
const compat_uptr_t __user *envp, struct pt_regs *regs)
|
||||
long compat_sys_execve(const char __user *path,
|
||||
const compat_uptr_t __user *argv,
|
||||
const compat_uptr_t __user *envp,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
long error;
|
||||
char *filename;
|
||||
|
|
|
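For readers unfamiliar with the SYSCALL_DEFINEn conversions above: the macro expands, roughly, to a sys_name() definition with the listed typed parameters, plus tracing and sign-extension plumbing omitted here. A deliberately simplified stand-in, not the kernel's real macro:

/* Grossly simplified model of SYSCALL_DEFINE2 (real one adds metadata). */
#define SYSCALL_DEFINE2(name, t1, a1, t2, a2) \
	long sys_##name(t1 a1, t2 a2)

SYSCALL_DEFINE2(example_add, long, x, long, y)
{
	return x + y;
}

int main(void)
{
	return (int)sys_example_add(2, 3);	/* expands to sys_example_add */
}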
@ -31,25 +31,6 @@ void user_disable_single_step(struct task_struct *child)
|
|||
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine will put a word on the process's privileged stack.
|
||||
*/
|
||||
static void putreg(struct task_struct *task,
|
||||
				 unsigned long addr, unsigned long value)
{
	unsigned int regno = addr / sizeof(unsigned long);
	struct pt_regs *childregs = task_pt_regs(task);
	childregs->regs[regno] = value;
	childregs->flags |= PT_FLAGS_RESTORE_REGS;
}

static unsigned long getreg(struct task_struct *task, unsigned long addr)
{
	unsigned int regno = addr / sizeof(unsigned long);
	struct pt_regs *childregs = task_pt_regs(task);
	return childregs->regs[regno];
}

/*
 * Called by kernel/ptrace.c when detaching..
 */

@@ -66,59 +47,72 @@ void ptrace_disable(struct task_struct *child)

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	unsigned long __user *datap;
	unsigned long __user *datap = (long __user __force *)data;
	unsigned long tmp;
	int i;
	long ret = -EIO;

#ifdef CONFIG_COMPAT
	if (task_thread_info(current)->status & TS_COMPAT)
		data = (u32)data;
	if (task_thread_info(child)->status & TS_COMPAT)
		addr = (u32)addr;
#endif
	datap = (unsigned long __user __force *)data;
	unsigned long *childregs;
	char *childreg;

	switch (request) {

	case PTRACE_PEEKUSR:  /* Read register from pt_regs. */
		if (addr & (sizeof(data)-1))
			break;
		if (addr < 0 || addr >= PTREGS_SIZE)
			break;
		tmp = getreg(child, addr);   /* Read register */
		ret = put_user(tmp, datap);
		childreg = (char *)task_pt_regs(child) + addr;
#ifdef CONFIG_COMPAT
		if (is_compat_task()) {
			if (addr & (sizeof(compat_long_t)-1))
				break;
			ret = put_user(*(compat_long_t *)childreg,
				       (compat_long_t __user *)datap);
		} else
#endif
		{
			if (addr & (sizeof(long)-1))
				break;
			ret = put_user(*(long *)childreg, datap);
		}
		break;

	case PTRACE_POKEUSR:  /* Write register in pt_regs. */
		if (addr & (sizeof(data)-1))
			break;
		if (addr < 0 || addr >= PTREGS_SIZE)
			break;
		putreg(child, addr, data);   /* Write register */
		childreg = (char *)task_pt_regs(child) + addr;
#ifdef CONFIG_COMPAT
		if (is_compat_task()) {
			if (addr & (sizeof(compat_long_t)-1))
				break;
			*(compat_long_t *)childreg = data;
		} else
#endif
		{
			if (addr & (sizeof(long)-1))
				break;
			*(long *)childreg = data;
		}
		ret = 0;
		break;

	case PTRACE_GETREGS:  /* Get all registers from the child. */
		if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
			break;
		for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
			ret = __put_user(getreg(child, i), datap);
		childregs = (long *)task_pt_regs(child);
		for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
			ret = __put_user(childregs[i], &datap[i]);
			if (ret != 0)
				break;
			datap++;
		}
		break;

	case PTRACE_SETREGS:  /* Set all registers in the child. */
		if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
			break;
		for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
			ret = __get_user(tmp, datap);
		childregs = (long *)task_pt_regs(child);
		for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
			ret = __get_user(childregs[i], &datap[i]);
			if (ret != 0)
				break;
			putreg(child, i, tmp);
			datap++;
		}
		break;

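For context, the PEEKUSR path above is driven from user space by plain ptrace(2) calls. A minimal tracer sketch, using the standard Linux API (register offset 0 and the lack of errno checking are illustrative only, not part of this diff):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) {
		/* Child: request tracing, then stop at the execve(). */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execlp("true", "true", (char *)NULL);
		_exit(1);
	}
	waitpid(pid, NULL, 0);
	/* Read register word 0 of the stopped child; for a 32-bit
	 * (compat) tracee, the kernel code above copies a
	 * compat_long_t instead of a full long. */
	long r0 = ptrace(PTRACE_PEEKUSER, pid, (void *)0L, NULL);
	printf("r0 = %#lx\n", r0);
	ptrace(PTRACE_CONT, pid, NULL, NULL);
	return 0;
}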
@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
	{
	  /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
	  move sp, r13
	  mtspr SYSTEM_SAVE_1_0, r2
	  mtspr SPR_SYSTEM_SAVE_K_0, r2
	}
	FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
.L__switch_to_pc:
@@ -187,11 +187,11 @@ early_param("vmalloc", parse_vmalloc);

#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how
 * much of it is mapped there.  On controller zero, the first few
 * megabytes are mapped at 0xfd000000 as code, so in principle we
 * could start our data mappings higher up, but for now we don't
 * bother, to avoid additional confusion.
 * Determine for each controller where its lowmem is mapped and how much of
 * it is mapped there.  On controller zero, the first few megabytes are
 * already mapped in as code at MEM_SV_INTRPT, so in principle we could
 * start our data mappings higher up, but for now we don't bother, to avoid
 * additional confusion.
 *
 * One question is whether, on systems with more than 768 Mb and
 * controllers of different sizes, to map in a proportionate amount of

@@ -311,7 +311,7 @@ static void __init setup_memory(void)
#endif

	/* We are using a char to hold the cpu_2_node[] mapping */
	BUG_ON(MAX_NUMNODES > 127);
	BUILD_BUG_ON(MAX_NUMNODES > 127);

	/* Discover the ranges of memory available to us */
	for (i = 0; ; ++i) {
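The BUG_ON to BUILD_BUG_ON conversions in this merge move checks on compile-time constants from a runtime branch to a compile-time failure. A rough standalone approximation of the mechanism (not the kernel's exact macro):

/* A false condition yields char[1] (harmless); a true condition
 * yields char[-1], which refuses to compile.  The check therefore
 * costs nothing at runtime, but only works on constants known to
 * the compiler. */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define MAX_NUMNODES 64

static void check(void)
{
	/* Compiles only while MAX_NUMNODES fits in a signed char. */
	BUILD_BUG_ON_SKETCH(MAX_NUMNODES > 127);
}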
@@ -876,6 +876,9 @@ void __cpuinit setup_cpu(int boot)
#if CHIP_HAS_SN_PROC()
	raw_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
	raw_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle

@@ -893,11 +896,12 @@ void __cpuinit setup_cpu(int boot)
#endif

	/*
	 * Set the MPL for interrupt control 0 to user level.
	 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
	 * as well as the PL 0 interrupt mask.
	 * Set the MPL for interrupt control 0 & 1 to the corresponding
	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
	 * SPRs, as well as the interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

@@ -1033,7 +1037,7 @@ static void __init validate_va(void)
	 * In addition, make sure we CAN'T use the end of memory, since
	 * we use the last chunk of each pgd for the pgd_list.
	 */
	int i, fc_fd_ok = 0;
	int i, user_kernel_ok = 0;
	unsigned long max_va = 0;
	unsigned long list_va =
		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

@@ -1044,13 +1048,13 @@ static void __init validate_va(void)
			break;
		if (range.start <= MEM_USER_INTRPT &&
		    range.start + range.size >= MEM_HV_INTRPT)
			fc_fd_ok = 1;
			user_kernel_ok = 1;
		if (range.start == 0)
			max_va = range.size;
		BUG_ON(range.start + range.size > list_va);
	}
	if (!fc_fd_ok)
		early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
	if (!user_kernel_ok)
		early_panic("Hypervisor not configured for user/kernel VAs\n");
	if (max_va == 0)
		early_panic("Hypervisor not configured for low VAs\n");
	if (max_va < KERNEL_HIGH_VADDR)

@@ -1334,6 +1338,10 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));
	if (addr < VMALLOC_START || addr >= VMALLOC_END)
		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
		      " try increasing CONFIG_VMALLOC_RESERVE\n",
		      addr, VMALLOC_START, VMALLOC_END);

	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
@@ -41,8 +41,8 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))


long _sys_sigaltstack(const stack_t __user *uss,
		      stack_t __user *uoss, struct pt_regs *regs)
SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
		stack_t __user *, uoss, struct pt_regs *, regs)
{
	return do_sigaltstack(uss, uoss, regs->sp);
}

@@ -78,7 +78,7 @@ int restore_sigcontext(struct pt_regs *regs,
}

/* sigreturn() returns long since it restores r0 in the interrupted code. */
long _sys_rt_sigreturn(struct pt_regs *regs)
SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
{
	struct rt_sigframe __user *frame =
		(struct rt_sigframe __user *)(regs->sp);
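For readers unfamiliar with the macro family being adopted here: SYSCALL_DEFINEn(name, type1, arg1, ...) stitches the type/name pairs back into an ordinary sys_name() definition (the real macros also emit metadata for syscall tracing). Roughly, the sigaltstack case above expands to something like:

/* Simplified sketch of the expansion; the real kernel macros add
 * asmlinkage, tracing hooks, and argument-width fixups. */
long sys_sigaltstack(const stack_t __user *uss,
		     stack_t __user *uoss, struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->sp);
}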
@@ -15,7 +15,7 @@
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__   /* No support for single-step yet. */
#ifndef __tilegx__   /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>

@@ -660,4 +660,75 @@ void single_step_once(struct pt_regs *regs)
	regs->pc += 8;
}

#else
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>

static DEFINE_PER_CPU(unsigned long, ss_saved_pc);


/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */

void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}


/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return back.
 */

void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

#endif /* !__tilegx__ */
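From user space, this hardware path is reached through the generic ptrace single-step request. A minimal tracer sketch that counts the instructions a child executes (standard Linux ptrace API, illustrative only):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execlp("true", "true", (char *)NULL);
		_exit(1);
	}

	int status;
	long steps = 0;
	waitpid(pid, &status, 0);	/* catch the post-execve stop */
	/* Each PTRACE_SINGLESTEP resumes the child for one instruction;
	 * the kernel delivers SIGTRAP once it has retired. */
	while (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) == 0) {
		waitpid(pid, &status, 0);
		if (WIFEXITED(status))
			break;
		steps++;
	}
	printf("%ld instructions single-stepped\n", steps);
	return 0;
}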
@@ -212,7 +212,7 @@ void __init ipi_init(void)

		tile.x = cpu_x(cpu);
		tile.y = cpu_y(cpu);
		if (hv_get_ipi_pte(tile, 1, &pte) != 0)
		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
			panic("Failed to initialize IPI for cpu %d\n", cpu);

		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
@@ -30,6 +30,10 @@
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0  /* Backtrace still ongoing */
#define KBT_DONE	1  /* Backtrace cleanly completed */
#define KBT_RUNNING	2  /* Can't run backtrace on a running task */
#define KBT_LOOP	3  /* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)

@@ -207,11 +211,11 @@ static int KBacktraceIterator_next_item_inclusive(
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return 1;
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return 0;
			return KBT_DONE;
	}
}

@@ -264,7 +268,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
	kbt->pgtable = NULL;
	kbt->verbose = 0;   /* override in caller if desired */
	kbt->profile = 0;   /* override in caller if desired */
	kbt->end = 0;
	kbt->end = KBT_ONGOING;
	kbt->new_context = 0;
	if (is_current) {
		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;

@@ -290,7 +294,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = 1;
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();

@@ -305,26 +309,29 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end;
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) &&
	    !KBacktraceIterator_restart(kbt)) {
		kbt->end = 1;
		return;
	}

	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

@@ -387,6 +394,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
}
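The iterator API touched above is consumed in the usual init/end/next pattern. A sketch of a caller, built only from names visible in this diff (the exact init signature is assumed from the call sites here, so treat it as illustrative):

/* Hypothetical in-kernel walker: dump the PCs of a sleeping task's
 * stack and report whether the new loop detection fired. */
static void dump_task_pcs(struct task_struct *t)
{
	struct KBacktraceIterator kbt;

	KBacktraceIterator_init(&kbt, t, NULL);	/* NULL: derive regs */
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
		pr_err("  pc %#lx\n", (unsigned long)kbt.it.pc);
	if (kbt.end == KBT_LOOP)
		pr_err("  (backtrace looped; truncated)\n");
}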
@@ -110,6 +110,15 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
#define sys_sync_file_range sys_sync_file_range2
#endif

/* Call the trampolines to manage pt_regs where necessary. */
#define sys_execve _sys_execve
#define sys_sigaltstack _sys_sigaltstack
#define sys_rt_sigreturn _sys_rt_sigreturn
#define sys_clone _sys_clone
#ifndef __tilegx__
#define sys_cmpxchg_badaddr _sys_cmpxchg_badaddr
#endif

/*
 * Note that we can't include <linux/unistd.h> here since the header
 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
@@ -260,7 +260,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
		address = regs->pc;
		break;
	case INT_UNALIGN_DATA:
#ifndef __tilegx__  /* FIXME: GX: no single-step yet */
#ifndef __tilegx__  /* Emulated support for single step debugging */
		if (unaligned_fixup >= 0) {
			struct single_step_state *state =
				current_thread_info()->step_state;

@@ -278,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
	case INT_DOUBLE_FAULT:
		/*
		 * For double fault, "reason" is actually passed as
		 * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
		 * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
		 * we can provide the original fault number rather than
		 * the uninteresting "INT_DOUBLE_FAULT" so the user can
		 * learn what actually struck while PL0 ICS was set.
38	arch/tile/kvm/Kconfig	Normal file
@@ -0,0 +1,38 @@
#
# KVM configuration
#

source "virt/kvm/Kconfig"

menuconfig VIRTUALIZATION
	bool "Virtualization"
	---help---
	  Say Y here to get to see options for using your Linux host to run
	  other operating systems inside virtual machines (guests).
	  This option alone does not add any kernel code.

	  If you say N, all options in this submenu will be skipped and
	  disabled.

if VIRTUALIZATION

config KVM
	tristate "Kernel-based Virtual Machine (KVM) support"
	depends on HAVE_KVM && MODULES && EXPERIMENTAL
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	---help---
	  Support hosting paravirtualized guest machines.

	  This module provides access to the hardware capabilities through
	  a character device node named /dev/kvm.

	  To compile this as a module, choose M here: the module
	  will be called kvm.

	  If unsure, say N.

source drivers/vhost/Kconfig
source drivers/virtio/Kconfig

endif # VIRTUALIZATION
@@ -3,8 +3,8 @@
#

lib-y = cacheflush.o checksum.o cpumask.o delay.o \
	mb_incoherent.o uaccess.o \
	memcpy_$(BITS).o memchr_$(BITS).o memmove_$(BITS).o memset_$(BITS).o \
	mb_incoherent.o uaccess.o memmove.o \
	memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
	strchr_$(BITS).o strlen_$(BITS).o

ifeq ($(CONFIG_TILEGX),y)
@@ -300,7 +300,7 @@ void __init __init_atomic_per_cpu(void)
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* Validate power-of-two and "bigger than cpus" assumption */
	BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*

@@ -314,17 +314,17 @@ void __init __init_atomic_per_cpu(void)
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* The futex code makes this assumption, so we validate it here. */
	BUG_ON(sizeof(atomic_t) != sizeof(int));
	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}
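The comment above pins down the hashing scheme these checks protect: the atomic word's offset within its page, with the low 3 bits dropped, indexes atomic_locks. A sketch of that mapping (the helper name is made up; the real lookup lives elsewhere in atomic_32.c):

extern int atomic_locks[];	/* ATOMIC_HASH_SIZE entries */

/* Hypothetical illustration of the index computation the
 * BUILD_BUG_ONs above are validating: the index is bounded by
 * PAGE_SIZE >> 3, which must not exceed ATOMIC_HASH_SIZE, so it can
 * never run off the end of the table. */
static inline int *atomic_lock_for(volatile int *v)
{
	unsigned long offset = (unsigned long)v & (PAGE_SIZE - 1);

	return &atomic_locks[offset >> 3];
}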
@@ -45,6 +45,9 @@ EXPORT_SYMBOL(__copy_from_user_zeroing);
EXPORT_SYMBOL(__copy_in_user_inatomic);
#endif

/* arch/tile/lib/mb_incoherent.S */
EXPORT_SYMBOL(__mb_incoherent);

/* hypervisor glue */
#include <hv/hypervisor.h>
EXPORT_SYMBOL(hv_dev_open);
@@ -10,14 +10,16 @@
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * This file shares the implementation of the userspace memcpy and
 * the kernel's memcpy, copy_to_user and copy_from_user.
 */

#include <arch/chip.h>


/*
 * This file shares the implementation of the userspace memcpy and
 * the kernel's memcpy, copy_to_user and copy_from_user.
 */

#include <linux/linkage.h>

/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */

@@ -53,9 +55,9 @@
 */
ENTRY(__copy_from_user_inatomic)
	.type __copy_from_user_inatomic, @function
        FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
	FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
	  .text.memcpy_common, \
          .Lend_memcpy_common - __copy_from_user_inatomic)
	  .Lend_memcpy_common - __copy_from_user_inatomic)
	{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
	.size __copy_from_user_inatomic, . - __copy_from_user_inatomic

@@ -64,7 +66,7 @@ ENTRY(__copy_from_user_inatomic)
 */
ENTRY(__copy_from_user_zeroing)
	.type __copy_from_user_zeroing, @function
        FEEDBACK_REENTER(__copy_from_user_inatomic)
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
	.size __copy_from_user_zeroing, . - __copy_from_user_zeroing

@@ -74,13 +76,13 @@ ENTRY(__copy_from_user_zeroing)
 */
ENTRY(__copy_to_user_inatomic)
	.type __copy_to_user_inatomic, @function
        FEEDBACK_REENTER(__copy_from_user_inatomic)
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	{ movei r29, IS_COPY_TO_USER; j memcpy_common }
	.size __copy_to_user_inatomic, . - __copy_to_user_inatomic

ENTRY(memcpy)
	.type memcpy, @function
        FEEDBACK_REENTER(__copy_from_user_inatomic)
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	{ movei r29, IS_MEMCPY }
	.size memcpy, . - memcpy
	/* Fall through */

@@ -157,35 +159,35 @@ EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
	{ addi r3, r1, 60; andi r9, r9, -64 }

#if CHIP_HAS_WH64()
        /* No need to prefetch dst, we'll just do the wh64
         * right before we copy a line.
	/* No need to prefetch dst, we'll just do the wh64
	 * right before we copy a line.
	 */
#endif

EX:	{ lw r5, r3; addi r3, r3, 64; movei r4, 1 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, .; move r27, lr }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, .; move r27, lr }
EX:	{ lw r6, r3; addi r3, r3, 64 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
EX:	{ lw r7, r3; addi r3, r3, 64 }
#if !CHIP_HAS_WH64()
        /* Prefetch the dest */
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
        /* Use a real load to cause a TLB miss if necessary.  We aren't using
         * r28, so this should be fine.
         */
	/* Prefetch the dest */
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
	/* Use a real load to cause a TLB miss if necessary.  We aren't using
	 * r28, so this should be fine.
	 */
EX:	{ lw r28, r9; addi r9, r9, 64 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
        { prefetch r9; addi r9, r9, 64 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
	{ prefetch r9; addi r9, r9, 64 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
        { prefetch r9; addi r9, r9, 64 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
	{ prefetch r9; addi r9, r9, 64 }
#endif
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bz zero, .Lbig_loop2 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bz zero, .Lbig_loop2 }

	/* On entry to this loop:
	 * - r0 points to the start of dst line 0

@@ -197,7 +199,7 @@ EX: { lw r28, r9; addi r9, r9, 64 }
	 *   to some "safe" recently loaded address.
	 * - r5 contains *(r1 + 60)       [i.e. last word of source line 0]
	 * - r6 contains *(r1 + 64 + 60)  [i.e. last word of source line 1]
         * - r9 contains ((r0 + 63) & -64)
	 * - r9 contains ((r0 + 63) & -64)
	 *   [start of next dst cache line.]
	 */

@@ -208,137 +210,137 @@ EX: { lw r28, r9; addi r9, r9, 64 }
	/* Copy line 0, first stalling until r5 is ready. */
EX:	{ move r12, r5; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
        /* Prefetch several lines ahead. */
	/* Prefetch several lines ahead. */
EX:	{ lw r5, r3; addi r3, r3, 64 }
        { jal .Lcopy_line }
	{ jal .Lcopy_line }

	/* Copy line 1, first stalling until r6 is ready. */
EX:	{ move r12, r6; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
        /* Prefetch several lines ahead. */
	/* Prefetch several lines ahead. */
EX:	{ lw r6, r3; addi r3, r3, 64 }
	{ jal .Lcopy_line }

	/* Copy line 2, first stalling until r7 is ready. */
EX:	{ move r12, r7; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
        /* Prefetch several lines ahead. */
	/* Prefetch several lines ahead. */
EX:	{ lw r7, r3; addi r3, r3, 64 }
        /* Use up a caches-busy cycle by jumping back to the top of the
         * loop.  Might as well get it out of the way now.
         */
        { j .Lbig_loop }
	/* Use up a caches-busy cycle by jumping back to the top of the
	 * loop.  Might as well get it out of the way now.
	 */
	{ j .Lbig_loop }


	/* On entry:
	 * - r0 points to the destination line.
	 * - r1 points to the source line.
         * - r3 is the next prefetch address.
	 * - r3 is the next prefetch address.
	 * - r9 holds the last address used for wh64.
	 * - r12 = WORD_15
         * - r16 = WORD_0.
         * - r17 == r1 + 16.
         * - r27 holds saved lr to restore.
	 * - r16 = WORD_0.
	 * - r17 == r1 + 16.
	 * - r27 holds saved lr to restore.
	 *
	 * On exit:
	 * - r0 is incremented by 64.
	 * - r1 is incremented by 64, unless that would point to a word
         *   beyond the end of the source array, in which case it is redirected
         *   to point to an arbitrary word already in the cache.
	 *   beyond the end of the source array, in which case it is redirected
	 *   to point to an arbitrary word already in the cache.
	 * - r2 is decremented by 64.
         * - r3 is unchanged, unless it points to a word beyond the
         *   end of the source array, in which case it is redirected
         *   to point to an arbitrary word already in the cache.
         *   Redirecting is OK since if we are that close to the end
         *   of the array we will not come back to this subroutine
         *   and use the contents of the prefetched address.
	 * - r3 is unchanged, unless it points to a word beyond the
	 *   end of the source array, in which case it is redirected
	 *   to point to an arbitrary word already in the cache.
	 *   Redirecting is OK since if we are that close to the end
	 *   of the array we will not come back to this subroutine
	 *   and use the contents of the prefetched address.
	 * - r4 is nonzero iff r2 >= 64.
         * - r9 is incremented by 64, unless it points beyond the
         *   end of the last full destination cache line, in which
         *   case it is redirected to a "safe address" that can be
         *   clobbered (sp - 64)
	 * - r9 is incremented by 64, unless it points beyond the
	 *   end of the last full destination cache line, in which
	 *   case it is redirected to a "safe address" that can be
	 *   clobbered (sp - 64)
	 * - lr contains the value in r27.
	 */

/* r26 unused */

.Lcopy_line:
        /* TODO: when r3 goes past the end, we would like to redirect it
         * to prefetch the last partial cache line (if any) just once, for the
         * benefit of the final cleanup loop.  But we don't want to
         * prefetch that line more than once, or subsequent prefetches
         * will go into the RTF.  But then .Lbig_loop should unconditionally
         * branch to top of loop to execute final prefetch, and its
         * nop should become a conditional branch.
         */
	/* TODO: when r3 goes past the end, we would like to redirect it
	 * to prefetch the last partial cache line (if any) just once, for the
	 * benefit of the final cleanup loop.  But we don't want to
	 * prefetch that line more than once, or subsequent prefetches
	 * will go into the RTF.  But then .Lbig_loop should unconditionally
	 * branch to top of loop to execute final prefetch, and its
	 * nop should become a conditional branch.
	 */

        /* We need two non-memory cycles here to cover the resources
         * used by the loads initiated by the caller.
         */
        { add r15, r1, r2 }
	/* We need two non-memory cycles here to cover the resources
	 * used by the loads initiated by the caller.
	 */
	{ add r15, r1, r2 }
.Lcopy_line2:
        { slt_u r13, r3, r15; addi r17, r1, 16 }
	{ slt_u r13, r3, r15; addi r17, r1, 16 }

        /* NOTE: this will stall for one cycle as L1 is busy. */
	/* NOTE: this will stall for one cycle as L1 is busy. */

        /* Fill second L1D line. */
	/* Fill second L1D line. */
EX:	{ lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */

#if CHIP_HAS_WH64()
        /* Prepare destination line for writing. */
	/* Prepare destination line for writing. */
EX:	{ wh64 r9; addi r9, r9, 64 }
#else
        /* Prefetch dest line */
	/* Prefetch dest line */
	{ prefetch r9; addi r9, r9, 64 }
#endif
        /* Load seven words that are L1D hits to cover wh64 L2 usage. */
	/* Load seven words that are L1D hits to cover wh64 L2 usage. */

        /* Load the three remaining words from the last L1D line, which
         * we know has already filled the L1D.
         */
	/* Load the three remaining words from the last L1D line, which
	 * we know has already filled the L1D.
	 */
EX:	{ lw r4, r1;  addi r1, r1, 4;   addi r20, r1, 16 } /* r4 = WORD_12 */
EX:	{ lw r8, r1;  addi r1, r1, 4;   slt_u r13, r20, r15 }/* r8 = WORD_13 */
EX:	{ lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */

        /* Load the three remaining words from the first L1D line, first
         * stalling until it has filled by "looking at" r16.
         */
	/* Load the three remaining words from the first L1D line, first
	 * stalling until it has filled by "looking at" r16.
	 */
EX:	{ lw r13, r1; addi r1, r1, 4; move zero, r16 } /* r13 = WORD_1 */
EX:	{ lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_2 */
EX:	{ lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */

        /* Load second word from the second L1D line, first
         * stalling until it has filled by "looking at" r17.
         */
	/* Load second word from the second L1D line, first
	 * stalling until it has filled by "looking at" r17.
	 */
EX:	{ lw r19, r1; addi r1, r1, 4; move zero, r17 } /* r19 = WORD_5 */

        /* Store last word to the destination line, potentially dirtying it
         * for the first time, which keeps the L2 busy for two cycles.
         */
	/* Store last word to the destination line, potentially dirtying it
	 * for the first time, which keeps the L2 busy for two cycles.
	 */
EX:	{ sw r10, r12 } /* store(WORD_15) */

        /* Use two L1D hits to cover the sw L2 access above. */
	/* Use two L1D hits to cover the sw L2 access above. */
EX:	{ lw r10, r1; addi r1, r1, 4 } /* r10 = WORD_6 */
EX:	{ lw r12, r1; addi r1, r1, 4 } /* r12 = WORD_7 */

        /* Fill third L1D line. */
	/* Fill third L1D line. */
EX:	{ lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */

        /* Store first L1D line. */
	/* Store first L1D line. */
EX:	{ sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
EX:	{ sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
EX:	{ sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
#if CHIP_HAS_WH64()
EX:	{ sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
#else
        /* Back up the r9 to a cache line we are already storing to
	/* Back up the r9 to a cache line we are already storing to
	 * if it gets past the end of the dest vector.  Strictly speaking,
	 * we don't need to back up to the start of a cache line, but it's free
	 * and tidy, so why not?
         */
	 */
EX:	{ sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
#endif
        /* Store second L1D line. */
	/* Store second L1D line. */
EX:	{ sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
EX:	{ sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */
EX:	{ sw r0, r10; addi r0, r0, 4 } /* store(WORD_6) */

@@ -348,30 +350,30 @@ EX: { lw r13, r1; addi r1, r1, 4; move zero, r18 } /* r13 = WORD_9 */
EX:	{ lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_10 */
EX:	{ lw r15, r1; move r1, r20 } /* r15 = WORD_11 */

        /* Store third L1D line. */
	/* Store third L1D line. */
EX:	{ sw r0, r18; addi r0, r0, 4 } /* store(WORD_8) */
EX:	{ sw r0, r13; addi r0, r0, 4 } /* store(WORD_9) */
EX:	{ sw r0, r14; addi r0, r0, 4 } /* store(WORD_10) */
EX:	{ sw r0, r15; addi r0, r0, 4 } /* store(WORD_11) */

        /* Store rest of fourth L1D line. */
	/* Store rest of fourth L1D line. */
EX:	{ sw r0, r4;  addi r0, r0, 4 } /* store(WORD_12) */
        {
	{
EX:	sw r0, r8 /* store(WORD_13) */
        addi r0, r0, 4
	addi r0, r0, 4
	/* Will r2 be > 64 after we subtract 64 below? */
        shri r4, r2, 7
        }
	{
	shri r4, r2, 7
	}
        {
	{
EX:	sw r0, r11 /* store(WORD_14) */
        addi r0, r0, 8
        /* Record 64 bytes successfully copied. */
        addi r2, r2, -64
        }
	addi r0, r0, 8
	/* Record 64 bytes successfully copied. */
	addi r2, r2, -64
	}

	{ jrp lr; move lr, r27 }

        /* Convey to the backtrace library that the stack frame is size
	/* Convey to the backtrace library that the stack frame is size
	 * zero, and the real return address is on the stack rather than
	 * in 'lr'.
	 */
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/module.h>

#undef memset

void *memset(void *s, int c, size_t n)
{
@@ -16,6 +16,8 @@
#include <linux/string.h>
#include <linux/module.h>

#undef strlen

size_t strlen(const char *s)
{
	/* Get an aligned pointer. */
@@ -66,10 +66,10 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.  Called indirectly
 * from sys_cmpxchg() in kernel/intvec.S.
 * an unaligned address or a high kernel address.
 */
int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
		struct pt_regs *, regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,

@@ -563,10 +563,10 @@ static int handle_page_fault(struct pt_regs *regs,
/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_1_x registers,
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might

@@ -610,7 +610,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
@@ -37,6 +37,8 @@
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"


@@ -217,13 +219,6 @@ static unsigned long cache_flush_length(unsigned long length)
	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}

/* On the simulator, confirm lines have been evicted everywhere. */
static void validate_lines_evicted(unsigned long pfn, size_t length)
{
	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
		    (HV_PhysAddr)pfn << PAGE_SHIFT, length);
}

/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{

@@ -234,7 +229,7 @@ void homecache_flush_cache(struct page *page, int order)

	homecache_mask(page, pages, &home_mask);
	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
	validate_lines_evicted(pfn, pages * PAGE_SIZE);
	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}

@@ -1060,7 +1060,7 @@ void free_initmem(void)

	/*
	 * Free the pages mapped from 0xc0000000 that correspond to code
	 * pages from 0xfd000000 that we won't use again after init.
	 * pages from MEM_SV_INTRPT that we won't use again after init.
	 */
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext - text_delta,
@@ -61,7 +61,8 @@ console_initcall(hvc_tile_console_init);

static int __init hvc_tile_init(void)
{
	hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
	return 0;
	struct hvc_struct *s;
	s = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
	return IS_ERR(s) ? PTR_ERR(s) : 0;
}
device_initcall(hvc_tile_init);
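This is the kernel's standard error-pointer idiom ("char: hvc: check for error case" in the merge summary): a pointer-returning function encodes an errno value in the pointer itself, and IS_ERR()/PTR_ERR() recover it. A generic sketch of the pattern, with a made-up producer:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>

struct foo;

/* Hypothetical producer: failure comes back as ERR_PTR(-errno),
 * not as NULL, so callers must test with IS_ERR(). */
static struct foo *foo_create(int id)
{
	if (id < 0)
		return ERR_PTR(-EINVAL);
	return NULL;	/* real allocation elided in this sketch */
}

static int __init foo_init(void)
{
	struct foo *f = foo_create(0);

	/* Same shape as the hvc_tile_init() fix above: propagate the
	 * encoded errno instead of silently returning success. */
	return IS_ERR(f) ? PTR_ERR(f) : 0;
}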
@@ -374,7 +374,7 @@ config SLUB_STATS
config DEBUG_KMEMLEAK
	bool "Kernel memory leak detector"
	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
	select DEBUG_FS if SYSFS
	select STACKTRACE if STACKTRACE_SUPPORT