Merge branch 'move_extents' of git://oss.oracle.com/git/tye/linux-2.6 into ocfs2-merge-window

Conflicts:
	fs/ocfs2/ioctl.c
Joel Becker 2011-05-25 21:51:55 -07:00
commit ece928df16
457 changed files with 18613 additions and 9705 deletions

.gitignore

@@ -57,6 +57,7 @@ modules.builtin
include/config
include/linux/version.h
include/generated
arch/*/include/generated
# stgit generated dirs
patches-*


@@ -73,7 +73,7 @@ installmandocs: mandocs
###
#External programs used
KERNELDOC = $(srctree)/scripts/kernel-doc
DOCPROC = $(objtree)/scripts/basic/docproc
DOCPROC = $(objtree)/scripts/docproc
XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl
XMLTOFLAGS += --skip-validation


@@ -115,28 +115,8 @@ ubi.mtd=0 root=ubi0:rootfs rootfstype=ubifs
Module Parameters for Debugging
===============================
When UBIFS has been compiled with debugging enabled, there are 3 module
When UBIFS has been compiled with debugging enabled, there are 2 module
parameters that are available to control aspects of testing and debugging.
The parameters are unsigned integers where each bit controls an option.
The parameters are:
debug_msgs Selects which debug messages to display, as follows:
Message Type Flag value
General messages 1
Journal messages 2
Mount messages 4
Commit messages 8
LEB search messages 16
Budgeting messages 32
Garbage collection messages 64
Tree Node Cache (TNC) messages 128
LEB properties (lprops) messages 256
Input/output messages 512
Log messages 1024
Scan messages 2048
Recovery messages 4096
debug_chks Selects extra checks that UBIFS can do while running:
@@ -154,11 +134,9 @@ debug_tsts	Selects a mode of testing, as follows:
Test mode Flag value
Force in-the-gaps method 2
Failure mode for recovery testing 4
For example, set debug_msgs to 5 to display General messages and Mount
messages.
For example, set debug_chks to 3 to enable general and TNC checks.
References


@@ -19,6 +19,7 @@ Supported adapters:
* Intel 6 Series (PCH)
* Intel Patsburg (PCH)
* Intel DH89xxCC (PCH)
* Intel Panther Point (PCH)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller


@@ -38,7 +38,7 @@ static struct i2c_driver foo_driver = {
.name = "foo",
},
.id_table = foo_ids,
.id_table = foo_idtable,
.probe = foo_probe,
.remove = foo_remove,
/* if device autodetection is needed: */


@@ -34,7 +34,8 @@ Contents
Currently the Linux Elantech touchpad driver is aware of two different
hardware versions unimaginatively called version 1 and version 2. Version 1
is found in "older" laptops and uses 4 bytes per packet. Version 2 seems to
be introduced with the EeePC and uses 6 bytes per packet.
be introduced with the EeePC and uses 6 bytes per packet, and provides
additional features such as position of two fingers, and width of the touch.
The driver tries to support both hardware versions and should be compatible
with the Xorg Synaptics touchpad driver and its graphical configuration
@@ -94,18 +95,44 @@ Currently the Linux Elantech touchpad driver provides two extra knobs under
can check these bits and reject any packet that appears corrupted. Using
this knob you can bypass that check.
It is not known yet whether hardware version 2 provides the same parity
bits. Hence checking is disabled by default. Currently even turning it on
will do nothing.
Hardware version 2 does not provide the same parity bits. Only some basic
data consistency checking can be done. For now checking is disabled by
default. Currently even turning it on will do nothing.
/////////////////////////////////////////////////////////////////////////////
3. Differentiating hardware versions
=================================
3. Hardware version 1
To detect the hardware version, read the version number as param[0].param[1].param[2]
4 bytes version: (after the arrow is the name given in the Dell-provided driver)
02.00.22 => EF013
02.06.00 => EF019
In the wild, there appear to be more versions, such as 00.01.64, 01.00.21,
02.00.00, 02.00.04, 02.00.06.
6 bytes:
02.00.30 => EF113
02.08.00 => EF023
02.08.XX => EF123
02.0B.00 => EF215
04.01.XX => Scroll_EF051
04.02.XX => EF051
In the wild, there appear to be more versions, such as 04.03.01, 04.04.11. There
appears to be almost no difference, except for EF113, which does not report
pressure/width and has different data consistency checks.
Probably all the versions with param[0] <= 01 can be considered as
4 bytes/firmware 1. The versions < 02.08.00, with the exception of 02.00.30, as
4 bytes/firmware 2. Everything >= 02.08.00 can be considered as 6 bytes.
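As a reading aid (not part of this patch), the classification heuristic
above can be sketched in C; all names here are hypothetical:

    /* Classify a firmware version param[0].param[1].param[2] into the
     * packet format to expect, per the heuristic described above. */
    enum packet_format { FMT_4BYTE_FW1, FMT_4BYTE_FW2, FMT_6BYTE };

    static enum packet_format classify_version(unsigned char p0,
                                               unsigned char p1,
                                               unsigned char p2)
    {
        if (p0 <= 0x01)
            return FMT_4BYTE_FW1;   /* param[0] <= 01 */
        if (p0 == 0x02 && p1 == 0x00 && p2 == 0x30)
            return FMT_6BYTE;       /* the 02.00.30 (EF113) exception */
        if (p0 == 0x02 && p1 < 0x08)
            return FMT_4BYTE_FW2;   /* below 02.08.00 */
        return FMT_6BYTE;           /* 02.08.00 and above */
    }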
/////////////////////////////////////////////////////////////////////////////
4. Hardware version 1
==================
3.1 Registers
4.1 Registers
~~~~~~~~~
By echoing a hexadecimal value to a register its contents can be altered.
@@ -168,7 +195,7 @@ For example:
smart edge activation area width?
3.2 Native relative mode 4 byte packet format
4.2 Native relative mode 4 byte packet format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
byte 0:
@@ -226,9 +253,13 @@ byte 3:
positive = down
3.3 Native absolute mode 4 byte packet format
4.3 Native absolute mode 4 byte packet format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
EF013 and EF019 have a special behaviour (due to a bug in the firmware?), and
when 1 finger is touching, the first 2 position reports must be discarded.
This counting is reset whenever a different number of fingers is reported.
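A minimal sketch of that workaround (hypothetical names; the real driver
may track this differently):

    /* EF013/EF019 quirk: after any change in the reported finger count,
     * discard the first two one-finger position reports. */
    static int discard_left;
    static int prev_fingers = -1;

    static int must_discard_report(int fingers)
    {
        if (fingers != prev_fingers) {
            prev_fingers = fingers;
            discard_left = (fingers == 1) ? 2 : 0;
        }
        if (discard_left > 0) {
            discard_left--;
            return 1;   /* drop this position report */
        }
        return 0;
    }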
byte 0:
firmware version 1.x:
@@ -279,11 +310,11 @@ byte 3:
/////////////////////////////////////////////////////////////////////////////
4. Hardware version 2
5. Hardware version 2
==================
4.1 Registers
5.1 Registers
~~~~~~~~~
By echoing a hexadecimal value to a register its contents can be altered.
@@ -316,16 +347,41 @@ For example:
0x7f = never i.e. tap again to release)
4.2 Native absolute mode 6 byte packet format
5.2 Native absolute mode 6 byte packet format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5.2.1 Parity checking and packet re-synchronization
There is no parity checking; however, some consistency checks can be performed.
4.2.1 One finger touch
For instance for EF113:
SA1= packet[0];
A1 = packet[1];
B1 = packet[2];
SB1= packet[3];
C1 = packet[4];
D1 = packet[5];
if( (((SA1 & 0x3C) != 0x3C) && ((SA1 & 0xC0) != 0x80)) || // check Byte 1
(((SA1 & 0x0C) != 0x0C) && ((SA1 & 0xC0) == 0x80)) || // check Byte 1 (one finger pressed)
(((SA1 & 0xC0) != 0x80) && (( A1 & 0xF0) != 0x00)) || // check Byte 2
(((SB1 & 0x3E) != 0x38) && ((SA1 & 0xC0) != 0x80)) || // check Byte 4
(((SB1 & 0x0E) != 0x08) && ((SA1 & 0xC0) == 0x80)) || // check Byte 4 (one finger pressed)
(((SA1 & 0xC0) != 0x80) && (( C1 & 0xF0) != 0x00)) ) // check Byte 5
// error detected
For all the other ones, there are just a few constant bits:
if( ((packet[0] & 0x0C) != 0x04) ||
((packet[3] & 0x0f) != 0x02) )
// error detected
In case an error is detected, all the packets are shifted by one (and packet[0] is discarded).
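A sketch of that shift-by-one re-synchronization (hypothetical buffer
handling, not the driver's actual code):

    #include <string.h>

    static unsigned char packet[6];
    static int bytes_received = 6;

    static void resync_packet(void)
    {
        /* Discard packet[0], shift the rest up, and wait for one more
         * byte to complete a new 6-byte candidate packet. */
        memmove(packet, packet + 1, 5);
        bytes_received = 5;
    }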
5.2.1 One/Three finger touch
~~~~~~~~~~~~~~~~
byte 0:
bit 7 6 5 4 3 2 1 0
n1 n0 . . . . R L
n1 n0 w3 w2 . . R L
L, R = 1 when Left, Right mouse button pressed
n1..n0 = numbers of fingers on touchpad
@@ -333,24 +389,40 @@ byte 0:
byte 1:
bit 7 6 5 4 3 2 1 0
. . . . . x10 x9 x8
p7 p6 p5 p4 . x10 x9 x8
byte 2:
bit 7 6 5 4 3 2 1 0
x7 x6 x5 x4 x4 x2 x1 x0
x7 x6 x5 x4 x3 x2 x1 x0
x10..x0 = absolute x value (horizontal)
byte 3:
bit 7 6 5 4 3 2 1 0
. . . . . . . .
n4 vf w1 w0 . . . b2
n4 = set if more than 3 fingers (only in 3 fingers mode)
vf = a kind of flag ? (only on EF123, 0 when finger is over one
of the buttons, 1 otherwise)
w3..w0 = width of the finger touch (not EF113)
b2 (on EF113 only, 0 otherwise), b2.R.L indicates one button pressed:
0 = none
1 = Left
2 = Right
3 = Middle (Left and Right)
4 = Forward
5 = Back
6 = Another one
7 = Another one
byte 4:
bit 7 6 5 4 3 2 1 0
. . . . . . y9 y8
p3 p1 p2 p0 . . y9 y8
p7..p0 = pressure (not EF113)
byte 5:
@@ -363,6 +435,11 @@ byte 5:
4.2.2 Two finger touch
~~~~~~~~~~~~~~~~
Note that the two pairs of coordinates are not exactly the coordinates of the
two fingers, but only the pair of the lower-left and upper-right coordinates.
So the actual fingers might be situated on the other diagonal of the square
defined by these two points.
byte 0:
bit 7 6 5 4 3 2 1 0
@@ -376,14 +453,14 @@ byte 1:
bit 7 6 5 4 3 2 1 0
ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0
ax8..ax0 = first finger absolute x value
ax8..ax0 = lower-left finger absolute x value
byte 2:
bit 7 6 5 4 3 2 1 0
ay7 ay6 ay5 ay4 ay3 ay2 ay1 ay0
ay8..ay0 = first finger absolute y value
ay8..ay0 = lower-left finger absolute y value
byte 3:
@@ -395,11 +472,11 @@ byte 4:
bit 7 6 5 4 3 2 1 0
bx7 bx6 bx5 bx4 bx3 bx2 bx1 bx0
bx8..bx0 = second finger absolute x value
bx8..bx0 = upper-right finger absolute x value
byte 5:
bit 7 6 5 4 3 2 1 0
by7 by6 by5 by4 by3 by2 by1 by0
by8..by0 = second finger absolute y value
by8..by0 = upper-right finger absolute y value
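Putting these fields together, a hedged C sketch of decoding the
two-finger packet; the ninth coordinate bits (ax8, ay8, bx8, by8) come
from fields this excerpt elides, so they are taken as already-extracted
inputs:

    struct touch_box {
        unsigned int x_low, y_low;      /* lower-left corner */
        unsigned int x_high, y_high;    /* upper-right corner */
    };

    static void decode_two_finger(const unsigned char packet[6],
                                  unsigned int ax8, unsigned int ay8,
                                  unsigned int bx8, unsigned int by8,
                                  struct touch_box *box)
    {
        box->x_low  = (ax8 << 8) | packet[1];   /* ax8..ax0 */
        box->y_low  = (ay8 << 8) | packet[2];   /* ay8..ay0 */
        box->x_high = (bx8 << 8) | packet[4];   /* bx8..bx0 */
        box->y_high = (by8 << 8) | packet[5];   /* by8..by0 */
        /* The fingers are not necessarily at these two corners; they
         * may sit on the other diagonal of this bounding box. */
    }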


@@ -9,6 +9,9 @@ peripherals with two wires. The outputs are phase-shifted by 90 degrees
and by triggering on falling and rising edges, the turn direction can
be determined.
Some encoders have both outputs low in stable states, whereas others also have
a stable state with both outputs high (half-period mode).
The phase diagram of these two outputs looks like this:
_____ _____ _____
@@ -26,6 +29,8 @@ The phase diagram of these two outputs looks like this:
|<-------->|
one step
|<-->|
one step (half-period mode)
For more information, please see
http://en.wikipedia.org/wiki/Rotary_encoder
@@ -34,6 +39,13 @@ For more information, please see
1. Events / state machine
-------------------------
In half-period mode, states a) and c) above are used to determine the
rotational direction based on the last stable state. Events are reported in
states b) and d) given that the new stable state is different from the last
(i.e. the rotation was not reversed half-way).
Otherwise, the following apply:
a) Rising edge on channel A, channel B in low state
This state is used to recognize a clockwise turn
@@ -96,6 +108,7 @@ static struct rotary_encoder_platform_data my_rotary_encoder_info = {
.gpio_b = GPIO_ROTARY_B,
.inverted_a = 0,
.inverted_b = 0,
.half_period = false,
};
static struct platform_device rotary_encoder_device = {


@@ -201,3 +201,16 @@ KBUILD_ENABLE_EXTRA_GCC_CHECKS
--------------------------------------------------
If enabled over the make command line with "W=1", it turns on additional
gcc -W... options for more extensive build-time checking.
KBUILD_BUILD_TIMESTAMP
--------------------------------------------------
Setting this to a date string overrides the timestamp used in the
UTS_VERSION definition (uname -v in the running kernel). The value has to
be a string that can be passed to date -d. The default value
is the output of the date command at one point during the build.
KBUILD_BUILD_USER, KBUILD_BUILD_HOST
--------------------------------------------------
These two variables allow you to override the user@host string displayed during
boot and in /proc/version. The default value is the output of the commands
whoami and host, respectively.
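A hypothetical invocation combining these overrides:

    make KBUILD_BUILD_USER=builder KBUILD_BUILD_HOST=buildfarm \
         KBUILD_BUILD_TIMESTAMP='2011-05-25 21:51'

would make uname -v and /proc/version report the given timestamp and
builder@buildfarm instead of the defaults.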


@@ -40,11 +40,13 @@ This document describes the Linux kernel Makefiles.
--- 6.6 Commands useful for building a boot image
--- 6.7 Custom kbuild commands
--- 6.8 Preprocessing linker scripts
--- 6.9 Generic header files
=== 7 Kbuild syntax for exported headers
--- 7.1 header-y
--- 7.2 objhdr-y
--- 7.3 destination-y
--- 7.4 generic-y
=== 8 Kbuild Variables
=== 9 Makefile language
@@ -499,6 +501,18 @@ more details, with real examples.
gcc >= 3.00. For gcc < 3.00, -malign-functions=4 is used.
Note: cc-option-align uses KBUILD_CFLAGS for $(CC) options
cc-disable-warning
cc-disable-warning checks if gcc supports a given warning and returns
the commandline switch to disable it. This special function is needed,
because gcc 4.4 and later accept any unknown -Wno-* option and only
warn about it if there is another warning in the source file.
Example:
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
In the above example, -Wno-unused-but-set-variable will be added to
KBUILD_CFLAGS only if gcc really accepts it.
cc-version
cc-version returns a numerical version of the $(CC) compiler version.
The format is <major><minor> where both are two digits. So for example
@@ -955,6 +969,11 @@ When kbuild executes, the following steps are followed (roughly):
used when linking modules. This is often a linker script.
From commandline LDFLAGS_MODULE shall be used (see kbuild.txt).
KBUILD_ARFLAGS Options for $(AR) when creating archives
$(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
mode) if this option is supported by $(AR). In deterministic mode, $(AR)
stores zero for member timestamps and uids/gids, so rebuilding the same
objects produces bit-identical archives.
--- 6.2 Add prerequisites to archprepare:
The archprepare: rule is used to list prerequisites that need to be
@@ -1209,6 +1228,14 @@ When kbuild executes, the following steps are followed (roughly):
The kbuild infrastructure for *lds file are used in several
architecture-specific files.
--- 6.9 Generic header files
The directory include/asm-generic contains the header files
that may be shared between individual architectures.
The recommended approach to using a generic header file is
to list the file in the Kbuild file.
See "7.4 generic-y" for further info on syntax etc.
=== 7 Kbuild syntax for exported headers
The kernel includes a set of headers that is exported to userspace.
@@ -1265,6 +1292,32 @@ See subsequent chapter for the syntax of the Kbuild file.
In the example above all exported headers in the Kbuild file
will be located in the directory "include/linux" when exported.
--- 7.4 generic-y
If an architecture uses a verbatim copy of a header from
include/asm-generic then this is listed in the file
arch/$(ARCH)/include/asm/Kbuild like this:
Example:
#arch/x86/include/asm/Kbuild
generic-y += termios.h
generic-y += rtc.h
During the prepare phase of the build a wrapper include
file is generated in the directory:
arch/$(ARCH)/include/generated/asm
When a header is exported where the architecture uses
the generic header a similar wrapper is generated as part
of the set of exported headers in the directory:
usr/include/asm
The generated wrapper will in both cases look like the following:
Example: termios.h
#include <asm-generic/termios.h>
=== 8 Kbuild Variables


@@ -2245,10 +2245,10 @@ F: drivers/gpu/drm/
F: include/drm/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Chris Wilson <chris@chris-wilson.co.uk>
M: Keith Packard <keithp@keithp.com>
L: intel-gfx@lists.freedesktop.org (subscribers-only)
L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6.git
S: Supported
F: drivers/gpu/drm/i915
F: include/drm/i915*
@@ -5592,10 +5592,11 @@ M: James Morris <jmorris@namei.org>
M: Eric Paris <eparis@parisplace.org>
L: selinux@tycho.nsa.gov (subscribers-only, general discussion)
W: http://selinuxproject.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
T: git git://git.infradead.org/users/eparis/selinux.git
S: Supported
F: include/linux/selinux*
F: security/selinux/
F: scripts/selinux/
APPARMOR SECURITY MODULE
M: John Johansen <john.johansen@canonical.com>


@@ -103,7 +103,7 @@ ifeq ("$(origin O)", "command line")
endif
ifeq ("$(origin W)", "command line")
export KBUILD_ENABLE_EXTRA_GCC_CHECKS := 1
export KBUILD_ENABLE_EXTRA_GCC_CHECKS := $(W)
endif
# That's our default target when none is given on the command line
@@ -349,7 +349,8 @@ CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
# Use LINUXINCLUDE when you must reference the include/ directory.
# Needed to be compatible with the O= option
LINUXINCLUDE := -I$(srctree)/arch/$(hdr-arch)/include -Iinclude \
LINUXINCLUDE := -I$(srctree)/arch/$(hdr-arch)/include \
-Iarch/$(hdr-arch)/include/generated -Iinclude \
$(if $(KBUILD_SRC), -I$(srctree)/include) \
-include include/generated/autoconf.h
@@ -382,6 +383,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
export KBUILD_ARFLAGS
# When compiling out-of-tree modules, put MODVERDIR in the module
# tree rather than in the kernel tree. The kernel tree might
@@ -416,6 +418,12 @@ ifneq ($(KBUILD_SRC),)
$(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
endif
# Support for using generic headers in asm-generic
PHONY += asm-generic
asm-generic:
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
obj=arch/$(SRCARCH)/include/generated/asm
# To make sure we do not include .config for any of the *config targets
# catch them early, and hand them over to scripts/kconfig/Makefile
# It is allowed to specify more targets when calling make, including
@@ -559,6 +567,10 @@ ifndef CONFIG_CC_STACKPROTECTOR
KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
endif
# This warning generated too much noise in a regular build.
# Use make W=1 to enable this warning (see scripts/Makefile.build)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
@@ -604,7 +616,7 @@ CHECKFLAGS += $(NOSTDINC_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
# disable pointer signed / unsigned warnings in gcc 4.0
KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
@@ -612,6 +624,9 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
@@ -797,15 +812,17 @@ ifdef CONFIG_KALLSYMS
# o The correct .tmp_kallsyms2.o is linked into the final vmlinux.
# o Verify that the System.map from vmlinux matches the map from
# .tmp_vmlinux2, just in case we did not generate kallsyms correctly.
# o If CONFIG_KALLSYMS_EXTRA_PASS is set, do an extra pass using
# o If 'make KALLSYMS_EXTRA_PASS=1" was used, do an extra pass using
# .tmp_vmlinux3 and .tmp_kallsyms3.o. This is only meant as a
# temporary bypass to allow the kernel to be built while the
# maintainers work out what went wrong with kallsyms.
ifdef CONFIG_KALLSYMS_EXTRA_PASS
last_kallsyms := 3
else
last_kallsyms := 2
ifdef KALLSYMS_EXTRA_PASS
ifneq ($(KALLSYMS_EXTRA_PASS),0)
last_kallsyms := 3
endif
endif
kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
@@ -816,7 +833,8 @@ define verify_kallsyms
$(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map
$(Q)cmp -s System.map .tmp_System.map || \
(echo Inconsistent kallsyms data; \
echo Try setting CONFIG_KALLSYMS_EXTRA_PASS; \
echo This is a bug - please report about it; \
echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround; \
rm .tmp_kallsyms* ; /bin/false )
endef
@@ -947,7 +965,7 @@ ifneq ($(KBUILD_SRC),)
endif
# prepare2 creates a makefile if using a separate output directory
prepare2: prepare3 outputmakefile
prepare2: prepare3 outputmakefile asm-generic
prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
include/config/auto.conf
@@ -1021,7 +1039,7 @@ hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
PHONY += __headers
__headers: include/linux/version.h scripts_basic FORCE
__headers: include/linux/version.h scripts_basic asm-generic FORCE
$(Q)$(MAKE) $(build)=scripts build_unifdef
PHONY += headers_install_all
@@ -1136,7 +1154,8 @@ CLEAN_FILES += vmlinux System.map \
.tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
# Directories & files removed with 'make mrproper'
MRPROPER_DIRS += include/config usr/include include/generated
MRPROPER_DIRS += include/config usr/include include/generated \
arch/*/include/generated
MRPROPER_FILES += .config .config.old .version .old_version \
include/linux/version.h \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
@@ -1267,7 +1286,11 @@ help:
@echo ' make O=dir [targets] Locate all output files in "dir", including .config'
@echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)'
@echo ' make C=2 [targets] Force check of all c source with $$CHECK'
@echo ' make W=1 [targets] Enable extra gcc checks'
@echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where'
@echo ' 1: warnings which may be relevant and do not occur too often'
@echo ' 2: warnings which occur quite often but may still be relevant'
@echo ' 3: more obscure warnings, can most likely be ignored'
@echo ' Multiple levels can be combined with W=12 or W=123'
@echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
@echo ''
@echo 'Execute "make" or "make all" to build all targets marked with [*] '
@@ -1291,6 +1314,7 @@ $(help-board-dirs): help-%:
# Documentation targets
# ---------------------------------------------------------------------------
%docs: scripts_basic FORCE
$(Q)$(MAKE) $(build)=scripts build_docproc
$(Q)$(MAKE) $(build)=Documentation/DocBook $@
else # KBUILD_EXTMOD
@@ -1375,7 +1399,7 @@ endif # KBUILD_EXTMOD
clean: $(clean-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)
@find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.symtypes' -o -name 'modules.order' \


@@ -39,7 +39,7 @@ SECTIONS
__init_begin = ALIGN(PAGE_SIZE);
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
PERCPU_SECTION(L1_CACHE_BYTES)
/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
needed for the THREAD_SIZE aligned init_task gets freed after init */
. = ALIGN(THREAD_SIZE);


@@ -82,7 +82,7 @@ SECTIONS
#endif
}
PERCPU(32, PAGE_SIZE)
PERCPU_SECTION(32)
#ifndef CONFIG_XIP_KERNEL
. = ALIGN(PAGE_SIZE);


@@ -50,13 +50,11 @@ struct tegra_kbc_platform_data {
unsigned int debounce_cnt;
unsigned int repeat_cnt;
unsigned int wake_cnt; /* 0:wake on any key >1:wake on wake_cfg */
const struct tegra_kbc_wake_key *wake_cfg;
struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
const struct matrix_keymap_data *keymap_data;
bool wakeup;
bool use_fn_map;
bool use_ghost_filter;
};
#endif


@@ -204,7 +204,7 @@ static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
},
};
#define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, _sm) \
#define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, t_out, _sm) \
static struct nmk_i2c_controller u8500_i2c##id##_data = { \
/* \
* slave data setup time, which is \
@@ -219,19 +219,21 @@ static struct nmk_i2c_controller u8500_i2c##id##_data = { \
.rft = _rft, \
/* std. mode operation */ \
.clk_freq = clk, \
/* Slave response timeout(ms) */\
.timeout = t_out, \
.sm = _sm, \
}
/*
* The board uses 4 i2c controllers, initialize all of
* them with slave data setup time of 250 ns,
* Tx & Rx FIFO threshold values as 1 and standard
* Tx & Rx FIFO threshold values as 8 and standard
* mode of operation
*/
U8500_I2C_CONTROLLER(0, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
U8500_I2C_CONTROLLER(1, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
U8500_I2C_CONTROLLER(2, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
U8500_I2C_CONTROLLER(3, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
static void __init mop500_i2c_init(void)
{


@@ -11,8 +11,8 @@
enum i2c_freq_mode {
I2C_FREQ_MODE_STANDARD, /* up to 100 Kb/s */
I2C_FREQ_MODE_FAST, /* up to 400 Kb/s */
I2C_FREQ_MODE_HIGH_SPEED, /* up to 3.4 Mb/s */
I2C_FREQ_MODE_FAST_PLUS, /* up to 1 Mb/s */
I2C_FREQ_MODE_HIGH_SPEED /* up to 3.4 Mb/s */
};
/**
@@ -24,13 +24,15 @@ enum i2c_freq_mode {
* to the values of 14, 6, 2 for a 48 MHz i2c clk
* @tft: Tx FIFO Threshold in bytes
* @rft: Rx FIFO Threshold in bytes
* @timeout: Slave response timeout (ms)
* @sm: speed mode
*/
struct nmk_i2c_controller {
unsigned long clk_freq;
unsigned short slsu;
unsigned char tft;
unsigned char rft;
unsigned char tft;
unsigned char rft;
int timeout;
enum i2c_freq_mode sm;
};


@@ -136,7 +136,7 @@ SECTIONS
. = ALIGN(16);
INIT_DATA_SECTION(16)
PERCPU(32, PAGE_SIZE)
PERCPU_SECTION(32)
.exit.data :
{


@@ -102,7 +102,7 @@ SECTIONS
#endif
__vmlinux_end = .; /* Last address of the physical file. */
#ifdef CONFIG_ETRAX_ARCH_V32
PERCPU(32, PAGE_SIZE)
PERCPU_SECTION(32)
.init.ramfs : {
INIT_RAM_FS


@@ -37,7 +37,7 @@ SECTIONS
_einittext = .;
INIT_DATA_SECTION(8)
PERCPU(L1_CACHE_BYTES, 4096)
PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
__init_end = .;


@@ -54,7 +54,7 @@ SECTIONS
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
PERCPU(32, PAGE_SIZE)
PERCPU_SECTION(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */


@@ -5,6 +5,7 @@ config M68K
select HAVE_AOUT if MMU
select GENERIC_ATOMIC64 if MMU
select HAVE_GENERIC_HARDIRQS if !MMU
select GENERIC_IRQ_SHOW if !MMU
config RWSEM_GENERIC_SPINLOCK
bool


@@ -246,23 +246,7 @@ static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
return retval;
}
#define ext2_set_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
ret = __test_and_set_bit_le((nr), (addr)); \
spin_unlock(lock); \
ret; \
})
#define ext2_clear_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
ret = __test_and_clear_bit_le((nr), (addr)); \
spin_unlock(lock); \
ret; \
})
#include <asm-generic/bitops/ext2-atomic.h>
static inline int test_bit_le(int nr, const volatile void *addr)
{


@@ -144,8 +144,10 @@ static inline void io_insl(unsigned int addr, void *buf, int len)
#define IOMAP_NOCACHE_NONSER 2
#define IOMAP_WRITETHROUGH 3
extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
static inline void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
return (void *) physaddr;
}
static inline void *ioremap(unsigned long physaddr, unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
@@ -163,7 +165,7 @@ static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
extern void iounmap(void *addr);
#define iounmap(addr) do { } while(0)
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem


@@ -1,5 +1,105 @@
#ifdef CONFIG_MMU
#include "asm-offsets_mm.c"
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#define ASM_OFFSETS_C
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/kbuild.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
#include <linux/font.h>
int main(void)
{
/* offsets into the task struct */
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
/* offsets into the thread_info struct */
DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
/* offsets into the pt_regs */
DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
/* bitfields are a bit difficult */
#ifdef CONFIG_COLDFIRE
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
#else
#include "asm-offsets_no.c"
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
#endif
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
/* signal defines */
DEFINE(LSIGSEGV, SIGSEGV);
DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
DEFINE(LSIGTRAP, SIGTRAP);
DEFINE(LTRAP_TRACE, TRAP_TRACE);
#ifdef CONFIG_MMU
/* offsets into the bi_record struct */
DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
DEFINE(BIR_DATA, offsetof(struct bi_record, data));
/* offsets into font_desc (drivers/video/console/font.h) */
DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
/* offsets into the custom struct */
DEFINE(CUSTOMBASE, &amiga_custom);
DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
DEFINE(CIAABASE, &ciaa);
DEFINE(CIABBASE, &ciab);
DEFINE(C_PRA, offsetof(struct CIA, pra));
DEFINE(ZTWOBASE, zTwoBase);
#endif
return 0;
}


@@ -1,100 +0,0 @@
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#define ASM_OFFSETS_C
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/kbuild.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
#include <linux/font.h>
int main(void)
{
/* offsets into the task struct */
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
#ifdef CONFIG_MMU
DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
#endif
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
/* offsets into the thread_info struct */
DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
/* offsets into the pt_regs */
DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
/* bitfields are a bit difficult */
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
/* offsets into the bi_record struct */
DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
DEFINE(BIR_DATA, offsetof(struct bi_record, data));
/* offsets into font_desc (drivers/video/console/font.h) */
DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
/* signal defines */
DEFINE(LSIGSEGV, SIGSEGV);
DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
DEFINE(LSIGTRAP, SIGTRAP);
DEFINE(LTRAP_TRACE, TRAP_TRACE);
/* offsets into the custom struct */
DEFINE(CUSTOMBASE, &amiga_custom);
DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
DEFINE(CIAABASE, &ciaa);
DEFINE(CIABBASE, &ciab);
DEFINE(C_PRA, offsetof(struct CIA, pra));
DEFINE(ZTWOBASE, zTwoBase);
return 0;
}


@@ -1,76 +0,0 @@
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/kbuild.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/thread_info.h>
int main(void)
{
/* offsets into the task struct */
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
/* offsets into the pt_regs */
DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
#ifdef CONFIG_COLDFIRE
/* bitfields are a bit difficult */
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
#else
/* bitfields are a bit difficult */
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
#endif
/* signal defines */
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(SEGV_MAPERR, SEGV_MAPERR);
DEFINE(SIGTRAP, SIGTRAP);
DEFINE(TRAP_TRACE, TRAP_TRACE);
DEFINE(PT_PTRACED, PT_PTRACED);
/* Offsets in thread_info structure */
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
return 0;
}


@@ -24,7 +24,6 @@
* linux 2.4 support David McCullough <davidm@snapgear.com>
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>


@@ -28,31 +28,3 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
set_irq_regs(oldregs);
}
int show_interrupts(struct seq_file *p, void *v)
{
struct irqaction *ap;
int irq = *((loff_t *) v);
if (irq == 0)
seq_puts(p, " CPU0\n");
if (irq < NR_IRQS) {
struct irq_desc *desc = irq_to_desc(irq);
ap = desc->action;
if (ap) {
seq_printf(p, "%3d: ", irq);
seq_printf(p, "%10u ", kstat_irqs(irq));
seq_printf(p, "%14s ", irq_desc_get_chip(desc)->name);
seq_printf(p, "%s", ap->name);
for (ap = ap->next; ap; ap = ap->next)
seq_printf(p, ", %s", ap->name);
seq_putc(p, '\n');
}
}
return 0;
}


@@ -1,5 +1,33 @@
#ifdef CONFIG_MMU
#include "m68k_ksyms_mm.c"
#else
#include "m68k_ksyms_no.c"
#include <linux/module.h>
asmlinkage long long __ashldi3 (long long, int);
asmlinkage long long __ashrdi3 (long long, int);
asmlinkage long long __lshrdi3 (long long, int);
asmlinkage long long __muldi3 (long long, long long);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
#if !defined(__mc68020__) && !defined(__mc68030__) && \
!defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__)
/*
* Simpler 68k and ColdFire parts also need a few other gcc functions.
*/
extern long long __divsi3(long long, long long);
extern long long __modsi3(long long, long long);
extern long long __mulsi3(long long, long long);
extern long long __udivsi3(long long, long long);
extern long long __umodsi3(long long, long long);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
#endif


@@ -1,16 +0,0 @@
#include <linux/module.h>
asmlinkage long long __ashldi3 (long long, int);
asmlinkage long long __ashrdi3 (long long, int);
asmlinkage long long __lshrdi3 (long long, int);
asmlinkage long long __muldi3 (long long, long long);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);


@@ -1,78 +0,0 @@
#include <linux/module.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/machdep.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/checksum.h>
#include <asm/current.h>
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
/* platform dependent support */
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(kernel_thread);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_nocheck);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
* doesn't really matter since they're not versioned).
*/
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __divsi3(void);
extern void __lshrdi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __mulsi3(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
/* gcc lib functions */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
#ifdef CONFIG_COLDFIRE
extern unsigned int *dma_device_address;
extern unsigned long dma_base_addr, _ramend;
EXPORT_SYMBOL(dma_base_addr);
EXPORT_SYMBOL(dma_device_address);
EXPORT_SYMBOL(_ramend);
extern asmlinkage void trap(void);
extern void *_ramvec;
EXPORT_SYMBOL(trap);
EXPORT_SYMBOL(_ramvec);
#endif /* CONFIG_COLDFIRE */


@@ -151,6 +151,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
set_fs(fs);
return retval;
}
EXPORT_SYMBOL(kernel_thread);
void flush_thread(void)
{
@@ -283,6 +284,7 @@ int dump_fpu(struct pt_regs *regs, struct user_m68kfp_struct *fpu)
#endif
return 1;
}
EXPORT_SYMBOL(dump_fpu);
/*
* Generic dumping code. Used for panic and debug.


@@ -1,5 +1,580 @@
/*
* linux/arch/m68k/kernel/sys_m68k.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/m68k
* platform.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU
#include "sys_m68k_mm.c"
#include <asm/tlb.h>
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
/*
* This is wrong for sun3 - there PAGE_SIZE is 8Kb,
* so we need to shift the argument down by 1; m68k mmap64(3)
* (in libc) expects the last argument of mmap2 in 4Kb units.
*/
return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr) \
({ \
unsigned long _mmusr, _paddr; \
\
__asm__ __volatile__ (".chip 68040\n\t" \
"ptestr (%1)\n\t" \
"movec %%mmusr,%0\n\t" \
".chip 68k" \
: "=r" (_mmusr) \
: "a" (vaddr)); \
_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
_paddr; \
})
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
/* This nop is needed for some broken versions of the 68040. */
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
if ((paddr = virt_to_phys_040(addr))) {
paddr += addr & ~(PAGE_MASK | 15);
len = (len + (addr & 15) + 15) >> 4;
} else {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
len = (len + 15) >> 4;
}
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* No need to page align here since it is done by
* virt_to_phys_040().
*/
addr += PAGE_SIZE;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_040(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
#define virt_to_phys_060(vaddr) \
({ \
unsigned long paddr; \
__asm__ __volatile__ (".chip 68060\n\t" \
"plpar (%0)\n\t" \
".chip 68k" \
: "=a" (paddr) \
: "0" (vaddr)); \
(paddr); /* XXX */ \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
/*
* 68060 manual says:
* cpush %dc : flush DC, remains valid (with our %cacr setup)
* cpush %ic : invalidate IC
* cpush %bc : flush DC + invalidate IC
*/
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
len += addr & 15;
addr &= -16;
if (!(paddr = virt_to_phys_060(addr))) {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
}
len = (len + 15) >> 4;
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* We just want to jump to the first cache line
* in the next page.
*/
addr += PAGE_SIZE;
addr &= PAGE_MASK;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
addr &= PAGE_MASK; /* Workaround for bug in some
revisions of the 68060 */
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_060(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
struct vm_area_struct *vma;
int ret = -EINVAL;
if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
cache & ~FLUSH_CACHE_BOTH)
goto out;
if (scope == FLUSH_SCOPE_ALL) {
/* Only the superuser may explicitly flush the whole cache. */
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
} else {
/*
* Verify that the specified address region actually belongs
* to this process.
*/
vma = find_vma (current->mm, addr);
ret = -EINVAL;
/* Check for overflow. */
if (addr + len < addr)
goto out;
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
goto out;
}
if (CPU_IS_020_OR_030) {
if (scope == FLUSH_SCOPE_LINE && len < 256) {
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 4;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x400;
len >>= 2;
while (len--) {
__asm__ __volatile__ ("movec %1, %%caar\n\t"
"movec %0, %%cacr"
: /* no outputs */
: "r" (cacr), "r" (addr));
addr += 4;
}
} else {
/* Flush the whole cache, even if page granularity requested. */
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 8;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x800;
__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
}
ret = 0;
goto out;
} else {
/*
* 040 or 060: don't blindly trust 'scope', someone could
* try to flush a few megs of memory.
*/
if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
scope=FLUSH_SCOPE_PAGE;
if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
scope=FLUSH_SCOPE_ALL;
if (CPU_IS_040) {
ret = cache_flush_040 (addr, scope, cache, len);
} else if (CPU_IS_060) {
ret = cache_flush_060 (addr, scope, cache, len);
}
}
out:
return ret;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
/* This was borrowed from ARM's implementation. */
for (;;) {
struct mm_struct *mm = current->mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
unsigned long mem_value;
down_read(&mm->mmap_sem);
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, (unsigned long)mem);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
if (!pte_present(*pte) || !pte_dirty(*pte)
|| !pte_write(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
return mem_value;
bad_access:
up_read(&mm->mmap_sem);
/* This is not necessarily a bad access, we can get here if
a memory we're trying to write to should be copied-on-write.
Make the kernel do the necessary page stuff, then re-iterate.
Simulate a write access fault to do that. */
{
/* The first argument of the function corresponds to
D1, which is the first field of struct pt_regs. */
struct pt_regs *fp = (struct pt_regs *)&newval;
/* '3' is an RMW flag. */
if (do_page_fault(fp, (unsigned long)mem, 3))
/* If the do_page_fault() failed, we don't
have anything meaningful to return.
There should be a SIGSEGV pending for
the process. */
return 0xdeadbeef;
}
}
}
#else
#include "sys_m68k_no.c"
#endif
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
flush_cache_all();
return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
struct mm_struct *mm = current->mm;
unsigned long mem_value;
down_read(&mm->mmap_sem);
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
up_read(&mm->mmap_sem);
return mem_value;
}
#endif /* CONFIG_MMU */
asmlinkage int sys_getpagesize(void)
{
return PAGE_SIZE;
}
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
register long __res asm ("%d0") = __NR_execve;
register long __a asm ("%d1") = (long)(filename);
register long __b asm ("%d2") = (long)(argv);
register long __c asm ("%d3") = (long)(envp);
asm volatile ("trap #0" : "+d" (__res)
: "d" (__a), "d" (__b), "d" (__c));
return __res;
}
asmlinkage unsigned long sys_get_thread_area(void)
{
return current_thread_info()->tp_value;
}
asmlinkage int sys_set_thread_area(unsigned long tp)
{
current_thread_info()->tp_value = tp;
return 0;
}
asmlinkage int sys_atomic_barrier(void)
{
/* no code needed for uniprocs */
return 0;
}


@@ -1,546 +0,0 @@
/*
* linux/arch/m68k/kernel/sys_m68k.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/m68k
* platform.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
/*
* This is wrong for sun3 - there PAGE_SIZE is 8Kb,
* so we need to shift the argument down by 1; m68k mmap64(3)
* (in libc) expects the last argument of mmap2 in 4Kb units.
*/
return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr) \
({ \
unsigned long _mmusr, _paddr; \
\
__asm__ __volatile__ (".chip 68040\n\t" \
"ptestr (%1)\n\t" \
"movec %%mmusr,%0\n\t" \
".chip 68k" \
: "=r" (_mmusr) \
: "a" (vaddr)); \
_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
_paddr; \
})
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
/* This nop is needed for some broken versions of the 68040. */
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
if ((paddr = virt_to_phys_040(addr))) {
paddr += addr & ~(PAGE_MASK | 15);
len = (len + (addr & 15) + 15) >> 4;
} else {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
len = (len + 15) >> 4;
}
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* No need to page align here since it is done by
* virt_to_phys_040().
*/
addr += PAGE_SIZE;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_040(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
#define virt_to_phys_060(vaddr) \
({ \
unsigned long paddr; \
__asm__ __volatile__ (".chip 68060\n\t" \
"plpar (%0)\n\t" \
".chip 68k" \
: "=a" (paddr) \
: "0" (vaddr)); \
(paddr); /* XXX */ \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
/*
* 68060 manual says:
* cpush %dc : flush DC, remains valid (with our %cacr setup)
* cpush %ic : invalidate IC
* cpush %bc : flush DC + invalidate IC
*/
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
len += addr & 15;
addr &= -16;
if (!(paddr = virt_to_phys_060(addr))) {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
}
len = (len + 15) >> 4;
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* We just want to jump to the first cache line
* in the next page.
*/
addr += PAGE_SIZE;
addr &= PAGE_MASK;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
addr &= PAGE_MASK; /* Workaround for bug in some
revisions of the 68060 */
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_060(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
struct vm_area_struct *vma;
int ret = -EINVAL;
if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
cache & ~FLUSH_CACHE_BOTH)
goto out;
if (scope == FLUSH_SCOPE_ALL) {
/* Only the superuser may explicitly flush the whole cache. */
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
} else {
/*
* Verify that the specified address region actually belongs
* to this process.
*/
vma = find_vma (current->mm, addr);
ret = -EINVAL;
/* Check for overflow. */
if (addr + len < addr)
goto out;
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
goto out;
}
if (CPU_IS_020_OR_030) {
if (scope == FLUSH_SCOPE_LINE && len < 256) {
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 4;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x400;
len >>= 2;
while (len--) {
__asm__ __volatile__ ("movec %1, %%caar\n\t"
"movec %0, %%cacr"
: /* no outputs */
: "r" (cacr), "r" (addr));
addr += 4;
}
} else {
/* Flush the whole cache, even if page granularity requested. */
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 8;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x800;
__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
}
ret = 0;
goto out;
} else {
/*
* 040 or 060: don't blindly trust 'scope'; someone could
* try to flush a few megs of memory.
*/
if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
scope=FLUSH_SCOPE_PAGE;
if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
scope=FLUSH_SCOPE_ALL;
if (CPU_IS_040) {
ret = cache_flush_040 (addr, scope, cache, len);
} else if (CPU_IS_060) {
ret = cache_flush_060 (addr, scope, cache, len);
}
}
out:
return ret;
}
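/*
 * Editor's sketch, not part of this file: how userspace might invoke
 * this syscall on m68k to make a freshly written code buffer safe to
 * execute. Assumes an m68k toolchain with <asm/cachectl.h> and
 * syscall(2); the wrapper name is made up.
 */
#if 0	/* illustrative userspace code */
#include <asm/cachectl.h>
#include <sys/syscall.h>
#include <unistd.h>
static int flush_jit_region(void *addr, unsigned long len)
{
	return syscall(SYS_cacheflush, (unsigned long)addr,
		       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
}
#endif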
asmlinkage int sys_getpagesize(void)
{
return PAGE_SIZE;
}
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
register long __res asm ("%d0") = __NR_execve;
register long __a asm ("%d1") = (long)(filename);
register long __b asm ("%d2") = (long)(argv);
register long __c asm ("%d3") = (long)(envp);
asm volatile ("trap #0" : "+d" (__res)
: "d" (__a), "d" (__b), "d" (__c));
return __res;
}
asmlinkage unsigned long sys_get_thread_area(void)
{
return current_thread_info()->tp_value;
}
asmlinkage int sys_set_thread_area(unsigned long tp)
{
current_thread_info()->tp_value = tp;
return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
/* This was borrowed from ARM's implementation. */
for (;;) {
struct mm_struct *mm = current->mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
unsigned long mem_value;
down_read(&mm->mmap_sem);
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, (unsigned long)mem);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
if (!pte_present(*pte) || !pte_dirty(*pte)
|| !pte_write(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
return mem_value;
bad_access:
up_read(&mm->mmap_sem);
/* This is not necessarily a bad access: we can get here if
memory we're trying to write to should be copied-on-write.
Make the kernel do the necessary page handling, then retry.
Simulate a write access fault to do that. */
{
/* The first argument of the function corresponds to
D1, which is the first field of struct pt_regs. */
struct pt_regs *fp = (struct pt_regs *)&newval;
/* '3' is an RMW flag. */
if (do_page_fault(fp, (unsigned long)mem, 3))
/* If the do_page_fault() failed, we don't
have anything meaningful to return.
There should be a SIGSEGV pending for
the process. */
return 0xdeadbeef;
}
}
}
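/*
 * Editor's note, a hedged aside: the return-the-old-value semantics
 * implemented above match GCC's __sync_val_compare_and_swap builtin.
 * Portable C equivalent, ignoring the register-based trap interface:
 */
#if 0	/* illustrative only */
static inline int cas32(int *mem, int oldval, int newval)
{
	/* Store newval iff *mem == oldval; return the value found. */
	return __sync_val_compare_and_swap(mem, oldval, newval);
}
#endif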
asmlinkage int sys_atomic_barrier(void)
{
/* no code needed for uniprocs */
return 0;
}

@@ -1,94 +0,0 @@
/*
* linux/arch/m68knommu/kernel/sys_m68k.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/m68k
* platform.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <linux/fs.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
flush_cache_all();
return(0);
}
asmlinkage int sys_getpagesize(void)
{
return PAGE_SIZE;
}
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
register long __res asm ("%d0") = __NR_execve;
register long __a asm ("%d1") = (long)(filename);
register long __b asm ("%d2") = (long)(argv);
register long __c asm ("%d3") = (long)(envp);
asm volatile ("trap #0" : "+d" (__res)
: "d" (__a), "d" (__b), "d" (__c));
return __res;
}
asmlinkage unsigned long sys_get_thread_area(void)
{
return current_thread_info()->tp_value;
}
asmlinkage int sys_set_thread_area(unsigned long tp)
{
current_thread_info()->tp_value = tp;
return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
struct mm_struct *mm = current->mm;
unsigned long mem_value;
down_read(&mm->mmap_sem);
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
up_read(&mm->mmap_sem);
return mem_value;
}
asmlinkage int sys_atomic_barrier(void)
{
/* no code needed for uniprocs */
return 0;
}

@@ -11,7 +11,6 @@
* Linux/m68k support by Hamish Macdonald
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#ifndef CONFIG_MMU

@@ -1,5 +1,14 @@
#
# Makefile for m68k-specific library files..
#
lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
memcpy.o memset.o memmove.o
ifdef CONFIG_MMU
include arch/m68k/lib/Makefile_mm
lib-y += string.o uaccess.o checksum_mm.o
else
include arch/m68k/lib/Makefile_no
lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o delay.o checksum_no.o
endif

@@ -1,6 +0,0 @@
#
# Makefile for m68k-specific library files..
#
lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
checksum.o string.o uaccess.o

@@ -1,7 +0,0 @@
#
# Makefile for m68knommu specific library files..
#
lib-y := ashldi3.o ashrdi3.o lshrdi3.o \
muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
checksum.o memcpy.o memmove.o memset.o delay.o

@@ -1,5 +0,0 @@
#ifdef CONFIG_MMU
#include "checksum_mm.c"
#else
#include "checksum_no.c"
#endif

@@ -101,6 +101,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return (__force __sum16)~do_csum(iph,ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif
/*
@@ -140,6 +141,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
memcpy(dst, (__force const void *)src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
/*
* copy from ds while checksumming, otherwise like csum_partial
@@ -151,3 +153,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);

@@ -1,62 +1,80 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/string.h>
void * memcpy(void * to, const void * from, size_t n)
void *memcpy(void *to, const void *from, size_t n)
{
#ifdef CONFIG_COLDFIRE
void *xto = to;
size_t temp;
void *xto = to;
size_t temp, temp1;
if (!n)
return xto;
if ((long) to & 1)
{
char *cto = to;
const char *cfrom = from;
*cto++ = *cfrom++;
to = cto;
from = cfrom;
n--;
}
if (n > 2 && (long) to & 2)
{
short *sto = to;
const short *sfrom = from;
*sto++ = *sfrom++;
to = sto;
from = sfrom;
n -= 2;
}
temp = n >> 2;
if (temp)
{
long *lto = to;
const long *lfrom = from;
for (; temp; temp--)
*lto++ = *lfrom++;
to = lto;
from = lfrom;
}
if (n & 2)
{
short *sto = to;
const short *sfrom = from;
*sto++ = *sfrom++;
to = sto;
from = sfrom;
}
if (n & 1)
{
char *cto = to;
const char *cfrom = from;
*cto = *cfrom;
}
return xto;
if (!n)
return xto;
if ((long)to & 1) {
char *cto = to;
const char *cfrom = from;
*cto++ = *cfrom++;
to = cto;
from = cfrom;
n--;
}
if (n > 2 && (long)to & 2) {
short *sto = to;
const short *sfrom = from;
*sto++ = *sfrom++;
to = sto;
from = sfrom;
n -= 2;
}
temp = n >> 2;
if (temp) {
long *lto = to;
const long *lfrom = from;
#if defined(__mc68020__) || defined(__mc68030__) || \
defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
asm volatile (
" movel %2,%3\n"
" andw #7,%3\n"
" lsrl #3,%2\n"
" negw %3\n"
" jmp %%pc@(1f,%3:w:2)\n"
"4: movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
"1: dbra %2,4b\n"
" clrw %2\n"
" subql #1,%2\n"
" jpl 4b"
: "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
: "0" (lfrom), "1" (lto), "2" (temp));
#else
const char *c_from = from;
char *c_to = to;
while (n-- > 0)
*c_to++ = *c_from++;
return((void *) to);
for (; temp; temp--)
*lto++ = *lfrom++;
#endif
to = lto;
from = lfrom;
}
if (n & 2) {
short *sto = to;
const short *sfrom = from;
*sto++ = *sfrom++;
to = sto;
from = sfrom;
}
if (n & 1) {
char *cto = to;
const char *cfrom = from;
*cto = *cfrom;
}
return xto;
}
EXPORT_SYMBOL(memcpy);
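/*
 * Editor's sketch of a host-side sanity check, not kernel code: a
 * misaligned start plus an odd length drives the copy through every
 * fixup branch above (leading byte, leading word, longword loop,
 * trailing word, trailing byte).
 */
#if 0
#include <assert.h>
#include <string.h>
int main(void)
{
	char src[64], dst[64];
	int i;
	for (i = 0; i < 64; i++)
		src[i] = (char)i;
	memcpy(dst + 1, src + 1, 37);	/* odd offset, odd length */
	assert(memcmp(dst + 1, src + 1, 37) == 0);
	return 0;
}
#endif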

@@ -4,8 +4,6 @@
* for more details.
*/
#define __IN_STRING_C
#include <linux/module.h>
#include <linux/string.h>

@@ -1,47 +1,75 @@
#include <linux/types.h>
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
void * memset(void * s, int c, size_t count)
#include <linux/module.h>
#include <linux/string.h>
void *memset(void *s, int c, size_t count)
{
void *xs = s;
size_t temp;
void *xs = s;
size_t temp;
if (!count)
return xs;
c &= 0xff;
c |= c << 8;
c |= c << 16;
if ((long) s & 1)
{
char *cs = s;
*cs++ = c;
s = cs;
count--;
}
if (count > 2 && (long) s & 2)
{
short *ss = s;
*ss++ = c;
s = ss;
count -= 2;
}
temp = count >> 2;
if (temp)
{
long *ls = s;
for (; temp; temp--)
*ls++ = c;
s = ls;
}
if (count & 2)
{
short *ss = s;
*ss++ = c;
s = ss;
}
if (count & 1)
{
char *cs = s;
*cs = c;
}
return xs;
if (!count)
return xs;
c &= 0xff;
c |= c << 8;
c |= c << 16;
if ((long)s & 1) {
char *cs = s;
*cs++ = c;
s = cs;
count--;
}
if (count > 2 && (long)s & 2) {
short *ss = s;
*ss++ = c;
s = ss;
count -= 2;
}
temp = count >> 2;
if (temp) {
long *ls = s;
#if defined(__mc68020__) || defined(__mc68030__) || \
defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
size_t temp1;
asm volatile (
" movel %1,%2\n"
" andw #7,%2\n"
" lsrl #3,%1\n"
" negw %2\n"
" jmp %%pc@(2f,%2:w:2)\n"
"1: movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
"2: dbra %1,1b\n"
" clrw %1\n"
" subql #1,%1\n"
" jpl 1b"
: "=a" (ls), "=d" (temp), "=&d" (temp1)
: "d" (c), "0" (ls), "1" (temp));
#else
for (; temp; temp--)
*ls++ = c;
#endif
s = ls;
}
if (count & 2) {
short *ss = s;
*ss++ = c;
s = ss;
}
if (count & 1) {
char *cs = s;
*cs = c;
}
return xs;
}
EXPORT_SYMBOL(memset);

@@ -1,5 +1,98 @@
#ifdef CONFIG_MMU
#include "muldi3_mm.c"
/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
gcc-2.7.2.3/longlong.h which is: */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#if defined(__mc68020__) || defined(__mc68030__) || \
defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("mulu%.l %3,%1:%0" \
: "=d" ((USItype)(w0)), \
"=d" ((USItype)(w1)) \
: "%0" ((USItype)(u)), \
"dmi" ((USItype)(v)))
#else
#include "muldi3_no.c"
#define SI_TYPE_SIZE 32
#define __BITS4 (SI_TYPE_SIZE / 4)
#define __ll_B (1L << (SI_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)
#define umul_ppmm(w1, w0, u, v) \
do { \
USItype __x0, __x1, __x2, __x3; \
USItype __ul, __vl, __uh, __vh; \
\
__ul = __ll_lowpart (u); \
__uh = __ll_highpart (u); \
__vl = __ll_lowpart (v); \
__vh = __ll_highpart (v); \
\
__x0 = (USItype) __ul * __vl; \
__x1 = (USItype) __ul * __vh; \
__x2 = (USItype) __uh * __vl; \
__x3 = (USItype) __uh * __vh; \
\
__x1 += __ll_highpart (__x0);/* this can't give carry */ \
__x1 += __x2; /* but this indeed can */ \
if (__x1 < __x2) /* did we get it? */ \
__x3 += __ll_B; /* yes, add it in the proper pos. */ \
\
(w1) = __x3 + __ll_highpart (__x1); \
(w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
} while (0)
#endif
#define __umulsidi3(u, v) \
({DIunion __w; \
umul_ppmm (__w.s.high, __w.s.low, u, v); \
__w.ll; })
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));
typedef int word_type __attribute__ ((mode (__word__)));
struct DIstruct {SItype high, low;};
typedef union
{
struct DIstruct s;
DItype ll;
} DIunion;
DItype
__muldi3 (DItype u, DItype v)
{
DIunion w;
DIunion uu, vv;
uu.ll = u,
vv.ll = v;
w.ll = __umulsidi3 (uu.s.low, vv.s.low);
w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+ (USItype) uu.s.high * (USItype) vv.s.low);
return w.ll;
}
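/*
 * Editor's reference sketch: the half-word decomposition in umul_ppmm
 * computes the same full 64-bit product as the C below. libgcc cannot
 * write it this way here, since it is implementing that very arithmetic.
 */
#if 0
static void umul_ppmm_ref(unsigned int *w1, unsigned int *w0,
			  unsigned int u, unsigned int v)
{
	unsigned long long p = (unsigned long long)u * v;
	*w1 = (unsigned int)(p >> 32);	/* high 32 bits */
	*w0 = (unsigned int)p;		/* low 32 bits */
}
#endif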

@@ -1,63 +0,0 @@
/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
gcc-2.7.2.3/longlong.h which is: */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#define BITS_PER_UNIT 8
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("mulu%.l %3,%1:%0" \
: "=d" ((USItype)(w0)), \
"=d" ((USItype)(w1)) \
: "%0" ((USItype)(u)), \
"dmi" ((USItype)(v)))
#define __umulsidi3(u, v) \
({DIunion __w; \
umul_ppmm (__w.s.high, __w.s.low, u, v); \
__w.ll; })
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));
typedef int word_type __attribute__ ((mode (__word__)));
struct DIstruct {SItype high, low;};
typedef union
{
struct DIstruct s;
DItype ll;
} DIunion;
DItype
__muldi3 (DItype u, DItype v)
{
DIunion w;
DIunion uu, vv;
uu.ll = u,
vv.ll = v;
w.ll = __umulsidi3 (uu.s.low, vv.s.low);
w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+ (USItype) uu.s.high * (USItype) vv.s.low);
return w.ll;
}

@@ -1,86 +0,0 @@
/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
gcc-2.7.2.3/longlong.h which is: */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#define BITS_PER_UNIT 8
#define SI_TYPE_SIZE 32
#define __BITS4 (SI_TYPE_SIZE / 4)
#define __ll_B (1L << (SI_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)
#define umul_ppmm(w1, w0, u, v) \
do { \
USItype __x0, __x1, __x2, __x3; \
USItype __ul, __vl, __uh, __vh; \
\
__ul = __ll_lowpart (u); \
__uh = __ll_highpart (u); \
__vl = __ll_lowpart (v); \
__vh = __ll_highpart (v); \
\
__x0 = (USItype) __ul * __vl; \
__x1 = (USItype) __ul * __vh; \
__x2 = (USItype) __uh * __vl; \
__x3 = (USItype) __uh * __vh; \
\
__x1 += __ll_highpart (__x0);/* this can't give carry */ \
__x1 += __x2; /* but this indeed can */ \
if (__x1 < __x2) /* did we get it? */ \
__x3 += __ll_B; /* yes, add it in the proper pos. */ \
\
(w1) = __x3 + __ll_highpart (__x1); \
(w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
} while (0)
#define __umulsidi3(u, v) \
({DIunion __w; \
umul_ppmm (__w.s.high, __w.s.low, u, v); \
__w.ll; })
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));
typedef int word_type __attribute__ ((mode (__word__)));
struct DIstruct {SItype high, low;};
typedef union
{
struct DIstruct s;
DItype ll;
} DIunion;
DItype
__muldi3 (DItype u, DItype v)
{
DIunion w;
DIunion uu, vv;
uu.ll = u,
vv.ll = v;
w.ll = __umulsidi3 (uu.s.low, vv.s.low);
w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+ (USItype) uu.s.high * (USItype) vv.s.low);
return w.ll;
}

@@ -20,226 +20,3 @@ char *strcat(char *dest, const char *src)
return __kernel_strcpy(dest + __kernel_strlen(dest), src);
}
EXPORT_SYMBOL(strcat);
void *memset(void *s, int c, size_t count)
{
void *xs = s;
size_t temp, temp1;
if (!count)
return xs;
c &= 0xff;
c |= c << 8;
c |= c << 16;
if ((long)s & 1) {
char *cs = s;
*cs++ = c;
s = cs;
count--;
}
if (count > 2 && (long)s & 2) {
short *ss = s;
*ss++ = c;
s = ss;
count -= 2;
}
temp = count >> 2;
if (temp) {
long *ls = s;
asm volatile (
" movel %1,%2\n"
" andw #7,%2\n"
" lsrl #3,%1\n"
" negw %2\n"
" jmp %%pc@(2f,%2:w:2)\n"
"1: movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
" movel %3,%0@+\n"
"2: dbra %1,1b\n"
" clrw %1\n"
" subql #1,%1\n"
" jpl 1b"
: "=a" (ls), "=d" (temp), "=&d" (temp1)
: "d" (c), "0" (ls), "1" (temp));
s = ls;
}
if (count & 2) {
short *ss = s;
*ss++ = c;
s = ss;
}
if (count & 1) {
char *cs = s;
*cs = c;
}
return xs;
}
EXPORT_SYMBOL(memset);
void *memcpy(void *to, const void *from, size_t n)
{
void *xto = to;
size_t temp, temp1;
if (!n)
return xto;
if ((long)to & 1) {
char *cto = to;
const char *cfrom = from;
*cto++ = *cfrom++;
to = cto;
from = cfrom;
n--;
}
if (n > 2 && (long)to & 2) {
short *sto = to;
const short *sfrom = from;
*sto++ = *sfrom++;
to = sto;
from = sfrom;
n -= 2;
}
temp = n >> 2;
if (temp) {
long *lto = to;
const long *lfrom = from;
asm volatile (
" movel %2,%3\n"
" andw #7,%3\n"
" lsrl #3,%2\n"
" negw %3\n"
" jmp %%pc@(1f,%3:w:2)\n"
"4: movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
" movel %0@+,%1@+\n"
"1: dbra %2,4b\n"
" clrw %2\n"
" subql #1,%2\n"
" jpl 4b"
: "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
: "0" (lfrom), "1" (lto), "2" (temp));
to = lto;
from = lfrom;
}
if (n & 2) {
short *sto = to;
const short *sfrom = from;
*sto++ = *sfrom++;
to = sto;
from = sfrom;
}
if (n & 1) {
char *cto = to;
const char *cfrom = from;
*cto = *cfrom;
}
return xto;
}
EXPORT_SYMBOL(memcpy);
void *memmove(void *dest, const void *src, size_t n)
{
void *xdest = dest;
size_t temp;
if (!n)
return xdest;
if (dest < src) {
if ((long)dest & 1) {
char *cdest = dest;
const char *csrc = src;
*cdest++ = *csrc++;
dest = cdest;
src = csrc;
n--;
}
if (n > 2 && (long)dest & 2) {
short *sdest = dest;
const short *ssrc = src;
*sdest++ = *ssrc++;
dest = sdest;
src = ssrc;
n -= 2;
}
temp = n >> 2;
if (temp) {
long *ldest = dest;
const long *lsrc = src;
temp--;
do
*ldest++ = *lsrc++;
while (temp--);
dest = ldest;
src = lsrc;
}
if (n & 2) {
short *sdest = dest;
const short *ssrc = src;
*sdest++ = *ssrc++;
dest = sdest;
src = ssrc;
}
if (n & 1) {
char *cdest = dest;
const char *csrc = src;
*cdest = *csrc;
}
} else {
dest = (char *)dest + n;
src = (const char *)src + n;
if ((long)dest & 1) {
char *cdest = dest;
const char *csrc = src;
*--cdest = *--csrc;
dest = cdest;
src = csrc;
n--;
}
if (n > 2 && (long)dest & 2) {
short *sdest = dest;
const short *ssrc = src;
*--sdest = *--ssrc;
dest = sdest;
src = ssrc;
n -= 2;
}
temp = n >> 2;
if (temp) {
long *ldest = dest;
const long *lsrc = src;
temp--;
do
*--ldest = *--lsrc;
while (temp--);
dest = ldest;
src = lsrc;
}
if (n & 2) {
short *sdest = dest;
const short *ssrc = src;
*--sdest = *--ssrc;
dest = sdest;
src = ssrc;
}
if (n & 1) {
char *cdest = dest;
const char *csrc = src;
*--cdest = *--csrc;
}
}
return xdest;
}
EXPORT_SYMBOL(memmove);

@@ -1,5 +1,9 @@
ifdef CONFIG_MMU
include arch/m68k/mm/Makefile_mm
else
include arch/m68k/mm/Makefile_no
endif
#
# Makefile for the linux m68k-specific parts of the memory manager.
#
obj-y := init.o
obj-$(CONFIG_MMU) += cache.o fault.o hwtest.o
obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o

@@ -1,8 +0,0 @@
#
# Makefile for the linux m68k-specific parts of the memory manager.
#
obj-y := cache.o init.o fault.o hwtest.o
obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o

@@ -1,5 +0,0 @@
#
# Makefile for the linux m68knommu specific parts of the memory manager.
#
obj-y += init.o kmap.o

@@ -38,28 +38,10 @@
#include <asm/system.h>
#include <asm/machdep.h>
#undef DEBUG
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void free_initmem(void);
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
* of a process dying in kernel mode, possibly leaving an inode
* unused, etc.
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
* to point to BAD_PAGE entries.
*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
static unsigned long empty_bad_page_table;
static unsigned long empty_bad_page;
unsigned long empty_zero_page;
extern unsigned long memory_start;
@@ -77,22 +59,9 @@ void __init paging_init(void)
* Make sure start_mem is page aligned, otherwise bootmem and
* page_alloc get different views of the world.
*/
#ifdef DEBUG
unsigned long start_mem = PAGE_ALIGN(memory_start);
#endif
unsigned long end_mem = memory_end & PAGE_MASK;
unsigned long zones_size[MAX_NR_ZONES] = {0, };
#ifdef DEBUG
printk (KERN_DEBUG "start_mem is %#lx\nvirtual_end is %#lx\n",
start_mem, end_mem);
#endif
/*
* Initialize the bad page table and bad page to point
* to a couple of allocated pages.
*/
empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
@@ -101,19 +70,8 @@ void __init paging_init(void)
*/
set_fs (USER_DS);
#ifdef DEBUG
printk (KERN_DEBUG "before free_area_init\n");
printk (KERN_DEBUG "free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
start_mem, end_mem);
#endif
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
free_area_init(zones_size);
}
zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
free_area_init(zones_size);
}
void __init mem_init(void)
@@ -166,8 +124,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
void
free_initmem()
void free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
unsigned long addr;

@@ -1,5 +1,367 @@
#ifdef CONFIG_MMU
#include "kmap_mm.c"
/*
* linux/arch/m68k/mm/kmap.c
*
* Copyright (C) 1997 Roman Hodek
*
* 10/01/99 cleaned up the code and changed to the same interface
* used by other architectures /Roman Zippel
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>
#undef DEBUG
#define PTRTREESIZE (256*1024)
/*
* For 040/060 we can use the virtual memory area like other architectures,
* but for 020/030 we want to use early termination page descriptors and we
* can't mix these with normal page descriptors, so we have to copy that code
* (mm/vmalloc.c) and return appropriately aligned addresses.
*/
#ifdef CPU_M68040_OR_M68060_ONLY
#define IO_SIZE PAGE_SIZE
static inline struct vm_struct *get_io_area(unsigned long size)
{
return get_vm_area(size, VM_IOREMAP);
}
static inline void free_io_area(void *addr)
{
vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
#else
#include "kmap_no.c"
#define IO_SIZE (256*1024)
static struct vm_struct *iolist;
static struct vm_struct *get_io_area(unsigned long size)
{
unsigned long addr;
struct vm_struct **p, *tmp, *area;
area = kmalloc(sizeof(*area), GFP_KERNEL);
if (!area)
return NULL;
addr = KMAP_START;
for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
if (size + addr < (unsigned long)tmp->addr)
break;
if (addr > KMAP_END-size) {
kfree(area);
return NULL;
}
addr = tmp->size + (unsigned long)tmp->addr;
}
area->addr = (void *)addr;
area->size = size + IO_SIZE;
area->next = *p;
*p = area;
return area;
}
static inline void free_io_area(void *addr)
{
struct vm_struct **p, *tmp;
if (!addr)
return;
addr = (void *)((unsigned long)addr & -IO_SIZE);
for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
__iounmap(tmp->addr, tmp->size);
kfree(tmp);
return;
}
}
}
#endif
/*
* Map some physical address range into the kernel address space.
*/
/* Rewritten by Andreas Schwab to remove all races. */
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
struct vm_struct *area;
unsigned long virtaddr, retaddr;
long offset;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
/*
* Don't allow mappings that wrap..
*/
if (!size || physaddr > (unsigned long)(-size))
return NULL;
#ifdef CONFIG_AMIGA
if (MACH_IS_AMIGA) {
if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
&& (cacheflag == IOMAP_NOCACHE_SER))
return (void __iomem *)physaddr;
}
#endif
#ifdef DEBUG
printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
/*
* Mappings have to be aligned
*/
offset = physaddr & (IO_SIZE - 1);
physaddr &= -IO_SIZE;
size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
/*
* Ok, go for it..
*/
area = get_io_area(size);
if (!area)
return NULL;
virtaddr = (unsigned long)area->addr;
retaddr = virtaddr + offset;
#ifdef DEBUG
printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif
/*
* add cache and table flags to physical address
*/
if (CPU_IS_040_OR_060) {
physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
_PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
case IOMAP_FULL_CACHING:
physaddr |= _PAGE_CACHE040;
break;
case IOMAP_NOCACHE_SER:
default:
physaddr |= _PAGE_NOCACHE_S;
break;
case IOMAP_NOCACHE_NONSER:
physaddr |= _PAGE_NOCACHE;
break;
case IOMAP_WRITETHROUGH:
physaddr |= _PAGE_CACHE040W;
break;
}
} else {
physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
default:
physaddr |= _PAGE_NOCACHE030;
break;
case IOMAP_FULL_CACHING:
case IOMAP_WRITETHROUGH:
break;
}
}
while ((long)size > 0) {
#ifdef DEBUG
if (!(virtaddr & (PTRTREESIZE-1)))
printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
if (!pmd_dir) {
printk("ioremap: no mem for pmd_dir\n");
return NULL;
}
if (CPU_IS_020_OR_030) {
pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
physaddr += PTRTREESIZE;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
} else {
pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
if (!pte_dir) {
printk("ioremap: no mem for pte_dir\n");
return NULL;
}
pte_val(*pte_dir) = physaddr;
virtaddr += PAGE_SIZE;
physaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
#ifdef DEBUG
printk("\n");
#endif
flush_tlb_all();
return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
/*
* Unmap a ioremap()ed region again
*/
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
if ((!MACH_IS_AMIGA) ||
(((unsigned long)addr < 0x40000000) ||
((unsigned long)addr > 0x60000000)))
free_io_area((__force void *)addr);
#else
free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);
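/*
 * Editor's sketch of typical driver usage, not part of this file; the
 * physical base and register offset are invented for illustration.
 */
#if 0
static void probe_device_regs(void)
{
	void __iomem *regs;
	regs = __ioremap(0x40000000, PAGE_SIZE, IOMAP_NOCACHE_SER);
	if (!regs)
		return;
	printk(KERN_INFO "status: %08x\n", readl(regs + 0x10));
	iounmap(regs);
}
#endif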
/*
* __iounmap unmaps nearly everything, so be careful.
* It doesn't currently free pointer/page tables anymore, but that
* wasn't used anyway and might be added later.
*/
void __iounmap(void *addr, unsigned long size)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
if (pmd_type == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = 0;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
} else if (pmd_type == 0)
continue;
}
if (pmd_bad(*pmd_dir)) {
printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = 0;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
*/
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
if (CPU_IS_040_OR_060) {
switch (cmode) {
case IOMAP_FULL_CACHING:
cmode = _PAGE_CACHE040;
break;
case IOMAP_NOCACHE_SER:
default:
cmode = _PAGE_NOCACHE_S;
break;
case IOMAP_NOCACHE_NONSER:
cmode = _PAGE_NOCACHE;
break;
case IOMAP_WRITETHROUGH:
cmode = _PAGE_CACHE040W;
break;
}
} else {
switch (cmode) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
default:
cmode = _PAGE_NOCACHE030;
break;
case IOMAP_FULL_CACHING:
case IOMAP_WRITETHROUGH:
cmode = 0;
}
}
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
_CACHEMASK040) | cmode;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
}
}
if (pmd_bad(*pmd_dir)) {
printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);

@@ -1,367 +0,0 @@
/*
* linux/arch/m68k/mm/kmap.c
*
* Copyright (C) 1997 Roman Hodek
*
* 10/01/99 cleaned up the code and changed to the same interface
* used by other architectures /Roman Zippel
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>
#undef DEBUG
#define PTRTREESIZE (256*1024)
/*
* For 040/060 we can use the virtual memory area like other architectures,
* but for 020/030 we want to use early termination page descriptors and we
* can't mix these with normal page descriptors, so we have to copy that code
* (mm/vmalloc.c) and return appropriately aligned addresses.
*/
#ifdef CPU_M68040_OR_M68060_ONLY
#define IO_SIZE PAGE_SIZE
static inline struct vm_struct *get_io_area(unsigned long size)
{
return get_vm_area(size, VM_IOREMAP);
}
static inline void free_io_area(void *addr)
{
vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
#else
#define IO_SIZE (256*1024)
static struct vm_struct *iolist;
static struct vm_struct *get_io_area(unsigned long size)
{
unsigned long addr;
struct vm_struct **p, *tmp, *area;
area = kmalloc(sizeof(*area), GFP_KERNEL);
if (!area)
return NULL;
addr = KMAP_START;
for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
if (size + addr < (unsigned long)tmp->addr)
break;
if (addr > KMAP_END-size) {
kfree(area);
return NULL;
}
addr = tmp->size + (unsigned long)tmp->addr;
}
area->addr = (void *)addr;
area->size = size + IO_SIZE;
area->next = *p;
*p = area;
return area;
}
static inline void free_io_area(void *addr)
{
struct vm_struct **p, *tmp;
if (!addr)
return;
addr = (void *)((unsigned long)addr & -IO_SIZE);
for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
__iounmap(tmp->addr, tmp->size);
kfree(tmp);
return;
}
}
}
#endif
/*
* Map some physical address range into the kernel address space.
*/
/* Rewritten by Andreas Schwab to remove all races. */
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
struct vm_struct *area;
unsigned long virtaddr, retaddr;
long offset;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
/*
* Don't allow mappings that wrap..
*/
if (!size || physaddr > (unsigned long)(-size))
return NULL;
#ifdef CONFIG_AMIGA
if (MACH_IS_AMIGA) {
if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
&& (cacheflag == IOMAP_NOCACHE_SER))
return (void __iomem *)physaddr;
}
#endif
#ifdef DEBUG
printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
/*
* Mappings have to be aligned
*/
offset = physaddr & (IO_SIZE - 1);
physaddr &= -IO_SIZE;
size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
/*
* Ok, go for it..
*/
area = get_io_area(size);
if (!area)
return NULL;
virtaddr = (unsigned long)area->addr;
retaddr = virtaddr + offset;
#ifdef DEBUG
printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif
/*
* add cache and table flags to physical address
*/
if (CPU_IS_040_OR_060) {
physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
_PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
case IOMAP_FULL_CACHING:
physaddr |= _PAGE_CACHE040;
break;
case IOMAP_NOCACHE_SER:
default:
physaddr |= _PAGE_NOCACHE_S;
break;
case IOMAP_NOCACHE_NONSER:
physaddr |= _PAGE_NOCACHE;
break;
case IOMAP_WRITETHROUGH:
physaddr |= _PAGE_CACHE040W;
break;
}
} else {
physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
default:
physaddr |= _PAGE_NOCACHE030;
break;
case IOMAP_FULL_CACHING:
case IOMAP_WRITETHROUGH:
break;
}
}
while ((long)size > 0) {
#ifdef DEBUG
if (!(virtaddr & (PTRTREESIZE-1)))
printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
if (!pmd_dir) {
printk("ioremap: no mem for pmd_dir\n");
return NULL;
}
if (CPU_IS_020_OR_030) {
pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
physaddr += PTRTREESIZE;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
} else {
pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
if (!pte_dir) {
printk("ioremap: no mem for pte_dir\n");
return NULL;
}
pte_val(*pte_dir) = physaddr;
virtaddr += PAGE_SIZE;
physaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
#ifdef DEBUG
printk("\n");
#endif
flush_tlb_all();
return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
/*
* Unmap a ioremap()ed region again
*/
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
if ((!MACH_IS_AMIGA) ||
(((unsigned long)addr < 0x40000000) ||
((unsigned long)addr > 0x60000000)))
free_io_area((__force void *)addr);
#else
free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);
/*
* __iounmap unmaps nearly everything, so be careful.
* It doesn't currently free pointer/page tables anymore, but that
* wasn't used anyway and might be added later.
*/
void __iounmap(void *addr, unsigned long size)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
if (pmd_type == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = 0;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
} else if (pmd_type == 0)
continue;
}
if (pmd_bad(*pmd_dir)) {
printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = 0;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
*/
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
if (CPU_IS_040_OR_060) {
switch (cmode) {
case IOMAP_FULL_CACHING:
cmode = _PAGE_CACHE040;
break;
case IOMAP_NOCACHE_SER:
default:
cmode = _PAGE_NOCACHE_S;
break;
case IOMAP_NOCACHE_NONSER:
cmode = _PAGE_NOCACHE;
break;
case IOMAP_WRITETHROUGH:
cmode = _PAGE_CACHE040W;
break;
}
} else {
switch (cmode) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
default:
cmode = _PAGE_NOCACHE030;
break;
case IOMAP_FULL_CACHING:
case IOMAP_WRITETHROUGH:
cmode = 0;
}
}
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
_CACHEMASK040) | cmode;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
}
}
if (pmd_bad(*pmd_dir)) {
printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);

@@ -1,45 +0,0 @@
/*
* linux/arch/m68knommu/mm/kmap.c
*
* Copyright (C) 2000 Lineo, <davidm@snapgear.com>
* Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com>
*/
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>
#undef DEBUG
/*
* Map some physical address range into the kernel address space.
*/
void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
return (void *)physaddr;
}
/*
* Unmap a ioremap()ed region again.
*/
void iounmap(void *addr)
{
}
/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
*/
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
}

@@ -10,7 +10,6 @@
* Linux/m68k support by Hamish Macdonald
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
@@ -80,7 +79,7 @@ ENTRY(system_call)
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1
movel %d1,%a2
btst #(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
btst #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
jne do_trace
cmpl #NR_syscalls,%d0
jcc badsys
@@ -107,12 +106,12 @@ Luser_return:
andl #-THREAD_SIZE,%d1
movel %d1,%a2
1:
move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
jne Lwork_to_do
RESTORE_ALL
Lwork_to_do:
movel %a2@(TI_FLAGS),%d1 /* thread_info->flags */
movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
btst #TIF_NEED_RESCHED,%d1
jne reschedule
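/*
 * Editor's note, a worked example of the byte/bit arithmetic above:
 * btst tests a bit within a single byte, and m68k is big-endian, so
 * the byte of a 32-bit flags word holding bit N (counted from the
 * LSB) is at offset (31 - N) / 8, and the bit within that byte is
 * N % 8. For a hypothetical N = 14: (31 - 14) / 8 = 2 (the third
 * byte) and 14 % 8 = 6.
 */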

@@ -12,7 +12,6 @@
* M68360 Port by SED Systems, and Lineo.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
@@ -76,7 +75,7 @@ ENTRY(system_call)
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1
movel %d1,%a2
btst #(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
btst #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
jne do_trace
cmpl #NR_syscalls,%d0
jcc badsys
@@ -103,12 +102,12 @@ Luser_return:
andl #-THREAD_SIZE,%d1
movel %d1,%a2
1:
move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
jne Lwork_to_do
RESTORE_ALL
Lwork_to_do:
movel %a2@(TI_FLAGS),%d1 /* thread_info->flags */
movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
btst #TIF_NEED_RESCHED,%d1
jne reschedule

@@ -9,6 +9,7 @@
/***************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/dma.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
@@ -33,7 +34,9 @@ unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = {
MCFDMA_BASE3,
#endif
};
EXPORT_SYMBOL(dma_base_addr);
unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
EXPORT_SYMBOL(dma_device_address);
/***************************************************************************/

@@ -26,7 +26,6 @@
* Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
@@ -78,7 +77,7 @@ ENTRY(system_call)
movel %d2,%a0
movel %a0@,%a1 /* save top of frame */
movel %sp,%a1@(TASK_THREAD+THREAD_ESP0)
btst #(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
btst #(TIF_SYSCALL_TRACE%8),%a0@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
bnes 1f
movel %d3,%a0
@@ -113,11 +112,11 @@ ret_from_exception:
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
movel %d1,%a0
movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
andl #(1<<TIF_NEED_RESCHED),%d1
jeq Lkernel_return
movel %a0@(TI_PREEMPTCOUNT),%d1
movel %a0@(TINFO_PREEMPT),%d1
cmpl #0,%d1
jne Lkernel_return
@@ -137,14 +136,14 @@ Luser_return:
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
movel %d1,%a0
movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
jne Lwork_to_do /* still work to do */
Lreturn:
RESTORE_USER
Lwork_to_do:
movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
move #0x2000,%sr /* enable intrs again */
btst #TIF_NEED_RESCHED,%d1
jne reschedule

@@ -8,7 +8,6 @@
/*****************************************************************************/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>

@@ -118,7 +118,7 @@ SECTIONS
EXIT_DATA
}
PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */

@@ -70,7 +70,7 @@ SECTIONS
.exit.text : { EXIT_TEXT; }
.exit.data : { EXIT_DATA; }
PERCPU(32, PAGE_SIZE)
PERCPU_SECTION(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */

@@ -149,7 +149,7 @@ SECTIONS
EXIT_DATA
}
PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */

@@ -160,7 +160,7 @@ SECTIONS
INIT_RAM_FS
}
PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(8);
.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {

@@ -230,17 +230,6 @@ config SYSVIPC_COMPAT
config AUDIT_ARCH
def_bool y
config S390_EXEC_PROTECT
def_bool y
prompt "Data execute protection"
help
This option allows you to enable buffer overflow protection for user
space programs, and it also selects the addressing mode option above.
The kernel parameter noexec=on will enable this feature and also
switch the addressing modes; the default is disabled. Enabling this
(via kernel parameter) on machines earlier than IBM System z9 will
reduce system performance.
comment "Code generation options"
choice

@@ -130,9 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
{
struct list_head *lh;
struct appldata_ops *ops;
int i;
i = 0;
get_online_cpus();
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {

@@ -167,7 +167,6 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
#ifdef CONFIG_64BIT
#define cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
})
#else /* CONFIG_64BIT */

@@ -196,18 +196,6 @@ do { \
} while (0)
#endif /* __s390x__ */
/*
* An executable for which elf_read_implies_exec() returns TRUE will
* have the READ_IMPLIES_EXEC personality flag set automatically.
*/
#define elf_read_implies_exec(ex, executable_stack) \
({ \
if (current->mm->context.noexec && \
executable_stack != EXSTACK_DISABLE_X) \
disable_noexec(current->mm, current); \
current->mm->context.noexec == 0; \
})
#define STACK_RND_MASK 0x7ffUL
#define ARCH_DLINFO \

@@ -111,21 +111,10 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
{
pmd_t *pmdp = (pmd_t *) ptep;
if (!MACHINE_HAS_IDTE) {
__pmd_csp(pmdp);
if (mm->context.noexec) {
pmdp = get_shadow_table(pmdp);
__pmd_csp(pmdp);
}
return;
}
__pmd_idte(address, pmdp);
if (mm->context.noexec) {
pmdp = get_shadow_table(pmdp);
if (MACHINE_HAS_IDTE)
__pmd_idte(address, pmdp);
}
return;
else
__pmd_csp(pmdp);
}
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \

@@ -15,6 +15,7 @@ enum interruption_class {
EXTINT_VRT,
EXTINT_SCP,
EXTINT_IUC,
EXTINT_CPM,
IOINT_QAI,
IOINT_QDI,
IOINT_DAS,

@@ -124,7 +124,7 @@ struct _lowcore {
/* Address space pointer. */
__u32 kernel_asce; /* 0x02ac */
__u32 user_asce; /* 0x02b0 */
__u32 user_exec_asce; /* 0x02b4 */
__u32 current_pid; /* 0x02b4 */
/* SMP info area */
__u32 cpu_nr; /* 0x02b8 */
@@ -255,7 +255,7 @@ struct _lowcore {
/* Address space pointer. */
__u64 kernel_asce; /* 0x0310 */
__u64 user_asce; /* 0x0318 */
__u64 user_exec_asce; /* 0x0320 */
__u64 current_pid; /* 0x0320 */
/* SMP info area */
__u32 cpu_nr; /* 0x0328 */

@@ -5,19 +5,18 @@ typedef struct {
atomic_t attach_count;
unsigned int flush_mm;
spinlock_t list_lock;
struct list_head crst_list;
struct list_head pgtable_list;
unsigned long asce_bits;
unsigned long asce_limit;
unsigned long vdso_base;
int noexec;
int has_pgste; /* The mmu context has extended page tables */
int alloc_pgste; /* cloned contexts will have extended page tables */
/* Cloned contexts will be created with extended page tables. */
unsigned int alloc_pgste:1;
/* The mmu context has extended page tables. */
unsigned int has_pgste:1;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
.context.crst_list = LIST_HEAD_INIT(name.context.crst_list), \
.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
#endif

@@ -35,11 +35,9 @@ static inline int init_new_context(struct task_struct *tsk,
* and if has_pgste is set, it will create extended page
* tables.
*/
mm->context.noexec = 0;
mm->context.has_pgste = 1;
mm->context.alloc_pgste = 1;
} else {
mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
mm->context.has_pgste = 0;
mm->context.alloc_pgste = 0;
}
@@ -63,10 +61,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
if (user_mode != HOME_SPACE_MODE) {
/* Load primary space page table origin. */
pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
asm volatile(LCTL_OPCODE" 1,1,%0\n"
: : "m" (S390_lowcore.user_exec_asce) );
: : "m" (S390_lowcore.user_asce) );
} else
/* Load home space page table origin. */
asm volatile(LCTL_OPCODE" 13,13,%0"

@@ -90,6 +90,7 @@ static inline void copy_page(void *to, void *from)
*/
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
@@ -97,18 +98,21 @@ typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;
#define pgprot_val(x) ((x).pgprot)
#define pgste_val(x) ((x).pgste)
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
#define __pgste(x) ((pgste_t) { (x) } )
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pud(x) ((pud_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
static inline void
page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
static inline void page_set_storage_key(unsigned long addr,
unsigned char skey, int mapped)
{
if (!mapped)
asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
@@ -117,15 +121,59 @@ page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
static inline unsigned int
page_get_storage_key(unsigned long addr)
static inline unsigned char page_get_storage_key(unsigned long addr)
{
unsigned int skey;
unsigned char skey;
asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0));
asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
return skey;
}
static inline int page_reset_referenced(unsigned long addr)
{
unsigned int ipm;
asm volatile(
" rrbe 0,%1\n"
" ipm %0\n"
: "=d" (ipm) : "a" (addr) : "cc");
return !!(ipm & 0x20000000);
}
/* Bits in the storage key */
#define _PAGE_CHANGED 0x02 /* HW changed bit */
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
/*
* Test and clear dirty bit in storage key.
* We can't clear the changed bit atomically. This is a potential
* race against modification of the referenced bit. This function
* should therefore only be called if the page is not mapped in any
* address space.
*/
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
{
unsigned char skey;
skey = page_get_storage_key(pfn << PAGE_SHIFT);
if (!(skey & _PAGE_CHANGED))
return 0;
page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
return 1;
}
/*
* Test and clear referenced bit in storage key.
*/
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(unsigned long pfn)
{
return page_reset_referenced(pfn << PAGE_SHIFT);
}
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);

View file

@@ -1,6 +1,9 @@
#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__
#include <linux/preempt.h>
#include <asm/cmpxchg.h>
/*
* s390 uses its own implementation for per cpu data, the offset of
* the cpu local data area is cached in the cpu's lowcore memory.
@@ -16,6 +19,71 @@
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
#define arch_irqsafe_cpu_to_op(pcp, val, op) \
do { \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = __this_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
do { \
old__ = prev__; \
new__ = old__ op (val); \
switch (sizeof(*ptr__)) { \
case 8: \
prev__ = cmpxchg64(ptr__, old__, new__); \
break; \
default: \
prev__ = cmpxchg(ptr__, old__, new__); \
} \
} while (prev__ != old__); \
preempt_enable(); \
} while (0)
#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
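The arch_irqsafe_cpu_to_op macro above makes each irqsafe per-cpu operation a compare-and-swap retry loop rather than an interrupt-disable section. A minimal userspace sketch of the same retry pattern, using GCC's __sync builtin with an ordinary global standing in for the per-cpu slot (my illustration, not kernel code):

#include <stdio.h>

static unsigned int counter;

/* Same shape as the macro: snapshot the old value, compute old + val,
 * and publish only if the location still holds old; otherwise retry
 * with the value we actually observed. */
static void irqsafe_add_model(unsigned int *ptr, unsigned int val)
{
	unsigned int old, prev = *ptr;
	do {
		old = prev;
		prev = __sync_val_compare_and_swap(ptr, old, old + val);
	} while (prev != old);
}

int main(void)
{
	irqsafe_add_model(&counter, 5);
	irqsafe_add_model(&counter, 7);
	printf("counter = %u\n", counter);	/* prints 12 */
	return 0;
}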
#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = __this_cpu_ptr(&(pcp)); \
switch (sizeof(*ptr__)) { \
case 8: \
ret__ = cmpxchg64(ptr__, oval, nval); \
break; \
default: \
ret__ = cmpxchg(ptr__, oval, nval); \
} \
preempt_enable(); \
ret__; \
})
#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */

View file

@@ -19,14 +19,13 @@
#define check_pgt_cache() do {} while (0)
unsigned long *crst_table_alloc(struct mm_struct *, int);
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
void crst_table_free_rcu(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
@@ -50,9 +49,6 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
clear_table(crst, entry, sizeof(unsigned long)*2048);
crst = get_shadow_table(crst);
if (crst)
clear_table(crst, entry, sizeof(unsigned long)*2048);
}
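For scale (my arithmetic, not in the patch): on 64-bit a crst table holds 2048 eight-byte entries, i.e. 16 KiB or four 4 KiB pages, which is why crst_table_alloc later in this diff allocates with a page order rather than a single page (ALLOC_ORDER itself is defined outside these hunks; order 2 for the 64-bit case is my assumption). A one-line check:

#include <assert.h>

int main(void)
{
	/* 64-bit case: 2048 entries * 8 bytes = 16 KiB = 4 pages of 4 KiB */
	assert(8UL * 2048 == 16384);
	assert(16384 / 4096 == 4);
	return 0;
}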
#ifndef __s390x__
@@ -69,10 +65,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()
#define pud_populate(mm, pud, pmd) BUG()
#define pud_populate_kernel(mm, pud, pmd) BUG()
#else /* __s390x__ */
@@ -90,7 +83,7 @@ void crst_table_downgrade(struct mm_struct *, unsigned long limit);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
unsigned long *table = crst_table_alloc(mm);
if (table)
crst_table_init(table, _REGION3_ENTRY_EMPTY);
return (pud_t *) table;
@@ -99,43 +92,21 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
unsigned long *table = crst_table_alloc(mm);
if (table)
crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
static inline void pgd_populate_kernel(struct mm_struct *mm,
pgd_t *pgd, pud_t *pud)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
pgd_populate_kernel(mm, pgd, pud);
if (mm->context.noexec) {
pgd = get_shadow_table(pgd);
pud = get_shadow_table(pud);
pgd_populate_kernel(mm, pgd, pud);
}
}
static inline void pud_populate_kernel(struct mm_struct *mm,
pud_t *pud, pmd_t *pmd)
{
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
pud_populate_kernel(mm, pud, pmd);
if (mm->context.noexec) {
pud = get_shadow_table(pud);
pmd = get_shadow_table(pmd);
pud_populate_kernel(mm, pud, pmd);
}
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
#endif /* __s390x__ */
@@ -143,29 +114,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.crst_list);
INIT_LIST_HEAD(&mm->context.pgtable_list);
return (pgd_t *)
crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
return (pgd_t *) crst_table_alloc(mm);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}
static inline void pmd_populate(struct mm_struct *mm,
pmd_t *pmd, pgtable_t pte)
{
pmd_populate_kernel(mm, pmd, pte);
if (mm->context.noexec) {
pmd = get_shadow_table(pmd);
pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
}
pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
#define pmd_pgtable(pmd) \
(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
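The pmd_pgtable definition above uses unsigned negation as an alignment mask: sizeof(pte_t) * PTRS_PER_PTE is the byte size of one page table, and -size clears the low bits that hold segment-entry flags. A quick check, assuming 8-byte ptes and 256 entries per table, the 64-bit s390 layout (my assumption; both constants are defined outside this hunk):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 8 * 256;		/* sizeof(pte_t) * PTRS_PER_PTE = 2 KiB */
	uint64_t pmd = 0x12345abcULL;		/* hypothetical pmd: table origin | flags */
	assert((pmd & -size) == 0x12345800ULL);	/* flags in the low 11 bits stripped */
	return 0;
}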

View file

@@ -31,9 +31,8 @@
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>
#include <asm/page.h>
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
@@ -243,11 +242,13 @@ extern unsigned long VMALLOC_START;
/* Software bits in the page table entry */
#define _PAGE_SWT 0x001 /* SW pte type bit t */
#define _PAGE_SWX 0x002 /* SW pte type bit x */
#define _PAGE_SPECIAL 0x004 /* SW associated with special page */
#define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */
#define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */
#define _PAGE_SPECIAL 0x010 /* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY 0x400
@@ -256,8 +257,6 @@ extern unsigned long VMALLOC_START;
#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO 0x200
#define _PAGE_TYPE_RW 0x000
#define _PAGE_TYPE_EX_RO 0x202
#define _PAGE_TYPE_EX_RW 0x002
/*
* Only four types for huge pages, using the invalid bit and protection bit
@@ -287,8 +286,6 @@ extern unsigned long VMALLOC_START;
* _PAGE_TYPE_FILE 11?1 -> 11?1
* _PAGE_TYPE_RO 0100 -> 1100
* _PAGE_TYPE_RW 0000 -> 1000
* _PAGE_TYPE_EX_RO 0110 -> 1110
* _PAGE_TYPE_EX_RW 0010 -> 1010
*
* pte_none is true for bits combinations 1000, 1010, 1100, 1110
* pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
@@ -297,14 +294,17 @@ extern unsigned long VMALLOC_START;
*/
/* Page status table bits for virtualization */
#define RCP_PCL_BIT 55
#define RCP_HR_BIT 54
#define RCP_HC_BIT 53
#define RCP_GR_BIT 50
#define RCP_GC_BIT 49
#define RCP_ACC_BITS 0xf000000000000000UL
#define RCP_FP_BIT 0x0800000000000000UL
#define RCP_PCL_BIT 0x0080000000000000UL
#define RCP_HR_BIT 0x0040000000000000UL
#define RCP_HC_BIT 0x0020000000000000UL
#define RCP_GR_BIT 0x0004000000000000UL
#define RCP_GC_BIT 0x0002000000000000UL
/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT 47
/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT 0x0000800000000000UL
#define KVM_UC_BIT 0x0000400000000000UL
#ifndef __s390x__
@@ -377,85 +377,54 @@ extern unsigned long VMALLOC_START;
#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
_ASCE_ALT_EVENT)
/* Bits in the storage key */
#define _PAGE_CHANGED 0x02 /* HW changed bit */
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
/*
* Page protection definitions.
*/
#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)
#define PAGE_KERNEL PAGE_RW
#define PAGE_COPY PAGE_RO
/*
* Dependent on the EXEC_PROTECT option s390 can do execute protection.
* Write permission always implies read permission. In theory with a
* primary/secondary page table execute only can be implemented but
* it would cost an additional bit in the pte to distinguish all the
* different pte types. To avoid that execute permission currently
* implies read permission as well.
* On s390 the page table entry has an invalid bit and a read-only bit.
* Read permission implies execute permission and write permission
* implies read permission.
*/
/*xwr*/
#define __P000 PAGE_NONE
#define __P001 PAGE_RO
#define __P010 PAGE_RO
#define __P011 PAGE_RO
#define __P100 PAGE_EX_RO
#define __P101 PAGE_EX_RO
#define __P110 PAGE_EX_RO
#define __P111 PAGE_EX_RO
#define __P100 PAGE_RO
#define __P101 PAGE_RO
#define __P110 PAGE_RO
#define __P111 PAGE_RO
#define __S000 PAGE_NONE
#define __S001 PAGE_RO
#define __S010 PAGE_RW
#define __S011 PAGE_RW
#define __S100 PAGE_EX_RO
#define __S101 PAGE_EX_RO
#define __S110 PAGE_EX_RW
#define __S111 PAGE_EX_RW
#define __S100 PAGE_RO
#define __S101 PAGE_RO
#define __S110 PAGE_RW
#define __S111 PAGE_RW
#ifndef __s390x__
# define PxD_SHADOW_SHIFT 1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT 2
#endif /* __s390x__ */
static inline void *get_shadow_table(void *table)
static inline int mm_exclusive(struct mm_struct *mm)
{
unsigned long addr, offset;
struct page *page;
addr = (unsigned long) table;
offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
page = virt_to_page((void *)(addr ^ offset));
return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
return likely(mm == current->active_mm &&
atomic_read(&mm->context.attach_count) <= 1);
}
/*
* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
static inline int mm_has_pgste(struct mm_struct *mm)
{
*ptep = entry;
if (mm->context.noexec) {
if (!(pte_val(entry) & _PAGE_INVALID) &&
(pte_val(entry) & _PAGE_SWX))
pte_val(entry) |= _PAGE_RO;
else
pte_val(entry) = _PAGE_TYPE_EMPTY;
ptep[PTRS_PER_PTE] = entry;
}
#ifdef CONFIG_PGSTE
if (unlikely(mm->context.has_pgste))
return 1;
#endif
return 0;
}
/*
* pgd/pmd/pte query functions
*/
@@ -568,52 +537,127 @@ static inline int pte_special(pte_t pte)
}
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b) (pte_val(a) == pte_val(b))
static inline void rcp_lock(pte_t *ptep)
static inline int pte_same(pte_t a, pte_t b)
{
#ifdef CONFIG_PGSTE
unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
preempt_disable();
while (test_and_set_bit(RCP_PCL_BIT, pgste))
;
#endif
return pte_val(a) == pte_val(b);
}
static inline void rcp_unlock(pte_t *ptep)
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
unsigned long new = 0;
#ifdef CONFIG_PGSTE
unsigned long old;
preempt_disable();
asm(
" lg %0,%2\n"
"0: lgr %1,%0\n"
" nihh %0,0xff7f\n" /* clear RCP_PCL_BIT in old */
" oihh %1,0x0080\n" /* set RCP_PCL_BIT in new */
" csg %0,%1,%2\n"
" jl 0b\n"
: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
return __pgste(new);
}
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
clear_bit(RCP_PCL_BIT, pgste);
asm(
" nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */
" stg %1,%0\n"
: "=Q" (ptep[PTRS_PER_PTE])
: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
preempt_enable();
#endif
}
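pgste_get_lock/pgste_set_unlock above implement a bit spinlock inside the page status table entry: the csg loop only succeeds once it observes RCP_PCL_BIT clear and atomically sets it, and the unlock store writes the value back with the bit cleared again. A userspace model of the same protocol (my sketch; a single word and GCC builtins in place of csg/stg):

#include <stdint.h>

#define PCL_BIT	0x0080000000000000ULL	/* RCP_PCL_BIT, defined earlier in this diff */

/* Spin until the lock bit is observed clear and we set it in one
 * compare-and-swap; return the locked value, like the kernel asm. */
static uint64_t pgste_lock_model(uint64_t *pgste)
{
	uint64_t old, new;
	do {
		old = *(volatile uint64_t *)pgste & ~PCL_BIT;	/* expect unlocked */
		new = old | PCL_BIT;
	} while (!__sync_bool_compare_and_swap(pgste, old, new));
	return new;
}

static void pgste_unlock_model(uint64_t *pgste, uint64_t val)
{
	*(volatile uint64_t *)pgste = val & ~PCL_BIT;	/* store drops the lock bit */
}

int main(void)
{
	uint64_t pgste = 0;
	uint64_t v = pgste_lock_model(&pgste);	/* takes the lock */
	pgste_unlock_model(&pgste, v);		/* releases it */
	return (int)(pgste != 0);
}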
/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page, int mapped);
#include <linux/page-flags.h>
static inline void ptep_rcp_copy(pte_t *ptep)
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
struct page *page = virt_to_page(pte_val(*ptep));
unsigned int skey;
unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
unsigned long pfn, bits;
unsigned char skey;
skey = page_get_storage_key(page_to_phys(page));
if (skey & _PAGE_CHANGED) {
set_bit_simple(RCP_GC_BIT, pgste);
set_bit_simple(KVM_UD_BIT, pgste);
pfn = pte_val(*ptep) >> PAGE_SHIFT;
skey = page_get_storage_key(pfn);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
/* Clear page changed & referenced bit in the storage key */
if (bits) {
skey ^= bits;
page_set_storage_key(pfn, skey, 1);
}
if (skey & _PAGE_REFERENCED)
set_bit_simple(RCP_GR_BIT, pgste);
if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
SetPageDirty(page);
set_bit_simple(KVM_UD_BIT, pgste);
}
if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
SetPageReferenced(page);
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
/* Get host changed & referenced bits from pgste */
bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
/* Clear host bits in pgste. */
pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
/* Copy page access key and fetch protection bit to pgste */
pgste_val(pgste) |=
(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
/* Transfer changed and referenced to kvm user bits */
pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */
/* Transfer changed & referenced to pte software bits */
pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */
#endif
return pgste;
}
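The shift constants in pgste_update_all encode a fixed mapping from the two storage-key bits (_PAGE_CHANGED 0x02, _PAGE_REFERENCED 0x04) to their guest, kvm-user, and pte-software copies. Spelled out against the constants defined earlier in this diff (my arithmetic, not part of the patch):

#include <assert.h>

int main(void)
{
	/* storage key -> guest bits in the pgste: bits << 48 */
	assert(0x02ULL << 48 == 0x0002000000000000ULL);	/* changed    -> RCP_GC_BIT */
	assert(0x04ULL << 48 == 0x0004000000000000ULL);	/* referenced -> RCP_GR_BIT */
	/* host bits in the pgste -> storage-key positions: >> 52 */
	assert(0x0020000000000000ULL >> 52 == 0x02);	/* RCP_HC_BIT -> changed    */
	assert(0x0040000000000000ULL >> 52 == 0x04);	/* RCP_HR_BIT -> referenced */
	/* storage key -> kvm user bits: bits << 45 */
	assert(0x02ULL << 45 == 0x0000400000000000ULL);	/* changed    -> KVM_UC_BIT */
	assert(0x04ULL << 45 == 0x0000800000000000ULL);	/* referenced -> KVM_UR_BIT */
	/* storage key -> pte software bits: bits << 1 */
	assert(0x02 << 1 == 0x04);			/* changed    -> _PAGE_SWC */
	assert(0x04 << 1 == 0x08);			/* referenced -> _PAGE_SWR */
	return 0;
}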
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
int young;
young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
/* Transfer page referenced bit to pte software bit (host view) */
if (young || (pgste_val(pgste) & RCP_HR_BIT))
pte_val(*ptep) |= _PAGE_SWR;
/* Clear host referenced bit in pgste. */
pgste_val(pgste) &= ~RCP_HR_BIT;
/* Transfer page referenced bit to guest bit in pgste */
pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
#endif
return pgste;
}
static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
unsigned long pfn;
unsigned long okey, nkey;
pfn = pte_val(*ptep) >> PAGE_SHIFT;
okey = nkey = page_get_storage_key(pfn);
nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
/* Set page access key and fetch protection bit from pgste */
nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
if (okey != nkey)
page_set_storage_key(pfn, nkey, 1);
#endif
}
/*
* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
pgste_t pgste;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste_set_pte(ptep, pgste);
*ptep = entry;
pgste_set_unlock(ptep, pgste);
} else
*ptep = entry;
}
/*
@@ -627,19 +671,19 @@ static inline int pte_write(pte_t pte)
static inline int pte_dirty(pte_t pte)
{
/* A pte is neither clean nor dirty on s/390. The dirty bit
* is in the storage key. See page_test_and_clear_dirty for
* details.
*/
#ifdef CONFIG_PGSTE
if (pte_val(pte) & _PAGE_SWC)
return 1;
#endif
return 0;
}
static inline int pte_young(pte_t pte)
{
/* A pte is neither young nor old on s/390. The young bit
* is in the storage key. See page_test_and_clear_young for
* details.
*/
#ifdef CONFIG_PGSTE
if (pte_val(pte) & _PAGE_SWR)
return 1;
#endif
return 0;
}
@@ -647,64 +691,30 @@ static inline int pte_young(pte_t pte)
* pgd/pmd/pte modification functions
*/
#ifndef __s390x__
#define pgd_clear(pgd) do { } while (0)
#define pud_clear(pud) do { } while (0)
#else /* __s390x__ */
static inline void pgd_clear_kernel(pgd_t * pgd)
static inline void pgd_clear(pgd_t *pgd)
{
#ifdef __s390x__
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}
static inline void pgd_clear(pgd_t * pgd)
{
pgd_t *shadow = get_shadow_table(pgd);
pgd_clear_kernel(pgd);
if (shadow)
pgd_clear_kernel(shadow);
}
static inline void pud_clear_kernel(pud_t *pud)
{
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}
static inline void pud_clear(pud_t *pud)
{
pud_t *shadow = get_shadow_table(pud);
pud_clear_kernel(pud);
if (shadow)
pud_clear_kernel(shadow);
#ifdef __s390x__
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}
#endif /* __s390x__ */
static inline void pmd_clear_kernel(pmd_t * pmdp)
static inline void pmd_clear(pmd_t *pmdp)
{
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}
static inline void pmd_clear(pmd_t *pmd)
{
pmd_t *shadow = get_shadow_table(pmd);
pmd_clear_kernel(pmd);
if (shadow)
pmd_clear_kernel(shadow);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec)
pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}
/*
@@ -734,35 +744,27 @@ static inline pte_t pte_mkwrite(pte_t pte)
static inline pte_t pte_mkclean(pte_t pte)
{
/* The only user of pte_mkclean is the fork() code.
We must *not* clear the *physical* page dirty bit
just because fork() wants to clear the dirty bit in
*one* of the page's mappings. So we just do nothing. */
#ifdef CONFIG_PGSTE
pte_val(pte) &= ~_PAGE_SWC;
#endif
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
/* We do not explicitly set the dirty bit because the
* sske instruction is slow. It is faster to let the
* next instruction set the dirty bit.
*/
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
/* S/390 doesn't keep its dirty/referenced bit in the pte.
* There is no point in clearing the real referenced bit.
*/
#ifdef CONFIG_PGSTE
pte_val(pte) &= ~_PAGE_SWR;
#endif
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
/* S/390 doesn't keep its dirty/referenced bit in the pte.
* There is no point in setting the real referenced bit.
*/
return pte;
}
@@ -800,62 +802,60 @@ static inline pte_t pte_mkhuge(pte_t pte)
}
#endif
#ifdef CONFIG_PGSTE
/*
* Get (and clear) the user dirty bit for a PTE.
* Get (and clear) the user dirty bit for a pte.
*/
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
pte_t *ptep)
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
pte_t *ptep)
{
int dirty;
unsigned long *pgste;
struct page *page;
unsigned int skey;
pgste_t pgste;
int dirty = 0;
if (!mm->context.has_pgste)
return -EINVAL;
rcp_lock(ptep);
pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
page = virt_to_page(pte_val(*ptep));
skey = page_get_storage_key(page_to_phys(page));
if (skey & _PAGE_CHANGED) {
set_bit_simple(RCP_GC_BIT, pgste);
set_bit_simple(KVM_UD_BIT, pgste);
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_update_all(ptep, pgste);
dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
pgste_val(pgste) &= ~KVM_UC_BIT;
pgste_set_unlock(ptep, pgste);
return dirty;
}
if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
SetPageDirty(page);
set_bit_simple(KVM_UD_BIT, pgste);
}
dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
if (skey & _PAGE_CHANGED)
page_clear_dirty(page, 1);
rcp_unlock(ptep);
return dirty;
}
#endif
/*
* Get (and clear) the user referenced bit for a pte.
*/
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
pte_t *ptep)
{
pgste_t pgste;
int young = 0;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_update_young(ptep, pgste);
young = !!(pgste_val(pgste) & KVM_UR_BIT);
pgste_val(pgste) &= ~KVM_UR_BIT;
pgste_set_unlock(ptep, pgste);
}
return young;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
unsigned long physpage;
int young;
unsigned long *pgste;
pgste_t pgste;
pte_t pte;
if (!vma->vm_mm->context.has_pgste)
return 0;
physpage = pte_val(*ptep) & PAGE_MASK;
pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
rcp_lock(ptep);
if (young)
set_bit_simple(RCP_GR_BIT, pgste);
young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
rcp_unlock(ptep);
return young;
#endif
if (mm_has_pgste(vma->vm_mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_update_young(ptep, pgste);
pte = *ptep;
*ptep = pte_mkold(pte);
pgste_set_unlock(ptep, pgste);
return pte_young(pte);
}
return 0;
}
@@ -867,10 +867,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
* On s390 reference bits are in storage key and never in TLB
* With virtualization we handle the reference bit, without it
* we can simply return */
#ifdef CONFIG_PGSTE
return ptep_test_and_clear_young(vma, address, ptep);
#endif
return 0;
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -890,25 +887,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
}
}
static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
if (mm->context.has_pgste) {
rcp_lock(ptep);
__ptep_ipte(address, ptep);
ptep_rcp_copy(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
rcp_unlock(ptep);
return;
}
__ptep_ipte(address, ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec) {
__ptep_ipte(address, ptep + PTRS_PER_PTE);
pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
}
}
/*
* This is hard to understand. ptep_get_and_clear and ptep_clear_flush
* both clear the TLB for the unmapped pte. The reason is that
@@ -923,24 +901,72 @@ static inline void ptep_invalidate(struct mm_struct *mm,
* is a nop.
*/
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
pte_t __pte = *(__ptep); \
(__mm)->context.flush_mm = 1; \
if (atomic_read(&(__mm)->context.attach_count) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__mm, __address, __ptep); \
else \
pte_clear((__mm), (__address), (__ptep)); \
__pte; \
})
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
pgste_t pgste;
pte_t pte;
mm->context.flush_mm = 1;
if (mm_has_pgste(mm))
pgste = pgste_get_lock(ptep);
pte = *ptep;
if (!mm_exclusive(mm))
__ptep_ipte(address, ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
pgste_set_unlock(ptep, pgste);
}
return pte;
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
unsigned long address,
pte_t *ptep)
{
pte_t pte;
mm->context.flush_mm = 1;
if (mm_has_pgste(mm))
pgste_get_lock(ptep);
pte = *ptep;
if (!mm_exclusive(mm))
__ptep_ipte(address, ptep);
return pte;
}
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long address,
pte_t *ptep, pte_t pte)
{
*ptep = pte;
if (mm_has_pgste(mm))
pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
}
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
ptep_invalidate(vma->vm_mm, address, ptep);
pgste_t pgste;
pte_t pte;
if (mm_has_pgste(vma->vm_mm))
pgste = pgste_get_lock(ptep);
pte = *ptep;
__ptep_ipte(address, ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm_has_pgste(vma->vm_mm)) {
pgste = pgste_update_all(&pte, pgste);
pgste_set_unlock(ptep, pgste);
}
return pte;
}
@@ -953,76 +979,67 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
*/
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long addr,
unsigned long address,
pte_t *ptep, int full)
{
pte_t pte = *ptep;
pgste_t pgste;
pte_t pte;
if (full)
pte_clear(mm, addr, ptep);
else
ptep_invalidate(mm, addr, ptep);
if (mm_has_pgste(mm))
pgste = pgste_get_lock(ptep);
pte = *ptep;
if (!full)
__ptep_ipte(address, ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
pgste_set_unlock(ptep, pgste);
}
return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep) \
({ \
pte_t __pte = *(__ptep); \
if (pte_write(__pte)) { \
(__mm)->context.flush_mm = 1; \
if (atomic_read(&(__mm)->context.attach_count) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__mm, __addr, __ptep); \
set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
} \
})
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
pgste_t pgste;
pte_t pte = *ptep;
if (pte_write(pte)) {
mm->context.flush_mm = 1;
if (mm_has_pgste(mm))
pgste = pgste_get_lock(ptep);
if (!mm_exclusive(mm))
__ptep_ipte(address, ptep);
*ptep = pte_wrprotect(pte);
if (mm_has_pgste(mm))
pgste_set_unlock(ptep, pgste);
}
return pte;
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(*(__ptep), __entry); \
if (__changed) { \
ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
} \
__changed; \
})
/*
* Test and clear dirty bit in storage key.
* We can't clear the changed bit atomically. This is a potential
* race against modification of the referenced bit. This function
* should therefore only be called if it is not mapped in any
* address space.
*/
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}
pgste_t pgste;
#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page, int mapped)
{
page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
}
if (pte_same(*ptep, entry))
return 0;
if (mm_has_pgste(vma->vm_mm))
pgste = pgste_get_lock(ptep);
/*
* Test and clear referenced bit in storage key.
*/
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
unsigned long physpage = page_to_phys(page);
int ccode;
__ptep_ipte(address, ptep);
*ptep = entry;
asm volatile(
" rrbe 0,%1\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode) : "a" (physpage) : "cc" );
return ccode & 2;
if (mm_has_pgste(vma->vm_mm))
pgste_set_unlock(ptep, pgste);
return 1;
}
/*

View file

@@ -84,6 +84,7 @@ struct thread_struct {
struct per_event per_event; /* Cause of the last PER trap */
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
struct list_head list;
};
typedef struct thread_struct thread_struct;

View file

@@ -50,7 +50,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
/*
* If the process only ran on the local cpu, do a local flush.
*/
local_cpumask = cpumask_of_cpu(smp_processor_id());
cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
__tlb_flush_local();
else
@@ -80,16 +80,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* on all cpus instead of doing a local flush if the mm
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE) {
if (mm->context.noexec)
__tlb_flush_idte((unsigned long)
get_shadow_table(mm->pgd) |
mm->context.asce_bits);
if (MACHINE_HAS_IDTE)
__tlb_flush_idte((unsigned long) mm->pgd |
mm->context.asce_bits);
return;
}
__tlb_flush_full(mm);
else
__tlb_flush_full(mm);
}
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)

View file

@@ -385,6 +385,7 @@
/* Ignore system calls that are also reachable via sys_socket */
#define __IGNORE_recvmmsg
#define __IGNORE_sendmmsg
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR

View file

@@ -124,13 +124,11 @@ int main(void)
DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid));
DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));

View file

@@ -212,6 +212,7 @@ __switch_to:
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
st %r3,__LC_CURRENT # store task struct of next
mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
st %r5,__LC_THREAD_INFO # store thread info of next
ahi %r5,STACK_SIZE # end of kernel stack of next
st %r5,__LC_KERNEL_STACK # store end of kernel stack

View file

@@ -220,6 +220,7 @@ __switch_to:
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
stg %r3,__LC_CURRENT # store task struct of next
mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
stg %r5,__LC_THREAD_INFO # store thread info of next
aghi %r5,STACK_SIZE # end of kernel stack of next
stg %r5,__LC_KERNEL_STACK # store end of kernel stack

View file

@@ -32,6 +32,7 @@ static const struct irq_class intrclass_names[] = {
{.name = "VRT", .desc = "[EXT] Virtio" },
{.name = "SCP", .desc = "[EXT] Service Call" },
{.name = "IUC", .desc = "[EXT] IUCV" },
{.name = "CPM", .desc = "[EXT] CPU Measurement" },
{.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
{.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
{.name = "DAS", .desc = "[I/O] DASD" },

View file

@@ -9,41 +9,26 @@
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/tick.h>
#include <linux/elfcore.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <asm/compat.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#include "entry.h"

View file

@@ -305,8 +305,7 @@ static int set_amode_and_uaccess(unsigned long user_amode,
*/
static int __init early_parse_switch_amode(char *p)
{
if (user_mode != SECONDARY_SPACE_MODE)
user_mode = PRIMARY_SPACE_MODE;
user_mode = PRIMARY_SPACE_MODE;
return 0;
}
early_param("switch_amode", early_parse_switch_amode);
@@ -315,10 +314,6 @@ static int __init early_parse_user_mode(char *p)
{
if (p && strcmp(p, "primary") == 0)
user_mode = PRIMARY_SPACE_MODE;
#ifdef CONFIG_S390_EXEC_PROTECT
else if (p && strcmp(p, "secondary") == 0)
user_mode = SECONDARY_SPACE_MODE;
#endif
else if (!p || strcmp(p, "home") == 0)
user_mode = HOME_SPACE_MODE;
else
@@ -327,31 +322,9 @@ static int __init early_parse_user_mode(char *p)
}
early_param("user_mode", early_parse_user_mode);
#ifdef CONFIG_S390_EXEC_PROTECT
/*
* Enable execute protection?
*/
static int __init early_parse_noexec(char *p)
{
if (!strncmp(p, "off", 3))
return 0;
user_mode = SECONDARY_SPACE_MODE;
return 0;
}
early_param("noexec", early_parse_noexec);
#endif /* CONFIG_S390_EXEC_PROTECT */
static void setup_addressing_mode(void)
{
if (user_mode == SECONDARY_SPACE_MODE) {
if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
PSW32_ASC_SECONDARY))
pr_info("Execute protection active, "
"mvcos available\n");
else
pr_info("Execute protection active, "
"mvcos not available\n");
} else if (user_mode == PRIMARY_SPACE_MODE) {
if (user_mode == PRIMARY_SPACE_MODE) {
if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
pr_info("Address spaces switched, "
"mvcos available\n");

View file

@@ -335,7 +335,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
if (!cpu_stopped(logical_cpu))
continue;
cpu_set(logical_cpu, cpu_present_map);
set_cpu_present(logical_cpu, true);
smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
logical_cpu = cpumask_next(logical_cpu, &avail);
if (logical_cpu >= nr_cpu_ids)
@@ -367,7 +367,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
cpu_set(logical_cpu, cpu_present_map);
set_cpu_present(logical_cpu, true);
if (cpu >= info->configured)
smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
else
@@ -385,7 +385,7 @@ static int __smp_rescan_cpus(void)
{
cpumask_t avail;
cpus_xor(avail, cpu_possible_map, cpu_present_map);
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
if (smp_use_sigp_detection)
return smp_rescan_cpus_sigp(avail);
else
@@ -467,7 +467,7 @@ int __cpuinit start_secondary(void *cpuvoid)
notify_cpu_starting(smp_processor_id());
/* Mark this cpu as online */
ipi_call_lock();
cpu_set(smp_processor_id(), cpu_online_map);
set_cpu_online(smp_processor_id(), true);
ipi_call_unlock();
/* Switch on interrupts */
local_irq_enable();
@@ -644,7 +644,7 @@ int __cpu_disable(void)
struct ec_creg_mask_parms cr_parms;
int cpu = smp_processor_id();
cpu_clear(cpu, cpu_online_map);
set_cpu_online(cpu, false);
/* Disable pfault pseudo page faults on this cpu. */
pfault_fini();
@@ -654,8 +654,8 @@ int __cpu_disable(void)
/* disable all external interrupts */
cr_parms.orvals[0] = 0;
cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
1 << 10 | 1 << 9 | 1 << 6 | 1 << 4);
/* disable all I/O interrupts */
cr_parms.orvals[6] = 0;
cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
@@ -681,7 +681,7 @@ void __cpu_die(unsigned int cpu)
atomic_dec(&init_mm.context.attach_count);
}
void cpu_die(void)
void __noreturn cpu_die(void)
{
idle_task_exit();
while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
@@ -738,8 +738,8 @@ void __init smp_prepare_boot_cpu(void)
BUG_ON(smp_processor_id() != 0);
current_thread_info()->cpu = 0;
cpu_set(0, cpu_present_map);
cpu_set(0, cpu_online_map);
set_cpu_present(0, true);
set_cpu_online(0, true);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
current_set[0] = current;
smp_cpu_state[0] = CPU_STATE_CONFIGURED;
@@ -1016,21 +1016,21 @@ int __ref smp_rescan_cpus(void)
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
newcpus = cpu_present_map;
cpumask_copy(&newcpus, cpu_present_mask);
rc = __smp_rescan_cpus();
if (rc)
goto out;
cpus_andnot(newcpus, cpu_present_map, newcpus);
for_each_cpu_mask(cpu, newcpus) {
cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
for_each_cpu(cpu, &newcpus) {
rc = smp_add_present_cpu(cpu);
if (rc)
cpu_clear(cpu, cpu_present_map);
set_cpu_present(cpu, false);
}
rc = 0;
out:
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
if (!cpus_empty(newcpus))
if (!cpumask_empty(&newcpus))
topology_schedule_update();
return rc;
}
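A recurring mechanical change in the smp.c and topology.c hunks here is the migration from the old by-value cpumask helpers (cpu_set, cpus_clear, cpus_xor, cpumask_of_cpu, &cpu_online_map) to the pointer-based cpumask_* API and the cpu_*_mask symbols. A toy model of the new API's shape (my illustration; a single-word mask instead of the kernel's struct):

#include <stdbool.h>
#include <stdio.h>

struct cpumask { unsigned long bits; };

static void cpumask_set_cpu(int cpu, struct cpumask *m)	{ m->bits |= 1UL << cpu; }
static bool cpumask_test_cpu(int cpu, const struct cpumask *m)	{ return m->bits >> cpu & 1; }
static void cpumask_copy(struct cpumask *d, const struct cpumask *s) { d->bits = s->bits; }
static bool cpumask_empty(const struct cpumask *m)	{ return m->bits == 0; }

int main(void)
{
	struct cpumask present = { 0 }, snapshot;

	cpumask_set_cpu(0, &present);		/* was: cpu_set(0, cpu_present_map) */
	cpumask_copy(&snapshot, &present);	/* was: snapshot = cpu_present_map  */
	printf("cpu0 present: %d, empty: %d\n",
	       cpumask_test_cpu(0, &snapshot), cpumask_empty(&snapshot));
	return 0;
}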

View file

@@ -810,7 +810,7 @@ static int etr_sync_clock_stop(struct etr_aib *aib, int port)
etr_sync.etr_port = port;
get_online_cpus();
atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask);
put_online_cpus();
return rc;
}
@@ -1579,7 +1579,7 @@ static void stp_work_fn(struct work_struct *work)
memset(&stp_sync, 0, sizeof(stp_sync));
get_online_cpus();
atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
put_online_cpus();
if (!check_sync_clock())

View file

@@ -52,20 +52,20 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
cpumask_t mask;
cpus_clear(mask);
cpumask_clear(&mask);
if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
cpumask_copy(&mask, cpumask_of(cpu));
return mask;
}
while (info) {
if (cpu_isset(cpu, info->mask)) {
if (cpumask_test_cpu(cpu, &info->mask)) {
mask = info->mask;
break;
}
info = info->next;
}
if (cpus_empty(mask))
mask = cpumask_of_cpu(cpu);
if (cpumask_empty(&mask))
cpumask_copy(&mask, cpumask_of(cpu));
return mask;
}
@@ -85,10 +85,10 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
if (cpu_logical_map(lcpu) != rcpu)
continue;
#ifdef CONFIG_SCHED_BOOK
cpu_set(lcpu, book->mask);
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
#endif
cpu_set(lcpu, core->mask);
cpumask_set_cpu(lcpu, &core->mask);
cpu_core_id[lcpu] = core->id;
smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
@@ -101,13 +101,13 @@ static void clear_masks(void)
info = &core_info;
while (info) {
cpus_clear(info->mask);
cpumask_clear(&info->mask);
info = info->next;
}
#ifdef CONFIG_SCHED_BOOK
info = &book_info;
while (info) {
cpus_clear(info->mask);
cpumask_clear(&info->mask);
info = info->next;
}
#endif

View file

@@ -22,6 +22,9 @@ obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so

View file

@@ -22,6 +22,9 @@ obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so

View file

@@ -77,7 +77,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
PERCPU(0x100, PAGE_SIZE)
PERCPU_SECTION(0x100)
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */

View file

@@ -412,6 +412,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
struct dcss_segment *seg;
int rc, diag_cc;
start_addr = end_addr = 0;
seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
if (seg == NULL) {
rc = -ENOMEM;
@@ -573,6 +574,7 @@ segment_modify_shared (char *name, int do_nonshared)
unsigned long start_addr, end_addr, dummy;
int rc, diag_cc;
start_addr = end_addr = 0;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
@@ -681,8 +683,6 @@ void
segment_save(char *name)
{
struct dcss_segment *seg;
int startpfn = 0;
int endpfn = 0;
char cmd1[160];
char cmd2[80];
int i, response;
@@ -698,8 +698,6 @@ segment_save(char *name)
goto out;
}
startpfn = seg->start_addr >> PAGE_SHIFT;
endpfn = (seg->end) >> PAGE_SHIFT;
sprintf(cmd1, "DEFSEG %s", name);
for (i=0; i<seg->segcnt; i++) {
sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",

View file

@@ -225,33 +225,6 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
force_sig_info(SIGBUS, &si, tsk);
}
#ifdef CONFIG_S390_EXEC_PROTECT
static noinline int signal_return(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code)
{
u16 instruction;
int rc;
rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
if (!rc && instruction == 0x0a77) {
clear_tsk_thread_flag(current, TIF_PER_TRAP);
if (is_compat_task())
sys32_sigreturn();
else
sys_sigreturn();
} else if (!rc && instruction == 0x0aad) {
clear_tsk_thread_flag(current, TIF_PER_TRAP);
if (is_compat_task())
sys32_rt_sigreturn();
else
sys_rt_sigreturn();
} else
do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
return 0;
}
#endif /* CONFIG_S390_EXEC_PROTECT */
static noinline void do_fault_error(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code, int fault)
{
@@ -259,13 +232,6 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
switch (fault) {
case VM_FAULT_BADACCESS:
#ifdef CONFIG_S390_EXEC_PROTECT
if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
(trans_exc_code & 3) == 0) {
signal_return(regs, int_code, trans_exc_code);
break;
}
#endif /* CONFIG_S390_EXEC_PROTECT */
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
@@ -414,11 +380,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
int access, fault;
access = VM_READ | VM_EXEC | VM_WRITE;
#ifdef CONFIG_S390_EXEC_PROTECT
if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
(trans_exc_code & 3) == 0)
access = VM_EXEC;
#endif
fault = do_exception(regs, access, trans_exc_code);
if (unlikely(fault))
do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
@@ -491,22 +452,28 @@ static int __init nopfault(char *str)
__setup("nopfault", nopfault);
typedef struct {
__u16 refdiagc;
__u16 reffcode;
__u16 refdwlen;
__u16 refversn;
__u64 refgaddr;
__u64 refselmk;
__u64 refcmpmk;
__u64 reserved;
} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
struct pfault_refbk {
u16 refdiagc;
u16 reffcode;
u16 refdwlen;
u16 refversn;
u64 refgaddr;
u64 refselmk;
u64 refcmpmk;
u64 reserved;
} __attribute__ ((packed, aligned(8)));
int pfault_init(void)
{
pfault_refbk_t refbk =
{ 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
__PF_RES_FIELD };
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 0,
.refdwlen = 5,
.refversn = 2,
.refgaddr = __LC_CURRENT_PID,
.refselmk = 1ULL << 48,
.refcmpmk = 1ULL << 48,
.reserved = __PF_RES_FIELD };
int rc;
if (!MACHINE_IS_VM || pfault_disable)
@@ -524,8 +491,12 @@ int pfault_init(void)
void pfault_fini(void)
{
pfault_refbk_t refbk =
{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 1,
.refdwlen = 5,
.refversn = 2,
};
if (!MACHINE_IS_VM || pfault_disable)
return;
@@ -537,11 +508,15 @@ void pfault_fini(void)
: : "a" (&refbk), "m" (refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);
static void pfault_interrupt(unsigned int ext_int_code,
unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
pid_t pid;
/*
* Get the external interruption subcode & pfault
@@ -553,44 +528,79 @@ static void pfault_interrupt(unsigned int ext_int_code,
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
/*
* Get the token (= address of the task structure of the affected task).
*/
#ifdef CONFIG_64BIT
tsk = (struct task_struct *) param64;
#else
tsk = (struct task_struct *) param32;
#endif
if (subcode & 0x0080) {
/* Get the token (= pid of the affected task). */
pid = sizeof(void *) == 4 ? param32 : param64;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
get_task_struct(tsk);
rcu_read_unlock();
if (!tsk)
return;
} else {
tsk = current;
}
spin_lock(&pfault_lock);
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
list_del(&tsk->thread.list);
wake_up_process(tsk);
put_task_struct(tsk);
} else {
/* Completion interrupt was faster than initial
* interrupt. Set pfault_wait to -1 so the initial
* interrupt doesn't put the task to sleep. */
tsk->thread.pfault_wait = -1;
}
put_task_struct(tsk);
} else {
/* signal bit not set -> a real page is missing. */
get_task_struct(tsk);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
* interrupt (swapped in a -1 for pfault_wait). Set
* pfault_wait back to zero and exit. This can be
* done safely because tsk is running in kernel
* mode and can't produce new pfaults. */
* interrupt (pfault_wait == -1). Set pfault_wait
* back to zero and exit. */
tsk->thread.pfault_wait = 0;
set_task_state(tsk, TASK_RUNNING);
put_task_struct(tsk);
} else
} else {
/* Initial interrupt arrived before completion
* interrupt. Let the task sleep. */
tsk->thread.pfault_wait = 1;
list_add(&tsk->thread.list, &pfault_list);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
}
}
spin_unlock(&pfault_lock);
}
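The handler above resolves a race between the initial ("page missing") and completion ("page swapped in") interrupts, which may arrive in either order. A compact model of the pfault_wait transitions (my reading of the code: 0 = idle, 1 = task parked, -1 = completion arrived first):

#include <assert.h>

/* pfault_wait transitions, per the handler above:
 *   initial interrupt:    -1 -> 0 (completion already won), else -> 1 (park task)
 *   completion interrupt:  1 -> 0 (wake parked task),       else -> -1 (completion first)
 */
static int pfault_initial(int wait)	{ return wait == -1 ? 0 : 1; }
static int pfault_completion(int wait)	{ return wait ==  1 ? 0 : -1; }

int main(void)
{
	/* either arrival order ends back in the idle state */
	assert(pfault_completion(pfault_initial(0)) == 0);
	assert(pfault_initial(pfault_completion(0)) == 0);
	return 0;
}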
static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct thread_struct *thread, *next;
struct task_struct *tsk;
switch (action) {
case CPU_DEAD:
case CPU_DEAD_FROZEN:
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
thread->pfault_wait = 0;
list_del(&thread->list);
tsk = container_of(thread, struct task_struct, thread);
wake_up_process(tsk);
}
spin_unlock_irq(&pfault_lock);
break;
default:
break;
}
return NOTIFY_OK;
}
static int __init pfault_irq_init(void)
@@ -599,22 +609,21 @@ static int __init pfault_irq_init(void)
if (!MACHINE_IS_VM)
return 0;
/*
* Try to get pfault pseudo page faults going.
*/
rc = register_external_interrupt(0x2603, pfault_interrupt);
if (rc) {
pfault_disable = 1;
return rc;
}
if (pfault_init() == 0)
return 0;
/* Tough luck, no pfault. */
pfault_disable = 1;
unregister_external_interrupt(0x2603, pfault_interrupt);
if (rc)
goto out_extint;
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
if (rc)
goto out_pfault;
hotcpu_notifier(pfault_cpu_notify, 0);
return 0;
out_pfault:
unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
pfault_disable = 1;
return rc;
}
early_initcall(pfault_irq_init);
#endif
#endif /* CONFIG_PFAULT */

View file

@@ -13,7 +13,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *pteptr, pte_t pteval)
{
pmd_t *pmdp = (pmd_t *) pteptr;
pte_t shadow_pteval = pteval;
unsigned long mask;
if (!MACHINE_HAS_HPAGE) {
@@ -21,18 +20,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mask = pte_val(pteval) &
(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
if (mm->context.noexec) {
pteptr += PTRS_PER_PTE;
pte_val(shadow_pteval) =
(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
}
}
pmd_val(*pmdp) = pte_val(pteval);
if (mm->context.noexec) {
pmdp = get_shadow_table(pmdp);
pmd_val(*pmdp) = pte_val(shadow_pteval);
}
}
int arch_prepare_hugepage(struct page *page)

View file

@@ -175,7 +175,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
ptep_invalidate(&init_mm, address, pte);
__ptep_ipte(address, pte);
pte_val(*pte) = _PAGE_TYPE_EMPTY;
continue;
}
*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));

View file

@@ -28,7 +28,7 @@ static void change_page_attr(unsigned long addr, int numpages,
pte = *ptep;
pte = set(pte);
ptep_invalidate(&init_mm, addr, ptep);
__ptep_ipte(addr, ptep);
*ptep = pte;
addr += PAGE_SIZE;
}

View file

@@ -40,7 +40,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
static void __page_table_free(struct mm_struct *mm, unsigned long *table);
static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
@@ -67,7 +66,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
while (batch->pgt_index > 0)
__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
while (batch->crst_index < RCU_FREELIST_SIZE)
__crst_table_free(batch->mm, batch->table[batch->crst_index++]);
crst_table_free(batch->mm, batch->table[batch->crst_index++]);
free_page((unsigned long) batch);
}
@@ -125,63 +124,33 @@ static int __init parse_vmalloc(char *arg)
}
early_param("vmalloc", parse_vmalloc);
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
if (!page)
return NULL;
page->index = 0;
if (noexec) {
struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
if (!shadow) {
__free_pages(page, ALLOC_ORDER);
return NULL;
}
page->index = page_to_phys(shadow);
}
spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.crst_list);
spin_unlock_bh(&mm->context.list_lock);
return (unsigned long *) page_to_phys(page);
}
static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
{
unsigned long *shadow = get_shadow_table(table);
if (shadow)
free_pages((unsigned long) shadow, ALLOC_ORDER);
free_pages((unsigned long) table, ALLOC_ORDER);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
struct page *page = virt_to_page(table);
spin_lock_bh(&mm->context.list_lock);
list_del(&page->lru);
spin_unlock_bh(&mm->context.list_lock);
__crst_table_free(mm, table);
free_pages((unsigned long) table, ALLOC_ORDER);
}
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
struct rcu_table_freelist *batch;
struct page *page = virt_to_page(table);
spin_lock_bh(&mm->context.list_lock);
list_del(&page->lru);
spin_unlock_bh(&mm->context.list_lock);
if (atomic_read(&mm->mm_users) < 2 &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
__crst_table_free(mm, table);
crst_table_free(mm, table);
return;
}
batch = rcu_table_freelist_get(mm);
if (!batch) {
smp_call_function(smp_sync, NULL, 1);
__crst_table_free(mm, table);
crst_table_free(mm, table);
return;
}
batch->table[--batch->crst_index] = table;
@@ -197,7 +166,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
BUG_ON(limit > (1UL << 53));
repeat:
table = crst_table_alloc(mm, mm->context.noexec);
table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
spin_lock_bh(&mm->page_table_lock);
@@ -273,7 +242,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
unsigned long *table;
unsigned long bits;
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits = (mm->context.has_pgste) ? 3UL : 1UL;
spin_lock_bh(&mm->context.list_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
@@ -329,7 +298,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned long bits;
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits = (mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock_bh(&mm->context.list_lock);
@@ -366,7 +335,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
page_table_free(mm, table);
return;
}
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits = (mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock_bh(&mm->context.list_lock);
@@ -379,25 +348,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
rcu_table_freelist_finish();
}
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
struct page *page;
spin_lock_bh(&mm->context.list_lock);
/* Free shadow region and segment tables. */
list_for_each_entry(page, &mm->context.crst_list, lru)
if (page->index) {
free_pages((unsigned long) page->index, ALLOC_ORDER);
page->index = 0;
}
/* "Free" second halves of page tables. */
list_for_each_entry(page, &mm->context.pgtable_list, lru)
page->flags &= ~SECOND_HALVES;
spin_unlock_bh(&mm->context.list_lock);
mm->context.noexec = 0;
update_mm(mm, tsk);
}
/*
* switch on pgstes for its userspace process (for kvm)
*/

View file

@@ -95,7 +95,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
@@ -103,7 +103,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pud_populate_kernel(&init_mm, pu_dir, pm_dir);
pud_populate(&init_mm, pu_dir, pm_dir);
}
pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
@@ -123,7 +123,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
pmd_populate(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
@@ -159,7 +159,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
continue;
if (pmd_huge(*pm_dir)) {
pmd_clear_kernel(pm_dir);
pmd_clear(pm_dir);
address += HPAGE_SIZE - PAGE_SIZE;
continue;
}
@@ -192,7 +192,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
@@ -200,7 +200,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pud_populate_kernel(&init_mm, pu_dir, pm_dir);
pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
@@ -208,7 +208,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
pmd_populate(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);

View file

@@ -5,6 +5,7 @@
* Author: Heinz Graalfs <graalfs@de.ibm.com>
*/
#include <linux/kernel_stat.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
@@ -674,17 +675,11 @@ int hwsampler_activate(unsigned int cpu)
static void hws_ext_handler(unsigned int ext_int_code,
unsigned int param32, unsigned long param64)
{
int cpu;
struct hws_cpu_buffer *cb;
cpu = smp_processor_id();
cb = &per_cpu(sampler_cpu_buffer, cpu);
atomic_xchg(
&cb->ext_params,
atomic_read(&cb->ext_params)
| S390_lowcore.ext_params);
kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
cb = &__get_cpu_var(sampler_cpu_buffer);
atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
if (hws_wq)
queue_work(hws_wq, &cb->worker);
}
@@ -764,7 +759,7 @@ static int worker_check_error(unsigned int cpu, int ext_params)
if (!sdbt || !*sdbt)
return -EINVAL;
if (ext_params & EI_IEA)
if (ext_params & EI_PRA)
cb->req_alert++;
if (ext_params & EI_LSDA)
@@ -1009,7 +1004,7 @@ int hwsampler_deallocate()
if (hws_state != HWS_STOPPED)
goto deallocate_exit;
smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
deallocate_sdbt();
hws_state = HWS_DEALLOCATED;
@@ -1123,7 +1118,7 @@ int hwsampler_shutdown()
mutex_lock(&hws_sem);
if (hws_state == HWS_STOPPED) {
smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
deallocate_sdbt();
}
if (hws_wq) {
@@ -1198,7 +1193,7 @@ int hwsampler_start_all(unsigned long rate)
hws_oom = 1;
hws_flush_all = 0;
/* now let them in, 1407 CPUMF external interrupts */
smp_ctl_set_bit(0, 5); /* set CR0 bit 58 */
ctl_set_bit(0, 5); /* set CR0 bit 58 */
return 0;
}

Some files were not shown because too many files have changed in this diff