Linux 3.9-rc2

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.19 (GNU/Linux)

iQEcBAABAgAGBQJRPR09AAoJEHm+PkMAQRiGQLoIAJpgaFDY2bRKuApUOu61sU2b
Cwiza9m0TZrGMUguXE7suudfC4i8PRkaOZOJWk6TUg/nTT1cl4kNscasgFamKGt4
zcpiooVnox1e7gpWS1P9NWpuVY7pFC87FrN4dbXgFs45slYNy+sGlTp9/LMviZ13
mR3aTG2R7o4GBapD+gpqX8hroV53o8X12kjln6aLghoNLfEdtlAawoiWO5PTHHl8
4Pu4DANoyA7Z9pIMT3++uTWl/BR4bbpBVFIqkUomUFRYFjtwhHBXr+ak11pK7jTt
EJ5HYYIiOcm0v2nVvIovNwUKnWTNopK4P+bx/vgFjANpwV132v5nPuf/Ucoj0U0=
=eQ4p
-----END PGP SIGNATURE-----

Merge tag 'v3.9-rc2' into asoc-core

Linux 3.9-rc2

commit 2db6be6a7b
312 changed files with 4042 additions and 1350 deletions

CREDITS (6 changes)

@@ -953,11 +953,11 @@ S: Blacksburg, Virginia 24061
S: USA

N: Randy Dunlap
E: rdunlap@xenotime.net
W: http://www.xenotime.net/linux/linux.html
W: http://www.linux-usb.org
E: rdunlap@infradead.org
W: http://www.infradead.org/~rdunlap/
D: Linux-USB subsystem, USB core/UHCI/printer/storage drivers
D: x86 SMP, ACPI, bootflag hacking
D: documentation, builds
S: (ask for current address)
S: USA

@@ -60,8 +60,7 @@ own source tree. For example:
"dontdiff" is a list of files which are generated by the kernel during
the build process, and should be ignored in any diff(1)-generated
patch. The "dontdiff" file is included in the kernel tree in
2.6.12 and later. For earlier kernel versions, you can get it
from <http://www.xenotime.net/linux/doc/dontdiff>.
2.6.12 and later.

Make sure your patch does not include any extra files which do not
belong in a patch submission. Make sure to review your patch -after-

@@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
raid10 Various RAID10 inspired algorithms chosen by additional params
- RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
- RAID1E: Integrated Adjacent Stripe Mirroring
- RAID1E: Integrated Offset Stripe Mirroring
- and other similar RAID10 variants

Reference: Chapter 4 of

@@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
synchronisation state for each region.

[raid10_copies <# copies>]
[raid10_format near]
[raid10_format <near|far|offset>]
These two options are used to alter the default layout of
a RAID10 configuration. The number of copies is can be
specified, but the default is 2. There are other variations
to how the copies are laid down - the default and only current
option is "near". Near copies are what most people think of
with respect to mirroring. If these options are left
unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
are given, then the layouts for 2, 3 and 4 devices are:
specified, but the default is 2. There are also three
variations to how the copies are laid down - the default
is "near". Near copies are what most people think of with
respect to mirroring. If these options are left unspecified,
or 'raid10_copies 2' and/or 'raid10_format near' are given,
then the layouts for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- ---------- --------------
A1 A1 A1 A1 A2 A1 A1 A2 A2

@@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
3-device layout is what might be called a 'RAID1E - Integrated
Adjacent Stripe Mirroring'.

If 'raid10_copies 2' and 'raid10_format far', then the layouts
for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- -------------- --------------------
A1 A2 A1 A2 A3 A1 A2 A3 A4
A3 A4 A4 A5 A6 A5 A6 A7 A8
A5 A6 A7 A8 A9 A9 A10 A11 A12
.. .. .. .. .. .. .. .. ..
A2 A1 A3 A1 A2 A2 A1 A4 A3
A4 A3 A6 A4 A5 A6 A5 A8 A7
A6 A5 A9 A7 A8 A10 A9 A12 A11
.. .. .. .. .. .. .. .. ..

If 'raid10_copies 2' and 'raid10_format offset', then the
layouts for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- ------------ -----------------
A1 A2 A1 A2 A3 A1 A2 A3 A4
A2 A1 A3 A1 A2 A2 A1 A4 A3
A3 A4 A4 A5 A6 A5 A6 A7 A8
A4 A3 A6 A4 A5 A6 A5 A8 A7
A5 A6 A7 A8 A9 A9 A10 A11 A12
A6 A5 A9 A7 A8 A10 A9 A12 A11
.. .. .. .. .. .. .. .. ..
Here we see layouts closely akin to 'RAID1E - Integrated
Offset Stripe Mirroring'.

<#raid_devs>: The number of devices composing the array.
Each device consists of two entries. The first is the device
containing the metadata (if any); the second is the one containing the

@@ -142,3 +170,5 @@ Version History
1.3.0 Added support for RAID 10
1.3.1 Allow device replacement/rebuild for RAID 10
1.3.2 Fix/improve redundancy checking for RAID10
1.4.0 Non-functional change. Removes arg from mapping function.
1.4.1 Add RAID10 "far" and "offset" algorithm support.
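
As a purely illustrative aid (not part of this commit), the new options above could appear in a device-mapper table line such as the following sketch, where the sector count and the block device numbers are placeholders and each "-" stands for an omitted metadata device:

    0 1960893648 raid raid10 5 2048 raid10_copies 2 raid10_format offset 4 - 8:17 - 8:33 - 8:49 - 8:65

Here the leading "5" counts the raid parameters that follow (the 2048-sector chunk size plus the two keyword/value pairs), and "4" is the <#raid_devs> value described above.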

@@ -15,7 +15,7 @@ Supported chips:
Addresses scanned: -
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -4,9 +4,14 @@ Kernel driver adt7410
Supported chips:
* Analog Devices ADT7410
Prefix: 'adt7410'
Addresses scanned: I2C 0x48 - 0x4B
Addresses scanned: None
Datasheet: Publicly available at the Analog Devices website
http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
* Analog Devices ADT7420
Prefix: 'adt7420'
Addresses scanned: None
Datasheet: Publicly available at the Analog Devices website
http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf

Author: Hartmut Knaack <knaack.h@gmx.de>

@@ -27,6 +32,10 @@ value per second or even just get one sample on demand for power saving.
Besides, it can completely power down its ADC, if power management is
required.

The ADT7420 is register compatible, the only differences being the package,
a slightly narrower operating temperature range (-40°C to +150°C), and a
better accuracy (0.25°C instead of 0.50°C.)

Configuration Notes
-------------------

@@ -49,7 +49,7 @@ Supported chips:
Addresses scanned: I2C 0x18 - 0x1f

Author:
Guenter Roeck <guenter.roeck@ericsson.com>
Guenter Roeck <linux@roeck-us.net>


Description

@@ -8,7 +8,7 @@ Supported devices:
Documentation:
http://www.lineagepower.com/oem/pdf/CPLI2C.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -19,7 +19,7 @@ Supported chips:
Datasheet:
http://www.national.com/pf/LM/LM5066.html

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -5,13 +5,13 @@ Supported chips:
* Linear Technology LTC2978
Prefix: 'ltc2978'
Addresses scanned: -
Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
Datasheet: http://www.linear.com/product/ltc2978
* Linear Technology LTC3880
Prefix: 'ltc3880'
Addresses scanned: -
Datasheet: http://cds.linear.com/docs/Datasheet/3880f.pdf
Datasheet: http://www.linear.com/product/ltc3880

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -8,7 +8,7 @@ Supported chips:
Datasheet:
http://cds.linear.com/docs/Datasheet/42612fb.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -7,7 +7,7 @@ Supported chips:
Addresses scanned: -
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -24,7 +24,7 @@ Supported chips:
http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf


Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -27,7 +27,7 @@ Supported chips:
Addresses scanned: -
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -7,7 +7,7 @@ Supported chips:
Addresses scanned: -
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -34,7 +34,7 @@ Supported chips:
Addresses scanned: -
Datasheet: n.a.

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -29,7 +29,7 @@ Supported chips:
http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Module Parameters

@@ -11,7 +11,7 @@ Supported chips:
http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
http://focus.ti.com/lit/ds/symlink/ucd90910.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -15,7 +15,7 @@ Supported chips:
http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
http://focus.ti.com/lit/ds/symlink/ucd9248.pdf

Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -54,7 +54,7 @@ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256


Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -1,6 +1,5 @@
*=============*
* OPP Library *
*=============*
Operating Performance Points (OPP) Library
==========================================

(C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated

@@ -16,15 +15,31 @@ Contents

1. Introduction
===============
1.1 What is an Operating Performance Point (OPP)?

Complex SoCs of today consists of a multiple sub-modules working in conjunction.
In an operational system executing varied use cases, not all modules in the SoC
need to function at their highest performing frequency all the time. To
facilitate this, sub-modules in a SoC are grouped into domains, allowing some
domains to run at lower voltage and frequency while other domains are loaded
more. The set of discrete tuples consisting of frequency and voltage pairs that
domains to run at lower voltage and frequency while other domains run at
voltage/frequency pairs that are higher.

The set of discrete tuples consisting of frequency and voltage pairs that
the device will support per domain are called Operating Performance Points or
OPPs.

As an example:
Let us consider an MPU device which supports the following:
{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
{1GHz at minimum voltage of 1.3V}

We can represent these as three OPPs as the following {Hz, uV} tuples:
{300000000, 1000000}
{800000000, 1200000}
{1000000000, 1300000}

1.2 Operating Performance Points Library

OPP library provides a set of helper functions to organize and query the OPP
information. The library is located in drivers/base/power/opp.c and the header
is located in include/linux/opp.h. OPP library can be enabled by enabling
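
As a purely illustrative sketch (not part of this commit), the three example {Hz, uV} tuples above could be registered and queried with the 3.9-era OPP library API from include/linux/opp.h roughly as follows; the mpu_dev pointer and the 600 MHz request are assumptions made up for the example:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/opp.h>
    #include <linux/printk.h>
    #include <linux/rcupdate.h>

    static int register_example_opps(struct device *mpu_dev)
    {
            struct opp *opp;
            unsigned long freq = 600000000; /* ask for the lowest OPP >= 600 MHz */
            int ret;

            /* The three {Hz, uV} tuples from the example above. */
            ret = opp_add(mpu_dev, 300000000, 1000000);
            if (!ret)
                    ret = opp_add(mpu_dev, 800000000, 1200000);
            if (!ret)
                    ret = opp_add(mpu_dev, 1000000000, 1300000);
            if (ret)
                    return ret;

            /* Lookups in this API are RCU protected. */
            rcu_read_lock();
            opp = opp_find_freq_ceil(mpu_dev, &freq);
            if (!IS_ERR(opp))
                    pr_info("chosen OPP: %lu Hz at %lu uV\n",
                            opp_get_freq(opp), opp_get_voltage(opp));
            rcu_read_unlock();

            return 0;
    }

opp_find_freq_ceil() rounds the requested frequency up to the nearest available OPP, so the 600 MHz request in this sketch would land on the 800 MHz entry.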

@@ -170,5 +170,5 @@ Reminder: sizeof() result is of type size_t.
Thank you for your cooperation and attention.


By Randy Dunlap <rdunlap@xenotime.net> and
By Randy Dunlap <rdunlap@infradead.org> and
Andrew Murray <amurray@mpc-data.co.uk>

MAINTAINERS (18 changes)

@@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)

-----------------------------------

3C505 NETWORK DRIVER
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/i825xx/3c505*

3C59X NETWORK DRIVER
M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
L: netdev@vger.kernel.org

@@ -2361,12 +2355,6 @@ W: http://www.arm.linux.org.uk/
S: Maintained
F: drivers/video/cyber2000fb.*

CYCLADES 2X SYNC CARD DRIVER
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
W: http://oops.ghostprotocols.net:81/blog
S: Maintained
F: drivers/net/wan/cycx*

CYCLADES ASYNC MUX DRIVER
W: http://www.cyclades.com/
S: Orphan

@@ -3067,12 +3055,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F: drivers/video/s1d13xxxfb.c
F: include/video/s1d13xxxfb.h

ETHEREXPRESS-16 NETWORK DRIVER
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/i825xx/eexpress.*

ETHERNET BRIDGE
M: Stephen Hemminger <stephen@networkplumber.org>
L: bridge@lists.linux-foundation.org

Makefile (2 changes)

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Unicycling Gorilla

# *DOCUMENTATION*

|
@ -4,6 +4,7 @@
|
|||
* initial bootloader stuff..
|
||||
*/
|
||||
|
||||
#include <asm/pal.h>
|
||||
|
||||
.set noreorder
|
||||
.globl __start
|
||||
|
|
|
@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
|
|||
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
|
||||
endif
|
||||
|
||||
ccflags-y := -fpic -fno-builtin -I$(obj)
|
||||
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
|
||||
asflags-y := -Wa,-march=all -DZIMAGE
|
||||
|
||||
# Supply kernel BSS size to the decompressor via a linker symbol.
|
||||
|
|
|
@ -5,15 +5,15 @@
|
|||
|
||||
typedef struct {
|
||||
#ifdef CONFIG_CPU_HAS_ASID
|
||||
u64 id;
|
||||
atomic64_t id;
|
||||
#endif
|
||||
unsigned int vmalloc_seq;
|
||||
unsigned int vmalloc_seq;
|
||||
} mm_context_t;
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_ASID
|
||||
#define ASID_BITS 8
|
||||
#define ASID_MASK ((~0ULL) << ASID_BITS)
|
||||
#define ASID(mm) ((mm)->context.id & ~ASID_MASK)
|
||||
#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
|
||||
#else
|
||||
#define ASID(mm) (0)
|
||||
#endif
|
||||
|
@ -26,7 +26,7 @@ typedef struct {
|
|||
* modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned long end_brk;
|
||||
unsigned long end_brk;
|
||||
} mm_context_t;
|
||||
|
||||
#endif
|
||||
|
|
|
@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
|
|||
#ifdef CONFIG_CPU_HAS_ASID
|
||||
|
||||
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
|
||||
#define init_new_context(tsk,mm) ({ mm->context.id = 0; })
|
||||
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
|
||||
|
||||
#else /* !CONFIG_CPU_HAS_ASID */
|
||||
|
||||
|
|
|
@ -34,10 +34,13 @@
|
|||
#define TLB_V6_D_ASID (1 << 17)
|
||||
#define TLB_V6_I_ASID (1 << 18)
|
||||
|
||||
#define TLB_V6_BP (1 << 19)
|
||||
|
||||
/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
|
||||
#define TLB_V7_UIS_PAGE (1 << 19)
|
||||
#define TLB_V7_UIS_FULL (1 << 20)
|
||||
#define TLB_V7_UIS_ASID (1 << 21)
|
||||
#define TLB_V7_UIS_PAGE (1 << 20)
|
||||
#define TLB_V7_UIS_FULL (1 << 21)
|
||||
#define TLB_V7_UIS_ASID (1 << 22)
|
||||
#define TLB_V7_UIS_BP (1 << 23)
|
||||
|
||||
#define TLB_BARRIER (1 << 28)
|
||||
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
|
||||
|
@ -150,7 +153,8 @@
|
|||
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
|
||||
TLB_V6_I_FULL | TLB_V6_D_FULL | \
|
||||
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
|
||||
TLB_V6_I_ASID | TLB_V6_D_ASID)
|
||||
TLB_V6_I_ASID | TLB_V6_D_ASID | \
|
||||
TLB_V6_BP)
|
||||
|
||||
#ifdef CONFIG_CPU_TLB_V6
|
||||
# define v6wbi_possible_flags v6wbi_tlb_flags
|
||||
|
@ -166,9 +170,11 @@
|
|||
#endif
|
||||
|
||||
#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
|
||||
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
|
||||
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
|
||||
TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
|
||||
#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
|
||||
TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
|
||||
TLB_V6_U_FULL | TLB_V6_U_PAGE | \
|
||||
TLB_V6_U_ASID | TLB_V6_BP)
|
||||
|
||||
#ifdef CONFIG_CPU_TLB_V7
|
||||
|
||||
|
@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
|
|||
}
|
||||
}
|
||||
|
||||
static inline void local_flush_bp_all(void)
|
||||
{
|
||||
const int zero = 0;
|
||||
const unsigned int __tlb_flag = __cpu_tlb_flags;
|
||||
|
||||
if (tlb_flag(TLB_V7_UIS_BP))
|
||||
asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
|
||||
else if (tlb_flag(TLB_V6_BP))
|
||||
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
|
||||
|
||||
if (tlb_flag(TLB_BARRIER))
|
||||
isb();
|
||||
}
|
||||
|
||||
/*
|
||||
* flush_pmd_entry
|
||||
*
|
||||
|
@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
|
|||
#define flush_tlb_kernel_page local_flush_tlb_kernel_page
|
||||
#define flush_tlb_range local_flush_tlb_range
|
||||
#define flush_tlb_kernel_range local_flush_tlb_kernel_range
|
||||
#define flush_bp_all local_flush_bp_all
|
||||
#else
|
||||
extern void flush_tlb_all(void);
|
||||
extern void flush_tlb_mm(struct mm_struct *mm);
|
||||
|
@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
|
|||
extern void flush_tlb_kernel_page(unsigned long kaddr);
|
||||
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
|
||||
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
|
||||
extern void flush_bp_all(void);
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -404,7 +404,7 @@
|
|||
#define __NR_setns (__NR_SYSCALL_BASE+375)
|
||||
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
|
||||
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
|
||||
/* 378 for kcmp */
|
||||
#define __NR_kcmp (__NR_SYSCALL_BASE+378)
|
||||
#define __NR_finit_module (__NR_SYSCALL_BASE+379)
|
||||
|
||||
/*
|
||||
|
|
|
@ -110,7 +110,7 @@ int main(void)
|
|||
BLANK();
|
||||
#endif
|
||||
#ifdef CONFIG_CPU_HAS_ASID
|
||||
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
|
||||
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
|
||||
BLANK();
|
||||
#endif
|
||||
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
|
||||
|
|
|
@ -387,7 +387,7 @@
|
|||
/* 375 */ CALL(sys_setns)
|
||||
CALL(sys_process_vm_readv)
|
||||
CALL(sys_process_vm_writev)
|
||||
CALL(sys_ni_syscall) /* reserved for sys_kcmp */
|
||||
CALL(sys_kcmp)
|
||||
CALL(sys_finit_module)
|
||||
#ifndef syscalls_counted
|
||||
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
|
||||
|
|
|
@ -184,13 +184,22 @@ __create_page_tables:
|
|||
orr r3, r3, #3 @ PGD block type
|
||||
mov r6, #4 @ PTRS_PER_PGD
|
||||
mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
|
||||
1: str r3, [r0], #4 @ set bottom PGD entry bits
|
||||
1:
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
str r7, [r0], #4 @ set top PGD entry bits
|
||||
str r3, [r0], #4 @ set bottom PGD entry bits
|
||||
#else
|
||||
str r3, [r0], #4 @ set bottom PGD entry bits
|
||||
str r7, [r0], #4 @ set top PGD entry bits
|
||||
#endif
|
||||
add r3, r3, #0x1000 @ next PMD table
|
||||
subs r6, r6, #1
|
||||
bne 1b
|
||||
|
||||
add r4, r4, #0x1000 @ point to the PMD tables
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
add r4, r4, #4 @ we only write the bottom word
|
||||
#endif
|
||||
#endif
|
||||
|
||||
ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
|
||||
|
@ -258,6 +267,11 @@ __create_page_tables:
|
|||
addne r6, r6, #1 << SECTION_SHIFT
|
||||
strne r6, [r3]
|
||||
|
||||
#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
|
||||
sub r4, r4, #4 @ Fixup page table pointer
|
||||
@ for 64-bit descriptors
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEBUG_LL
|
||||
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
|
||||
/*
|
||||
|
@ -276,12 +290,16 @@ __create_page_tables:
|
|||
orr r3, r7, r3, lsl #SECTION_SHIFT
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
mov r7, #1 << (54 - 32) @ XN
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
str r7, [r0], #4
|
||||
str r3, [r0], #4
|
||||
#else
|
||||
str r3, [r0], #4
|
||||
str r7, [r0], #4
|
||||
#endif
|
||||
#else
|
||||
orr r3, r3, #PMD_SECT_XN
|
||||
#endif
|
||||
str r3, [r0], #4
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
str r7, [r0], #4
|
||||
#endif
|
||||
|
||||
#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
|
||||
|
|
|
@ -1023,7 +1023,7 @@ static void reset_ctrl_regs(void *unused)
|
|||
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
|
||||
unsigned long action, void *cpu)
|
||||
{
|
||||
if (action == CPU_ONLINE)
|
||||
if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
|
||||
smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
|
||||
|
||||
return NOTIFY_OK;
|
||||
|
|
|
@ -400,7 +400,7 @@ __hw_perf_event_init(struct perf_event *event)
|
|||
}
|
||||
|
||||
if (event->group_leader != event) {
|
||||
if (validate_group(event) != 0);
|
||||
if (validate_group(event) != 0)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -484,7 +484,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
|
|||
SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
|
||||
};
|
||||
|
||||
static void __init armpmu_init(struct arm_pmu *armpmu)
|
||||
static void armpmu_init(struct arm_pmu *armpmu)
|
||||
{
|
||||
atomic_set(&armpmu->active_events, 0);
|
||||
mutex_init(&armpmu->reserve_mutex);
|
||||
|
|
|
@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
|
|||
/*
|
||||
* PMXEVTYPER: Event selection reg
|
||||
*/
|
||||
#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
|
||||
#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
|
||||
#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
|
||||
|
||||
/*
|
||||
|
|
|
@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
|
|||
* switch away from it before attempting any exclusive accesses.
|
||||
*/
|
||||
cpu_switch_mm(mm->pgd, mm);
|
||||
local_flush_bp_all();
|
||||
enter_lazy_tlb(mm, current);
|
||||
local_flush_tlb_all();
|
||||
|
||||
|
|
|
@ -64,6 +64,11 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
|
|||
local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
|
||||
}
|
||||
|
||||
static inline void ipi_flush_bp_all(void *ignored)
|
||||
{
|
||||
local_flush_bp_all();
|
||||
}
|
||||
|
||||
void flush_tlb_all(void)
|
||||
{
|
||||
if (tlb_ops_need_broadcast())
|
||||
|
@ -127,3 +132,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
|||
local_flush_tlb_kernel_range(start, end);
|
||||
}
|
||||
|
||||
void flush_bp_all(void)
|
||||
{
|
||||
if (tlb_ops_need_broadcast())
|
||||
on_each_cpu(ipi_flush_bp_all, NULL, 1);
|
||||
else
|
||||
local_flush_bp_all();
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/smp_twd.h>
|
||||
#include <asm/localtimer.h>
|
||||
|
||||
|
@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
|
|||
struct device_node *np;
|
||||
int err;
|
||||
|
||||
if (!is_smp() || !setup_max_cpus)
|
||||
return;
|
||||
|
||||
np = of_find_matching_node(NULL, twd_of_match);
|
||||
if (!np)
|
||||
return;
|
||||
|
|
|
@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|||
ret = __cpu_suspend(arg, fn);
|
||||
if (ret == 0) {
|
||||
cpu_switch_mm(mm->pgd, mm);
|
||||
local_flush_bp_all();
|
||||
local_flush_tlb_all();
|
||||
}
|
||||
|
||||
|
|
|
@ -19,9 +19,9 @@
|
|||
1: subs r2, r2, #4 @ 1 do we have enough
|
||||
blt 5f @ 1 bytes to align with?
|
||||
cmp r3, #2 @ 1
|
||||
strltb r1, [r0], #1 @ 1
|
||||
strleb r1, [r0], #1 @ 1
|
||||
strb r1, [r0], #1 @ 1
|
||||
strltb r1, [ip], #1 @ 1
|
||||
strleb r1, [ip], #1 @ 1
|
||||
strb r1, [ip], #1 @ 1
|
||||
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
|
||||
/*
|
||||
* The pointer is now aligned and the length is adjusted. Try doing the
|
||||
|
@ -29,10 +29,14 @@
|
|||
*/
|
||||
|
||||
ENTRY(memset)
|
||||
ands r3, r0, #3 @ 1 unaligned?
|
||||
/*
|
||||
* Preserve the contents of r0 for the return value.
|
||||
*/
|
||||
mov ip, r0
|
||||
ands r3, ip, #3 @ 1 unaligned?
|
||||
bne 1b @ 1
|
||||
/*
|
||||
* we know that the pointer in r0 is aligned to a word boundary.
|
||||
* we know that the pointer in ip is aligned to a word boundary.
|
||||
*/
|
||||
orr r1, r1, r1, lsl #8
|
||||
orr r1, r1, r1, lsl #16
|
||||
|
@ -43,29 +47,28 @@ ENTRY(memset)
|
|||
#if ! CALGN(1)+0
|
||||
|
||||
/*
|
||||
* We need an extra register for this loop - save the return address and
|
||||
* use the LR
|
||||
* We need 2 extra registers for this loop - use r8 and the LR
|
||||
*/
|
||||
str lr, [sp, #-4]!
|
||||
mov ip, r1
|
||||
stmfd sp!, {r8, lr}
|
||||
mov r8, r1
|
||||
mov lr, r1
|
||||
|
||||
2: subs r2, r2, #64
|
||||
stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
|
||||
stmgeia r0!, {r1, r3, ip, lr}
|
||||
stmgeia r0!, {r1, r3, ip, lr}
|
||||
stmgeia r0!, {r1, r3, ip, lr}
|
||||
stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
|
||||
stmgeia ip!, {r1, r3, r8, lr}
|
||||
stmgeia ip!, {r1, r3, r8, lr}
|
||||
stmgeia ip!, {r1, r3, r8, lr}
|
||||
bgt 2b
|
||||
ldmeqfd sp!, {pc} @ Now <64 bytes to go.
|
||||
ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
|
||||
/*
|
||||
* No need to correct the count; we're only testing bits from now on
|
||||
*/
|
||||
tst r2, #32
|
||||
stmneia r0!, {r1, r3, ip, lr}
|
||||
stmneia r0!, {r1, r3, ip, lr}
|
||||
stmneia ip!, {r1, r3, r8, lr}
|
||||
stmneia ip!, {r1, r3, r8, lr}
|
||||
tst r2, #16
|
||||
stmneia r0!, {r1, r3, ip, lr}
|
||||
ldr lr, [sp], #4
|
||||
stmneia ip!, {r1, r3, r8, lr}
|
||||
ldmfd sp!, {r8, lr}
|
||||
|
||||
#else
|
||||
|
||||
|
@ -74,54 +77,54 @@ ENTRY(memset)
|
|||
* whole cache lines at once.
|
||||
*/
|
||||
|
||||
stmfd sp!, {r4-r7, lr}
|
||||
stmfd sp!, {r4-r8, lr}
|
||||
mov r4, r1
|
||||
mov r5, r1
|
||||
mov r6, r1
|
||||
mov r7, r1
|
||||
mov ip, r1
|
||||
mov r8, r1
|
||||
mov lr, r1
|
||||
|
||||
cmp r2, #96
|
||||
tstgt r0, #31
|
||||
tstgt ip, #31
|
||||
ble 3f
|
||||
|
||||
and ip, r0, #31
|
||||
rsb ip, ip, #32
|
||||
sub r2, r2, ip
|
||||
movs ip, ip, lsl #(32 - 4)
|
||||
stmcsia r0!, {r4, r5, r6, r7}
|
||||
stmmiia r0!, {r4, r5}
|
||||
tst ip, #(1 << 30)
|
||||
mov ip, r1
|
||||
strne r1, [r0], #4
|
||||
and r8, ip, #31
|
||||
rsb r8, r8, #32
|
||||
sub r2, r2, r8
|
||||
movs r8, r8, lsl #(32 - 4)
|
||||
stmcsia ip!, {r4, r5, r6, r7}
|
||||
stmmiia ip!, {r4, r5}
|
||||
tst r8, #(1 << 30)
|
||||
mov r8, r1
|
||||
strne r1, [ip], #4
|
||||
|
||||
3: subs r2, r2, #64
|
||||
stmgeia r0!, {r1, r3-r7, ip, lr}
|
||||
stmgeia r0!, {r1, r3-r7, ip, lr}
|
||||
stmgeia ip!, {r1, r3-r8, lr}
|
||||
stmgeia ip!, {r1, r3-r8, lr}
|
||||
bgt 3b
|
||||
ldmeqfd sp!, {r4-r7, pc}
|
||||
ldmeqfd sp!, {r4-r8, pc}
|
||||
|
||||
tst r2, #32
|
||||
stmneia r0!, {r1, r3-r7, ip, lr}
|
||||
stmneia ip!, {r1, r3-r8, lr}
|
||||
tst r2, #16
|
||||
stmneia r0!, {r4-r7}
|
||||
ldmfd sp!, {r4-r7, lr}
|
||||
stmneia ip!, {r4-r7}
|
||||
ldmfd sp!, {r4-r8, lr}
|
||||
|
||||
#endif
|
||||
|
||||
4: tst r2, #8
|
||||
stmneia r0!, {r1, r3}
|
||||
stmneia ip!, {r1, r3}
|
||||
tst r2, #4
|
||||
strne r1, [r0], #4
|
||||
strne r1, [ip], #4
|
||||
/*
|
||||
* When we get here, we've got less than 4 bytes to zero. We
|
||||
* may have an unaligned pointer as well.
|
||||
*/
|
||||
5: tst r2, #2
|
||||
strneb r1, [r0], #1
|
||||
strneb r1, [r0], #1
|
||||
strneb r1, [ip], #1
|
||||
strneb r1, [ip], #1
|
||||
tst r2, #1
|
||||
strneb r1, [r0], #1
|
||||
strneb r1, [ip], #1
|
||||
mov pc, lr
|
||||
ENDPROC(memset)
|
||||
|
|
|
@ -168,7 +168,7 @@ void __init netx_init_irq(void)
|
|||
{
|
||||
int irq;
|
||||
|
||||
vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
|
||||
vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
|
||||
|
||||
for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
|
||||
irq_set_chip_and_handler(irq, &netx_hif_chip,
|
||||
|
|
|
@ -17,42 +17,42 @@
|
|||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#define NETX_IRQ_VIC_START 0
|
||||
#define NETX_IRQ_SOFTINT 0
|
||||
#define NETX_IRQ_TIMER0 1
|
||||
#define NETX_IRQ_TIMER1 2
|
||||
#define NETX_IRQ_TIMER2 3
|
||||
#define NETX_IRQ_SYSTIME_NS 4
|
||||
#define NETX_IRQ_SYSTIME_S 5
|
||||
#define NETX_IRQ_GPIO_15 6
|
||||
#define NETX_IRQ_WATCHDOG 7
|
||||
#define NETX_IRQ_UART0 8
|
||||
#define NETX_IRQ_UART1 9
|
||||
#define NETX_IRQ_UART2 10
|
||||
#define NETX_IRQ_USB 11
|
||||
#define NETX_IRQ_SPI 12
|
||||
#define NETX_IRQ_I2C 13
|
||||
#define NETX_IRQ_LCD 14
|
||||
#define NETX_IRQ_HIF 15
|
||||
#define NETX_IRQ_GPIO_0_14 16
|
||||
#define NETX_IRQ_XPEC0 17
|
||||
#define NETX_IRQ_XPEC1 18
|
||||
#define NETX_IRQ_XPEC2 19
|
||||
#define NETX_IRQ_XPEC3 20
|
||||
#define NETX_IRQ_XPEC(no) (17 + (no))
|
||||
#define NETX_IRQ_MSYNC0 21
|
||||
#define NETX_IRQ_MSYNC1 22
|
||||
#define NETX_IRQ_MSYNC2 23
|
||||
#define NETX_IRQ_MSYNC3 24
|
||||
#define NETX_IRQ_IRQ_PHY 25
|
||||
#define NETX_IRQ_ISO_AREA 26
|
||||
#define NETX_IRQ_VIC_START 64
|
||||
#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0)
|
||||
#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1)
|
||||
#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2)
|
||||
#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3)
|
||||
#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4)
|
||||
#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5)
|
||||
#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6)
|
||||
#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7)
|
||||
#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8)
|
||||
#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9)
|
||||
#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10)
|
||||
#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11)
|
||||
#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12)
|
||||
#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13)
|
||||
#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14)
|
||||
#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15)
|
||||
#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16)
|
||||
#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17)
|
||||
#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18)
|
||||
#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19)
|
||||
#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20)
|
||||
#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no))
|
||||
#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21)
|
||||
#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22)
|
||||
#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23)
|
||||
#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24)
|
||||
#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25)
|
||||
#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26)
|
||||
/* int 27 is reserved */
|
||||
/* int 28 is reserved */
|
||||
#define NETX_IRQ_TIMER3 29
|
||||
#define NETX_IRQ_TIMER4 30
|
||||
#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29)
|
||||
#define NETX_IRQ_TIMER4 (NETX_IRQ_VIC_START + 30)
|
||||
/* int 31 is reserved */
|
||||
|
||||
#define NETX_IRQS 32
|
||||
#define NETX_IRQS (NETX_IRQ_VIC_START + 32)
|
||||
|
||||
/* for multiplexed irqs on gpio 0..14 */
|
||||
#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))
|
||||
|
|
|
@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
{
|
||||
u64 asid = mm->context.id;
|
||||
u64 asid = atomic64_read(&mm->context.id);
|
||||
u64 generation = atomic64_read(&asid_generation);
|
||||
|
||||
if (asid != 0 && is_reserved_asid(asid)) {
|
||||
|
@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
|
|||
cpumask_clear(mm_cpumask(mm));
|
||||
}
|
||||
|
||||
mm->context.id = asid;
|
||||
return asid;
|
||||
}
|
||||
|
||||
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned int cpu = smp_processor_id();
|
||||
u64 asid;
|
||||
|
||||
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
|
||||
__check_vmalloc_seq(mm);
|
||||
|
@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
|
|||
*/
|
||||
cpu_set_reserved_ttbr0();
|
||||
|
||||
if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
|
||||
&& atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
|
||||
asid = atomic64_read(&mm->context.id);
|
||||
if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
|
||||
&& atomic64_xchg(&per_cpu(active_asids, cpu), asid))
|
||||
goto switch_mm_fastpath;
|
||||
|
||||
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
|
||||
/* Check that our ASID belongs to the current generation. */
|
||||
if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
|
||||
new_context(mm, cpu);
|
||||
asid = atomic64_read(&mm->context.id);
|
||||
if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
|
||||
asid = new_context(mm, cpu);
|
||||
atomic64_set(&mm->context.id, asid);
|
||||
}
|
||||
|
||||
atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
|
||||
cpumask_set_cpu(cpu, mm_cpumask(mm));
|
||||
|
||||
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
|
||||
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
|
||||
local_flush_bp_all();
|
||||
local_flush_tlb_all();
|
||||
}
|
||||
|
||||
atomic64_set(&per_cpu(active_asids, cpu), asid);
|
||||
cpumask_set_cpu(cpu, mm_cpumask(mm));
|
||||
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
|
||||
|
||||
switch_mm_fastpath:
|
||||
|
|
|
@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
|
|||
{
|
||||
/* Switch to the identity mapping. */
|
||||
cpu_switch_mm(idmap_pgd, &init_mm);
|
||||
local_flush_bp_all();
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_ASID
|
||||
/*
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
ENTRY(cpu_v7_switch_mm)
|
||||
#ifdef CONFIG_MMU
|
||||
mmid r1, r1 @ get mm->context.id
|
||||
and r3, r1, #0xff
|
||||
asid r3, r1
|
||||
mov r3, r3, lsl #(48 - 32) @ ASID
|
||||
mcrr p15, 0, r0, r3, c2 @ set TTB 0
|
||||
isb
|
||||
|
|
|
@ -619,6 +619,7 @@ static struct file_system_type pfm_fs_type = {
|
|||
.mount = pfmfs_mount,
|
||||
.kill_sb = kill_anon_super,
|
||||
};
|
||||
MODULE_ALIAS_FS("pfmfs");
|
||||
|
||||
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
|
||||
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
|
||||
|
|
|
@ -100,9 +100,6 @@ typedef unsigned long elf_fpregset_t;
|
|||
|
||||
#define ELF_PLATFORM (NULL)
|
||||
|
||||
#define SET_PERSONALITY(ex) \
|
||||
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
|
||||
|
||||
#define STACK_RND_MASK (0)
|
||||
|
||||
#ifdef CONFIG_METAG_USER_TCM
|
||||
|
|
|
@ -40,6 +40,7 @@ endchoice
|
|||
|
||||
config NUMA
|
||||
bool "Non Uniform Memory Access (NUMA) Support"
|
||||
select ARCH_WANT_NUMA_VARIABLE_LOCALITY
|
||||
help
|
||||
Some Meta systems have MMU-mappable on-chip memories with
|
||||
lower latencies than main memory. This enables support for
|
||||
|
|
|
@ -113,7 +113,7 @@
|
|||
STEPUP4((t)+16, fn)
|
||||
|
||||
_GLOBAL(powerpc_sha_transform)
|
||||
PPC_STLU r1,-STACKFRAMESIZE(r1)
|
||||
PPC_STLU r1,-INT_FRAME_SIZE(r1)
|
||||
SAVE_8GPRS(14, r1)
|
||||
SAVE_10GPRS(22, r1)
|
||||
|
||||
|
@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
|
|||
|
||||
REST_8GPRS(14, r1)
|
||||
REST_10GPRS(22, r1)
|
||||
addi r1,r1,STACKFRAMESIZE
|
||||
addi r1,r1,INT_FRAME_SIZE
|
||||
blr
|
||||
|
|
|
@ -52,8 +52,6 @@
|
|||
#define smp_mb__before_clear_bit() smp_mb()
|
||||
#define smp_mb__after_clear_bit() smp_mb()
|
||||
|
||||
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
|
||||
|
||||
/* Macro for generating the ***_bits() functions */
|
||||
#define DEFINE_BITOP(fn, op, prefix, postfix) \
|
||||
static __inline__ void fn(unsigned long mask, \
|
||||
|
|
|
@ -266,7 +266,8 @@
|
|||
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
|
||||
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
|
||||
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
|
||||
#define FSCR_TAR (1<<8) /* Enable Target Adress Register */
|
||||
#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
|
||||
#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
|
||||
#define SPRN_TAR 0x32f /* Target Address Register */
|
||||
#define SPRN_LPCR 0x13E /* LPAR Control Register */
|
||||
#define LPCR_VPM0 (1ul << (63-0))
|
||||
|
|
|
@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
|
|||
COMPAT_SYS(process_vm_readv)
|
||||
COMPAT_SYS(process_vm_writev)
|
||||
SYSCALL(finit_module)
|
||||
SYSCALL(ni_syscall) /* sys_kcmp */
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <uapi/asm/unistd.h>
|
||||
|
||||
|
||||
#define __NR_syscalls 354
|
||||
#define __NR_syscalls 355
|
||||
|
||||
#define __NR__exit __NR_exit
|
||||
#define NR_syscalls __NR_syscalls
|
||||
|
|
|
@ -376,6 +376,7 @@
|
|||
#define __NR_process_vm_readv 351
|
||||
#define __NR_process_vm_writev 352
|
||||
#define __NR_finit_module 353
|
||||
#define __NR_kcmp 354
|
||||
|
||||
|
||||
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
|
||||
|
|
|
@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
|
|||
|
||||
_GLOBAL(__setup_cpu_power8)
|
||||
mflr r11
|
||||
bl __init_FSCR
|
||||
bl __init_hvmode_206
|
||||
mtlr r11
|
||||
beqlr
|
||||
|
@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
|
|||
mfspr r3,SPRN_LPCR
|
||||
oris r3, r3, LPCR_AIL_3@h
|
||||
bl __init_LPCR
|
||||
bl __init_FSCR
|
||||
bl __init_TLB
|
||||
mtlr r11
|
||||
blr
|
||||
|
||||
_GLOBAL(__restore_cpu_power8)
|
||||
mflr r11
|
||||
bl __init_FSCR
|
||||
mfmsr r3
|
||||
rldicl. r0,r3,4,63
|
||||
beqlr
|
||||
|
@ -115,7 +116,7 @@ __init_LPCR:
|
|||
|
||||
__init_FSCR:
|
||||
mfspr r3,SPRN_FSCR
|
||||
ori r3,r3,FSCR_TAR
|
||||
ori r3,r3,FSCR_TAR|FSCR_DSCR
|
||||
mtspr SPRN_FSCR,r3
|
||||
blr
|
||||
|
||||
|
|
|
@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
|
|||
mflr r10 ; \
|
||||
ld r12,PACAKBASE(r13) ; \
|
||||
LOAD_HANDLER(r12, system_call_entry_direct) ; \
|
||||
mtlr r12 ; \
|
||||
mtctr r12 ; \
|
||||
mfspr r12,SPRN_SRR1 ; \
|
||||
/* Re-use of r13... No spare regs to do this */ \
|
||||
li r13,MSR_RI ; \
|
||||
mtmsrd r13,1 ; \
|
||||
GET_PACA(r13) ; /* get r13 back */ \
|
||||
blr ;
|
||||
bctr ;
|
||||
#else
|
||||
/* We can branch directly */
|
||||
#define SYSCALL_PSERIES_2_DIRECT \
|
||||
|
|
|
@ -749,6 +749,7 @@ static struct file_system_type spufs_type = {
|
|||
.mount = spufs_mount,
|
||||
.kill_sb = kill_litter_super,
|
||||
};
|
||||
MODULE_ALIAS_FS("spufs");
|
||||
|
||||
static int __init spufs_init(void)
|
||||
{
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include <asm/hvcall.h>
|
||||
#include <asm/hvcserver.h>
|
||||
|
@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
|
|||
= (unsigned int)last_p_partition_ID;
|
||||
|
||||
/* copy the Null-term char too */
|
||||
strncpy(&next_partner_info->location_code[0],
|
||||
strlcpy(&next_partner_info->location_code[0],
|
||||
(char *)&pi_buff[2],
|
||||
strlen((char *)&pi_buff[2]) + 1);
|
||||
sizeof(next_partner_info->location_code));
|
||||
|
||||
list_add_tail(&(next_partner_info->node), head);
|
||||
next_partner_info = NULL;
|
||||
|
|
|
@ -456,6 +456,7 @@ static struct file_system_type hypfs_type = {
|
|||
.mount = hypfs_mount,
|
||||
.kill_sb = hypfs_kill_super
|
||||
};
|
||||
MODULE_ALIAS_FS("s390_hypfs");
|
||||
|
||||
static const struct super_operations hypfs_s_ops = {
|
||||
.statfs = simple_statfs,
|
||||
|
|
|
@ -288,6 +288,9 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
|
|||
long compat_sys_fallocate(int fd, int mode,
|
||||
u32 offset_lo, u32 offset_hi,
|
||||
u32 len_lo, u32 len_hi);
|
||||
long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
|
||||
unsigned int offset_low, loff_t __user * result,
|
||||
unsigned int origin);
|
||||
|
||||
/* Assembly trampoline to avoid clobbering r0. */
|
||||
long _compat_sys_rt_sigreturn(void);
|
||||
|
|
|
@ -32,50 +32,65 @@
|
|||
* adapt the usual convention.
|
||||
*/
|
||||
|
||||
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high)
|
||||
COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
|
||||
u32, low, u32, high)
|
||||
{
|
||||
return sys_truncate(filename, ((loff_t)high << 32) | low);
|
||||
}
|
||||
|
||||
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high)
|
||||
COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
|
||||
u32, low, u32, high)
|
||||
{
|
||||
return sys_ftruncate(fd, ((loff_t)high << 32) | low);
|
||||
}
|
||||
|
||||
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
|
||||
u32 dummy, u32 low, u32 high)
|
||||
COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
|
||||
size_t, count, u32, dummy, u32, low, u32, high)
|
||||
{
|
||||
return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
|
||||
}
|
||||
|
||||
long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
|
||||
u32 dummy, u32 low, u32 high)
|
||||
COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
|
||||
size_t, count, u32, dummy, u32, low, u32, high)
|
||||
{
|
||||
return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
|
||||
}
|
||||
|
||||
long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len)
|
||||
COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, low, u32, high,
|
||||
char __user *, buf, size_t, len)
|
||||
{
|
||||
return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
|
||||
}
|
||||
|
||||
long compat_sys_sync_file_range2(int fd, unsigned int flags,
|
||||
u32 offset_lo, u32 offset_hi,
|
||||
u32 nbytes_lo, u32 nbytes_hi)
|
||||
COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
|
||||
u32, offset_lo, u32, offset_hi,
|
||||
u32, nbytes_lo, u32, nbytes_hi)
|
||||
{
|
||||
return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
|
||||
((loff_t)nbytes_hi << 32) | nbytes_lo,
|
||||
flags);
|
||||
}
|
||||
|
||||
long compat_sys_fallocate(int fd, int mode,
|
||||
u32 offset_lo, u32 offset_hi,
|
||||
u32 len_lo, u32 len_hi)
|
||||
COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
|
||||
u32, offset_lo, u32, offset_hi,
|
||||
u32, len_lo, u32, len_hi)
|
||||
{
|
||||
return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
|
||||
((loff_t)len_hi << 32) | len_lo);
|
||||
}
|
||||
|
||||
/*
|
||||
* Avoid bug in generic sys_llseek() that specifies offset_high and
|
||||
* offset_low as "unsigned long", thus making it possible to pass
|
||||
* a sign-extended high 32 bits in offset_low.
|
||||
*/
|
||||
COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
|
||||
unsigned int, offset_low, loff_t __user *, result,
|
||||
unsigned int, origin)
|
||||
{
|
||||
return sys_llseek(fd, offset_high, offset_low, result, origin);
|
||||
}
|
||||
|
||||
/* Provide the compat syscall number to call mapping. */
|
||||
#undef __SYSCALL
|
||||
#define __SYSCALL(nr, call) [nr] = (call),
|
||||
|
@ -83,6 +98,7 @@ long compat_sys_fallocate(int fd, int mode,
|
|||
/* See comments in sys.c */
|
||||
#define compat_sys_fadvise64_64 sys32_fadvise64_64
|
||||
#define compat_sys_readahead sys32_readahead
|
||||
#define sys_llseek compat_sys_llseek
|
||||
|
||||
/* Call the assembly trampolines where necessary. */
|
||||
#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
|
||||
|
|
|
@ -14,13 +14,29 @@
|
|||
* analysis of kexec-tools; if other broken bootloaders initialize a
|
||||
* different set of fields we will need to figure out how to disambiguate.
|
||||
*
|
||||
* Note: efi_info is commonly left uninitialized, but that field has a
|
||||
* private magic, so it is better to leave it unchanged.
|
||||
*/
|
||||
static void sanitize_boot_params(struct boot_params *boot_params)
|
||||
{
|
||||
/*
|
||||
* IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
|
||||
* this field. The purpose of this field is to guarantee
|
||||
* compliance with the x86 boot spec located in
|
||||
* Documentation/x86/boot.txt . That spec says that the
|
||||
* *whole* structure should be cleared, after which only the
|
||||
* portion defined by struct setup_header (boot_params->hdr)
|
||||
* should be copied in.
|
||||
*
|
||||
* If you're having an issue because the sentinel is set, you
|
||||
* need to change the whole structure to be cleared, not this
|
||||
* (or any other) individual field, or you will soon have
|
||||
* problems again.
|
||||
*/
|
||||
if (boot_params->sentinel) {
|
||||
/*fields in boot_params are not valid, clear them */
|
||||
/* fields in boot_params are left uninitialized, clear them */
|
||||
memset(&boot_params->olpc_ofw_header, 0,
|
||||
(char *)&boot_params->alt_mem_k -
|
||||
(char *)&boot_params->efi_info -
|
||||
(char *)&boot_params->olpc_ofw_header);
|
||||
memset(&boot_params->kbd_status, 0,
|
||||
(char *)&boot_params->hdr -
|
||||
|
|
|
@ -171,9 +171,15 @@ static struct resource bss_resource = {
|
|||
|
||||
#ifdef CONFIG_X86_32
|
||||
/* cpu data as detected by the assembly code in head.S */
|
||||
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
|
||||
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
|
||||
.wp_works_ok = -1,
|
||||
.fdiv_bug = -1,
|
||||
};
|
||||
/* common cpu data for all cpus */
|
||||
struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
|
||||
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
|
||||
.wp_works_ok = -1,
|
||||
.fdiv_bug = -1,
|
||||
};
|
||||
EXPORT_SYMBOL(boot_cpu_data);
|
||||
|
||||
unsigned int def_to_bigsmp;
|
||||
|
|
|
@ -1365,9 +1365,8 @@ static inline void mwait_play_dead(void)
|
|||
unsigned int eax, ebx, ecx, edx;
|
||||
unsigned int highest_cstate = 0;
|
||||
unsigned int highest_subcstate = 0;
|
||||
int i;
|
||||
void *mwait_ptr;
|
||||
struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
|
||||
int i;
|
||||
|
||||
if (!this_cpu_has(X86_FEATURE_MWAIT))
|
||||
return;
|
||||
|
|
|
@ -410,9 +410,8 @@ void __init init_mem_mapping(void)
|
|||
/* the ISA range is always mapped regardless of memory holes */
|
||||
init_memory_mapping(0, ISA_END_ADDRESS);
|
||||
|
||||
/* xen has big range in reserved near end of ram, skip it at first */
|
||||
addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE,
|
||||
PAGE_SIZE);
|
||||
/* xen has big range in reserved near end of ram, skip it at first.*/
|
||||
addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
|
||||
real_end = addr + PMD_SIZE;
|
||||
|
||||
/* step_size need to be small so pgt_buf from BRK could cover it */
|
||||
|
|
|
@ -563,6 +563,13 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
|
|||
if (base > __pa(high_memory-1))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* some areas in the middle of the kernel identity range
|
||||
* are not mapped, like the PCI space.
|
||||
*/
|
||||
if (!page_is_ram(base >> PAGE_SHIFT))
|
||||
return 0;
|
||||
|
||||
id_sz = (__pa(high_memory-1) <= base + size) ?
|
||||
__pa(high_memory) - base :
|
||||
size;
|
||||
|
|
|
@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
|
|||
{
|
||||
if (acpi_disabled)
|
||||
return -ENODEV;
|
||||
if (type && type->bus && type->find_device) {
|
||||
if (type && type->match && type->find_device) {
|
||||
down_write(&bus_type_sem);
|
||||
list_add_tail(&type->list, &bus_type_list);
|
||||
up_write(&bus_type_sem);
|
||||
printk(KERN_INFO PREFIX "bus type %s registered\n",
|
||||
type->bus->name);
|
||||
printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
|
||||
return 0;
|
||||
}
|
||||
return -ENODEV;
|
||||
|
@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
|
|||
down_write(&bus_type_sem);
|
||||
list_del_init(&type->list);
|
||||
up_write(&bus_type_sem);
|
||||
printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
|
||||
type->bus->name);
|
||||
printk(KERN_INFO PREFIX "bus type %s unregistered\n",
|
||||
type->name);
|
||||
return 0;
|
||||
}
|
||||
return -ENODEV;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
|
||||
|
||||
static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
|
||||
static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
|
||||
{
|
||||
struct acpi_bus_type *tmp, *ret = NULL;
|
||||
|
||||
if (!type)
|
||||
return NULL;
|
||||
|
||||
down_read(&bus_type_sem);
|
||||
list_for_each_entry(tmp, &bus_type_list, list) {
|
||||
if (tmp->bus == type) {
|
||||
if (tmp->match(dev)) {
|
||||
ret = tmp;
|
||||
break;
|
||||
}
|
||||
|
@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
|
||||
{
|
||||
struct acpi_bus_type *tmp;
|
||||
int ret = -ENODEV;
|
||||
|
||||
down_read(&bus_type_sem);
|
||||
list_for_each_entry(tmp, &bus_type_list, list) {
|
||||
if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
up_read(&bus_type_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
|
||||
void *addr_p, void **ret_p)
|
||||
{
|
||||
|
@ -261,29 +241,12 @@ static int acpi_unbind_one(struct device *dev)
|
|||
|
||||
static int acpi_platform_notify(struct device *dev)
|
||||
{
|
||||
struct acpi_bus_type *type;
|
||||
struct acpi_bus_type *type = acpi_get_bus_type(dev);
|
||||
acpi_handle handle;
|
||||
int ret;
|
||||
|
||||
ret = acpi_bind_one(dev, NULL);
|
||||
if (ret && (!dev->bus || !dev->parent)) {
|
||||
/* bridge devices genernally haven't bus or parent */
|
||||
ret = acpi_find_bridge_device(dev, &handle);
|
||||
if (!ret) {
|
||||
ret = acpi_bind_one(dev, handle);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
type = acpi_get_bus_type(dev->bus);
|
||||
if (ret) {
|
||||
if (!type || !type->find_device) {
|
||||
DBG("No ACPI bus support for %s\n", dev_name(dev));
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ret && type) {
|
||||
ret = type->find_device(dev, &handle);
|
||||
if (ret) {
|
||||
DBG("Unable to get handle for %s\n", dev_name(dev));
|
||||
|
@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
|
|||
{
|
||||
struct acpi_bus_type *type;
|
||||
|
||||
type = acpi_get_bus_type(dev->bus);
|
||||
type = acpi_get_bus_type(dev);
|
||||
if (type && type->cleanup)
|
||||
type->cleanup(dev);
|
||||
|
||||
|
|
|
@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
|||
}
|
||||
|
||||
exit:
|
||||
if (buffer.pointer)
|
||||
kfree(buffer.pointer);
|
||||
kfree(buffer.pointer);
|
||||
return apic_id;
|
||||
}
|
||||
|
||||
|
|
|
@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
|
|||
return 0;
|
||||
#endif
|
||||
|
||||
BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
|
||||
BUG_ON(pr->id >= nr_cpu_ids);
|
||||
|
||||
/*
|
||||
* Buggy BIOS check
|
||||
|
|
|
@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
|
|||
status = acpi_get_sleep_type_data(i, &type_a, &type_b);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
sleep_states[i] = 1;
|
||||
pr_cont(" S%d", i);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
|
|||
hibernation_set_ops(old_suspend_ordering ?
|
||||
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
|
||||
sleep_states[ACPI_STATE_S4] = 1;
|
||||
pr_cont(KERN_CONT " S4");
|
||||
if (nosigcheck)
|
||||
return;
|
||||
|
||||
|
@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
|
|||
{
|
||||
acpi_status status;
|
||||
u8 type_a, type_b;
|
||||
char supported[ACPI_S_STATE_COUNT * 3 + 1];
|
||||
char *pos = supported;
|
||||
int i;
|
||||
|
||||
if (acpi_disabled)
|
||||
return 0;
|
||||
|
@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
|
|||
acpi_sleep_dmi_check();
|
||||
|
||||
sleep_states[ACPI_STATE_S0] = 1;
|
||||
pr_info(PREFIX "(supports S0");
|
||||
|
||||
acpi_sleep_suspend_setup();
|
||||
acpi_sleep_hibernate_setup();
|
||||
|
@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
|
|||
status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
sleep_states[ACPI_STATE_S5] = 1;
|
||||
pr_cont(" S5");
|
||||
pm_power_off_prepare = acpi_power_off_prepare;
|
||||
pm_power_off = acpi_power_off;
|
||||
}
|
||||
pr_cont(")\n");
|
||||
|
||||
supported[0] = 0;
|
||||
for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
|
||||
if (sleep_states[i])
|
||||
pos += sprintf(pos, " S%d", i);
|
||||
}
|
||||
pr_info(PREFIX "(supports%s)\n", supported);
|
||||
|
||||
/*
|
||||
* Register the tts_notifier to reboot notifier list so that the _TTS
|
||||
* object can also be evaluated when the system enters S5.
|
||||
|
|
|
@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static struct acpi_bus_type ata_acpi_bus = {
|
||||
.find_bridge = ata_acpi_find_dummy,
|
||||
.name = "ATA",
|
||||
.find_device = ata_acpi_find_device,
|
||||
};
|
||||
|
||||
|
|
|
@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
|
|||
dev_warn(dev, "parent %s should not be sleeping\n",
|
||||
dev_name(dev->parent));
|
||||
list_add_tail(&dev->power.entry, &dpm_list);
|
||||
dev_pm_qos_constraints_init(dev);
|
||||
mutex_unlock(&dpm_list_mtx);
|
||||
}
|
||||
|
||||
|
@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
|
|||
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
|
||||
complete_all(&dev->power.completion);
|
||||
mutex_lock(&dpm_list_mtx);
|
||||
dev_pm_qos_constraints_destroy(dev);
|
||||
list_del_init(&dev->power.entry);
|
||||
mutex_unlock(&dpm_list_mtx);
|
||||
device_wakeup_disable(dev);
|
||||
|
|
|
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
{
if (!dev->power.early_init) {
spin_lock_init(&dev->power.lock);
dev->power.power_state = PMSG_INVALID;
dev->power.qos = NULL;
dev->power.early_init = true;
}
}

@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);

static inline void device_pm_sleep_init(struct device *dev) {}

static inline void device_pm_add(struct device *dev)
{
dev_pm_qos_constraints_init(dev);
}
static inline void device_pm_add(struct device *dev) {}

static inline void device_pm_remove(struct device *dev)
{
dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev);
}
@@ -41,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>

#include "power.h"

@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
struct pm_qos_flags *pqf;
s32 val;

if (!qos)
if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED;

pqf = &qos->flags;

@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
return IS_ERR_OR_NULL(dev->power.qos) ?
0 : pm_qos_read_value(&dev->power.qos->latency);
}

/**

@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
return 0;
}

/**
* dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
* @dev: target device
*
* Called from the device PM subsystem during device insertion under
* device_pm_lock().
*/
void dev_pm_qos_constraints_init(struct device *dev)
{
mutex_lock(&dev_pm_qos_mtx);
dev->power.qos = NULL;
dev->power.power_state = PMSG_ON;
mutex_unlock(&dev_pm_qos_mtx);
}
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
* dev_pm_qos_constraints_destroy

@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;

mutex_lock(&dev_pm_qos_mtx);

/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
__dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_hide_flags(dev);

mutex_lock(&dev_pm_qos_mtx);

dev->power.power_state = PMSG_INVALID;
qos = dev->power.qos;
if (!qos)
goto out;

@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
}

spin_lock_irq(&dev->power.lock);
dev->power.qos = NULL;
dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);

kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
"%s() called for already added request\n", __func__))
return -EINVAL;

req->dev = dev;

mutex_lock(&dev_pm_qos_mtx);

if (!dev->power.qos) {
if (dev->power.power_state.event == PM_EVENT_INVALID) {
/* The device has been removed from the system. */
req->dev = NULL;
ret = -ENODEV;
goto out;
} else {
/*
* Allocate the constraints data on the first call to
* add_request, i.e. only if the data is not already
* allocated and if the device has not been removed.
*/
ret = dev_pm_qos_constraints_allocate(dev);
}
}
if (IS_ERR(dev->power.qos))
ret = -ENODEV;
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);

if (!ret) {
req->dev = dev;
req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
}

out:
mutex_unlock(&dev_pm_qos_mtx);

return ret;

@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
s32 curr_value;
int ret = 0;

if (!req->dev->power.qos)
if (!req) /*guard against callers passing in null */
return -EINVAL;

if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;

if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;

switch(req->type) {

@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
int ret;

mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret;

if (!req) /*guard against callers passing in null */
return -EINVAL;

@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
"%s() called for unknown object\n", __func__))
return -EINVAL;

mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;

ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

/**
* dev_pm_qos_remove_request - modifies an existing qos request

@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret = 0;

if (!req) /*guard against callers passing in null */
return -EINVAL;

if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
int ret;

mutex_lock(&dev_pm_qos_mtx);

if (req->dev->power.qos) {
ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
} else {
/* Return if the device has been removed */
ret = -ENODEV;
}

ret = __dev_pm_qos_remove_request(req);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)

mutex_lock(&dev_pm_qos_mtx);

if (!dev->power.qos)
ret = dev->power.power_state.event != PM_EVENT_INVALID ?
dev_pm_qos_constraints_allocate(dev) : -ENODEV;
if (IS_ERR(dev->power.qos))
ret = -ENODEV;
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);

if (!ret)
ret = blocking_notifier_chain_register(

@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx);

/* Silently return if the constraints object is not present. */
if (dev->power.qos)
if (!IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister(
dev->power.qos->latency.notifiers,
notifier);

@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
struct dev_pm_qos_request *req = NULL;

switch(type) {
case DEV_PM_QOS_LATENCY:
dev_pm_qos_remove_request(dev->power.qos->latency_req);
req = dev->power.qos->latency_req;
dev->power.qos->latency_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
dev_pm_qos_remove_request(dev->power.qos->flags_req);
req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
}
__dev_pm_qos_remove_request(req);
kfree(req);
}

/**

@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0)
return -EINVAL;

if (dev->power.qos && dev->power.qos->latency_req)
return -EEXIST;

req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;

ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
if (ret < 0)
if (ret < 0) {
kfree(req);
return ret;
}

mutex_lock(&dev_pm_qos_mtx);

if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->latency_req)
ret = -EEXIST;

if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
goto out;
}

dev->power.qos->latency_req = req;
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);

out:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
pm_qos_sysfs_remove_latency(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}
}

/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
if (dev->power.qos && dev->power.qos->latency_req) {
pm_qos_sysfs_remove_latency(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
mutex_unlock(&dev_pm_qos_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (!device_is_registered(dev))
return -EINVAL;

if (dev->power.qos && dev->power.qos->flags_req)
return -EEXIST;

req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;

pm_runtime_get_sync(dev);
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
if (ret < 0)
goto fail;
if (ret < 0) {
kfree(req);
return ret;
}

pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);

if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->flags_req)
ret = -EEXIST;

if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
goto out;
}

dev->power.qos->flags_req = req;
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

fail:
out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
pm_qos_sysfs_remove_flags(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}
}

/**
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
* @dev: Device whose PM QoS flags are to be hidden from user space.
*/
void dev_pm_qos_hide_flags(struct device *dev)
{
if (dev->power.qos && dev->power.qos->flags_req) {
pm_qos_sysfs_remove_flags(dev);
pm_runtime_get_sync(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
pm_runtime_put(dev);
}
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_flags(dev);
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
s32 value;
int ret;

if (!dev->power.qos || !dev->power.qos->flags_req)
return -EINVAL;

pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);

if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
ret = -EINVAL;
goto out;
}

value = dev_pm_qos_requested_flags(dev);
if (set)
value |= mask;

@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)

ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);

return ret;
}
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)

void dpm_sysfs_remove(struct device *dev)
{
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
pm_runtime_put(map->dev);
return IRQ_NONE;
}
}
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
return;
}

spin_lock_init(&pc_host->cfgspace_lock);

pc->host_controller = pc_host;
pc_host->pci_controller.io_resource = &pc_host->io_resource;
pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
@@ -40,6 +40,7 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
static LIST_HEAD(rng_list);
static DEFINE_MUTEX(rng_mutex);
static int data_avail;
static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
__cacheline_aligned;
static u8 *rng_buffer;

static size_t rng_buffer_size(void)
{
return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

static inline int hwrng_init(struct hwrng *rng)
{

@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,

if (!data_avail) {
bytes_read = rng_get_data(current_rng, rng_buffer,
sizeof(rng_buffer),
rng_buffer_size(),
!(filp->f_flags & O_NONBLOCK));
if (bytes_read < 0) {
err = bytes_read;

@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)

mutex_lock(&rng_mutex);

/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
err = -ENOMEM;
if (!rng_buffer) {
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
if (!rng_buffer)
goto out_unlock;
}

/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {
@@ -852,6 +852,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
unsigned long flags;
int wakeup_write = 0;

/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, flags);

@@ -873,10 +874,8 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
else
r->entropy_count = reserved;

if (r->entropy_count < random_write_wakeup_thresh) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
if (r->entropy_count < random_write_wakeup_thresh)
wakeup_write = 1;
}

DEBUG_ENT("debiting %zu entropy credits from %s%s\n",

@@ -884,6 +883,11 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,

spin_unlock_irqrestore(&r->lock, flags);

if (wakeup_write) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}

return nbytes;
}
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
(task_active_pid_ns(current) != &init_pid_ns))
return;

/* Can only change if privileged. */
if (!capable(CAP_NET_ADMIN)) {
err = EPERM;
goto out;
}

mc_op = (enum proc_cn_mcast_op *)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:

@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
err = EINVAL;
break;
}

out:
cn_proc_ack(err, msg->seq, msg->ack);
}
@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \
* dbs: used as a shortform for demand based switching It helps to keep variable
* names smaller, simpler
* cdbs: common dbs
* on_*: On-demand governor
* od_*: On-demand governor
* cs_*: Conservative governor
*/
@@ -28,13 +28,7 @@

static int hb_voltage_change(unsigned int freq)
{
int i;
u32 msg[HB_CPUFREQ_IPC_LEN];

msg[0] = HB_CPUFREQ_CHANGE_NOTE;
msg[1] = freq / 1000000;
for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
msg[i] = 0;
u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};

return pl320_ipc_transmit(msg);
}
@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)

cpu = all_cpu_data[policy->cpu];

if (!policy->cpuinfo.max_freq)
return -ENODEV;

intel_pstate_get_min_max(cpu, &min, &max);

limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;

@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
.owner = THIS_MODULE,
};

static void intel_pstate_exit(void)
{
int cpu;

sysfs_remove_group(intel_pstate_kobject,
&intel_pstate_attr_group);
debugfs_remove_recursive(debugfs_parent);

cpufreq_unregister_driver(&intel_pstate_driver);

if (!all_cpu_data)
return;

get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
del_timer_sync(&all_cpu_data[cpu]->timer);
kfree(all_cpu_data[cpu]);
}
}

put_online_cpus();
vfree(all_cpu_data);
}
module_exit(intel_pstate_exit);

static int __initdata no_load;

static int __init intel_pstate_init(void)
{
int rc = 0;
int cpu, rc = 0;
const struct x86_cpu_id *id;

if (no_load)

@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
intel_pstate_sysfs_expose_params();
return rc;
out:
intel_pstate_exit();
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
del_timer_sync(&all_cpu_data[cpu]->timer);
kfree(all_cpu_data[cpu]);
}
}

put_online_cpus();
vfree(all_cpu_data);
return -ENODEV;
}
device_initcall(intel_pstate_init);
@@ -442,7 +442,6 @@ static int __init dmi_present(const char __iomem *p)
static int __init smbios_present(const char __iomem *p)
{
u8 buf[32];
int offset = 0;

memcpy_fromio(buf, p, 32);
if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {

@@ -461,9 +460,9 @@ static int __init smbios_present(const char __iomem *p)
dmi_ver = 0x0206;
break;
}
offset = 16;
return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16);
}
return dmi_present(buf + offset);
return 1;
}

void __init dmi_scan_machine(void)
@@ -426,6 +426,44 @@ get_var_data(struct efivars *efivars, struct efi_variable *var)
return status;
}

static efi_status_t
check_var_size_locked(struct efivars *efivars, u32 attributes,
unsigned long size)
{
u64 storage_size, remaining_size, max_size;
efi_status_t status;
const struct efivar_operations *fops = efivars->ops;

if (!efivars->ops->query_variable_info)
return EFI_UNSUPPORTED;

status = fops->query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);

if (status != EFI_SUCCESS)
return status;

if (!storage_size || size > remaining_size || size > max_size ||
(remaining_size - size) < (storage_size / 2))
return EFI_OUT_OF_RESOURCES;

return status;
}

static efi_status_t
check_var_size(struct efivars *efivars, u32 attributes, unsigned long size)
{
efi_status_t status;
unsigned long flags;

spin_lock_irqsave(&efivars->lock, flags);
status = check_var_size_locked(efivars, attributes, size);
spin_unlock_irqrestore(&efivars->lock, flags);

return status;
}

static ssize_t
efivar_guid_read(struct efivar_entry *entry, char *buf)
{

@@ -547,11 +585,16 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
}

spin_lock_irq(&efivars->lock);
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
new_var->Attributes,
new_var->DataSize,
new_var->Data);

status = check_var_size_locked(efivars, new_var->Attributes,
new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));

if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
new_var->Attributes,
new_var->DataSize,
new_var->Data);

spin_unlock_irq(&efivars->lock);

@@ -702,8 +745,7 @@ static ssize_t efivarfs_file_write(struct file *file,
u32 attributes;
struct inode *inode = file->f_mapping->host;
unsigned long datasize = count - sizeof(attributes);
unsigned long newdatasize;
u64 storage_size, remaining_size, max_size;
unsigned long newdatasize, varsize;
ssize_t bytes = 0;

if (count < sizeof(attributes))

@@ -722,28 +764,18 @@ static ssize_t efivarfs_file_write(struct file *file,
* amounts of memory. Pick a default size of 64K if
* QueryVariableInfo() isn't supported by the firmware.
*/
spin_lock_irq(&efivars->lock);

if (!efivars->ops->query_variable_info)
status = EFI_UNSUPPORTED;
else {
const struct efivar_operations *fops = efivars->ops;
status = fops->query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);
}

spin_unlock_irq(&efivars->lock);
varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
status = check_var_size(efivars, attributes, varsize);

if (status != EFI_SUCCESS) {
if (status != EFI_UNSUPPORTED)
return efi_status_to_err(status);

remaining_size = 65536;
if (datasize > 65536)
return -ENOSPC;
}

if (datasize > remaining_size)
return -ENOSPC;

data = kmalloc(datasize, GFP_KERNEL);
if (!data)
return -ENOMEM;

@@ -765,6 +797,19 @@ static ssize_t efivarfs_file_write(struct file *file,
*/
spin_lock_irq(&efivars->lock);

/*
* Ensure that the available space hasn't shrunk below the safe level
*/

status = check_var_size_locked(efivars, attributes, varsize);

if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) {
spin_unlock_irq(&efivars->lock);
kfree(data);

return efi_status_to_err(status);
}

status = efivars->ops->set_variable(var->var.VariableName,
&var->var.VendorGuid,
attributes, datasize,
@@ -929,8 +974,8 @@ static bool efivarfs_valid_name(const char *str, int len)
if (len < GUID_LEN + 2)
return false;

/* GUID should be right after the first '-' */
if (s - 1 != strchr(str, '-'))
/* GUID must be preceded by a '-' */
if (*(s - 1) != '-')
return false;

/*

@@ -1118,15 +1163,22 @@ static struct dentry_operations efivarfs_d_ops = {

static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
{
struct dentry *d;
struct qstr q;
int err;

q.name = name;
q.len = strlen(name);

if (efivarfs_d_hash(NULL, NULL, &q))
return NULL;
err = efivarfs_d_hash(NULL, NULL, &q);
if (err)
return ERR_PTR(err);

return d_alloc(parent, &q);
d = d_alloc(parent, &q);
if (d)
return d;

return ERR_PTR(-ENOMEM);
}

static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)

@@ -1136,6 +1188,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
struct efivar_entry *entry, *n;
struct efivars *efivars = &__efivars;
char *name;
int err = -ENOMEM;

efivarfs_sb = sb;

@@ -1186,8 +1239,10 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
goto fail_name;

dentry = efivarfs_alloc_dentry(root, name);
if (!dentry)
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto fail_inode;
}

/* copied by the above to local storage in the dentry. */
kfree(name);

@@ -1214,7 +1269,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
fail_name:
kfree(name);
fail:
return -ENOMEM;
return err;
}

static struct dentry *efivarfs_mount(struct file_system_type *fs_type,

@@ -1234,6 +1289,7 @@ static struct file_system_type efivarfs_type = {
.mount = efivarfs_mount,
.kill_sb = efivarfs_kill_sb,
};
MODULE_ALIAS_FS("efivarfs");

/*
* Handle negative dentry.

@@ -1345,7 +1401,6 @@ static int efi_pstore_write(enum pstore_type_id type,
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
int i, ret = 0;
u64 storage_space, remaining_space, max_variable_size;
efi_status_t status = EFI_NOT_FOUND;
unsigned long flags;

@@ -1365,11 +1420,11 @@ static int efi_pstore_write(enum pstore_type_id type,
* size: a size of logging data
* DUMP_NAME_LEN * 2: a maximum size of variable name
*/
status = efivars->ops->query_variable_info(PSTORE_EFI_ATTRIBUTES,
&storage_space,
&remaining_space,
&max_variable_size);
if (status || remaining_space < size + DUMP_NAME_LEN * 2) {

status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
size + DUMP_NAME_LEN * 2);

if (status) {
spin_unlock_irqrestore(&efivars->lock, flags);
*id = part;
return -ENOSPC;

@@ -1544,6 +1599,14 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
return -EINVAL;
}

status = check_var_size_locked(efivars, new_var->Attributes,
new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));

if (status && status != EFI_UNSUPPORTED) {
spin_unlock_irq(&efivars->lock);
return efi_status_to_err(status);
}

/* now *really* create the variable via EFI */
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
@@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
return data & (1 << bit) ? 1 : 0;
}

static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
{
return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
return ichx_priv.use_gpio & (1 << (nr / 32));
}

static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
@@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
static void gpiod_free(struct gpio_desc *desc);
static int gpiod_direction_input(struct gpio_desc *desc);
static int gpiod_direction_output(struct gpio_desc *desc, int value);
static int gpiod_get_direction(const struct gpio_desc *desc);
static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
static int gpiod_get_value_cansleep(struct gpio_desc *desc);
static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
static int gpiod_get_value(struct gpio_desc *desc);
static int gpiod_get_value(const struct gpio_desc *desc);
static void gpiod_set_value(struct gpio_desc *desc, int value);
static int gpiod_cansleep(struct gpio_desc *desc);
static int gpiod_to_irq(struct gpio_desc *desc);
static int gpiod_cansleep(const struct gpio_desc *desc);
static int gpiod_to_irq(const struct gpio_desc *desc);
static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
static int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);

@@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
return 0;
}

/* caller holds gpio_lock *OR* gpio is marked as requested */
static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
return desc->chip;
return desc ? desc->chip : NULL;
}

/* caller holds gpio_lock *OR* gpio is marked as requested */
struct gpio_chip *gpio_to_chip(unsigned gpio)
{
return gpiod_to_chip(gpio_to_desc(gpio));

@@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
}

/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
static int gpiod_get_direction(struct gpio_desc *desc)
static int gpiod_get_direction(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
unsigned offset;

@@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
if (status > 0) {
/* GPIOF_DIR_IN, or other positive */
status = 1;
clear_bit(FLAG_IS_OUT, &desc->flags);
/* FLAG_IS_OUT is just a cache of the result of get_direction(),
* so it does not affect constness per se */
clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
}
if (status == 0) {
/* GPIOF_DIR_OUT */
set_bit(FLAG_IS_OUT, &desc->flags);
set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
}
return status;
}

@@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gpio_desc *desc = dev_get_drvdata(dev);
const struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;

mutex_lock(&sysfs_lock);

@@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
goto done;

desc = gpio_to_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}

/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done by on behalf of userspace, so

@@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;

status = -EINVAL;

desc = gpio_to_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
if (!desc)
goto done;
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}

status = -EINVAL;

/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done by on behalf of userspace, so

@@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
{
int status = -EINVAL;

if (!desc)
goto done;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}

mutex_lock(&sysfs_lock);

@@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,

mutex_unlock(&sysfs_lock);

done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
struct device *dev = NULL;
int status = -EINVAL;

if (!desc)
goto done;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}

mutex_lock(&sysfs_lock);

@@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
unlock:
mutex_unlock(&sysfs_lock);

done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);

@@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
struct device *dev = NULL;

if (!desc) {
status = -EINVAL;
goto done;
pr_warn("%s: invalid GPIO\n", __func__);
return;
}

mutex_lock(&sysfs_lock);

@@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
device_unregister(dev);
put_device(dev);
}
done:

if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);

@@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
int status = -EPROBE_DEFER;
unsigned long flags;

if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}

spin_lock_irqsave(&gpio_lock, flags);

if (!desc) {
status = -EINVAL;
goto done;
}
chip = desc->chip;
if (chip == NULL)
goto done;

@@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
done:
if (status)
pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
desc ? desc_to_gpio(desc) : -1,
label ? : "?", status);
desc_to_gpio(desc), label ? : "?", status);
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}

@@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
int status = -EINVAL;
int offset;

if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}

spin_lock_irqsave(&gpio_lock, flags);

if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->get || !chip->direction_input)
goto fail;

@@ -1655,13 +1670,9 @@ static int gpiod_direction_input(struct gpio_desc *desc)
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status) {
int gpio = -1;
if (desc)
gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
}
if (status)
pr_debug("%s: gpio-%d status %d\n", __func__,
desc_to_gpio(desc), status);
return status;
}

@@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
int status = -EINVAL;
int offset;

if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}

/* Open drain pin should not be driven to 1 */
if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
return gpiod_direction_input(desc);
@@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)

spin_lock_irqsave(&gpio_lock, flags);

if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->direction_output)
goto fail;

@@ -1725,13 +1739,9 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status) {
int gpio = -1;
if (desc)
gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
}
if (status)
pr_debug("%s: gpio-%d status %d\n", __func__,
desc_to_gpio(desc), status);
return status;
}

@@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
int status = -EINVAL;
int offset;

if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}

spin_lock_irqsave(&gpio_lock, flags);

if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->set_debounce)
goto fail;

@@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)

fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status) {
int gpio = -1;
if (desc)
gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
}
if (status)
pr_debug("%s: gpio-%d status %d\n", __func__,
desc_to_gpio(desc), status);

return status;
}

@@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
* It returns the zero or nonzero value provided by the associated
* gpio_chip.get() method; or zero if no such method is provided.
*/
static int gpiod_get_value(struct gpio_desc *desc)
static int gpiod_get_value(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;

if (!desc)
return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
/* Should be using gpio_get_value_cansleep() */

@@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;

if (!desc)
return;
chip = desc->chip;
/* Should be using gpio_set_value_cansleep() */
WARN_ON(chip->can_sleep);

@@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
* This is used directly or indirectly to implement gpio_cansleep(). It
* returns nonzero if access reading or writing the GPIO value can sleep.
*/
static int gpiod_cansleep(struct gpio_desc *desc)
static int gpiod_cansleep(const struct gpio_desc *desc)
{
if (!desc)
return 0;
/* only call this on GPIOs that are valid! */
return desc->chip->can_sleep;
}

@@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
* It returns the number of the IRQ signaled by this (input) GPIO,
* or a negative errno.
*/
static int gpiod_to_irq(struct gpio_desc *desc)
static int gpiod_to_irq(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int offset;

if (!desc)
return -EINVAL;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;

@@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
* Common examples include ones connected to I2C or SPI chips.
*/

static int gpiod_get_value_cansleep(struct gpio_desc *desc)
static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;

might_sleep_if(extra_checks);
if (!desc)
return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
value = chip->get ? chip->get(chip, offset) : 0;

@@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
struct gpio_chip *chip;

might_sleep_if(extra_checks);
if (!desc)
return;
chip = desc->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),

@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_modeset_disable(dev);

drm_irq_uninstall(dev);
dev_priv->enable_hotplug_processing = false;
}

i915_save_state(dev);

@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);

/* We need working interrupts for modeset enabling ... */
drm_irq_install(dev);

intel_modeset_init_hw(dev);
intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);

/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
* bother with the tiny race here where we might loose hotplug
* notifications.
* */
intel_hpd_init(dev);
dev_priv->enable_hotplug_processing = true;
}

intel_opregion_init(dev);
@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, pm_iir;
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
irqreturn_t ret = IRQ_NONE;
int i;

@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);

gt_iir = I915_READ(GTIIR);
if (gt_iir) {
snb_gt_irq_handler(dev, dev_priv, gt_iir);

@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)

I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);

return ret;
}

@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pm_iir;
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

atomic_inc(&dev_priv->irq_received);

@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);

/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);

de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pm_iir = I915_READ(GEN6_PMIIR);

@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);

return ret;
}
@@ -1613,9 +1613,9 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
#define ADPA_VSYNC_CNTL_ENABLE 0
#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
#define ADPA_HSYNC_CNTL_ENABLE 0
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
#define ADPA_VSYNC_ACTIVE_LOW 0
@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
u32 temp;

temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
temp &= ~ADPA_DAC_ENABLE;
I915_WRITE(crt->adpa_reg, temp);
}
@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
enum port port = intel_dig_port->port;
bool wait;
uint32_t val;
bool wait = false;

if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}

/**
* i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
* cursor plane briefly if not already running after enabling the display
* plane.
* This workaround avoids occasional blank screens when self refresh is
* enabled.
*/
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
{
u32 cntl = I915_READ(CURCNTR(pipe));

if ((cntl & CURSOR_MODE) == 0) {
u32 fw_bcl_self = I915_READ(FW_BLC_SELF);

I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
intel_wait_for_vblank(dev_priv->dev, pipe);
I915_WRITE(CURCNTR(pipe), cntl);
I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
I915_WRITE(FW_BLC_SELF, fw_bcl_self);
}
}

static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;

@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)

intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
if (IS_G4X(dev))
g4x_fixup_plane(dev_priv, pipe);

intel_crtc_load_lut(crtc);
intel_update_fbc(dev);

@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *old_fb = crtc->fb;
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;

@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

work->event = event;
work->crtc = crtc;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);

ret = drm_vblank_get(dev, intel_crtc->pipe);

@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->unpin_work = work;
spin_unlock_irqrestore(&dev->event_lock, flags);

intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;

if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);

@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
crtc->fb = old_fb;
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies(10));
else
done = wait_for_atomic(C, 10) == 0;
if (!done)
@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

/* Check if we are enabling RC6 */
@@ -112,7 +112,6 @@ struct mga_framebuffer {
struct mga_fbdev {
struct drm_fb_helper helper;
struct mga_framebuffer mfb;
struct list_head fbdev_list;
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
int ret;
int data, clock;

WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
WREG_DAC(MGA1064_GEN_IO_CTL, 0);
@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
static int mga_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct mga_device *mdev = (struct mga_device*)dev->dev_private;
struct mga_fbdev *mfbdev = mdev->mfbdev;
struct drm_fb_helper *fb_helper = &mfbdev->helper;
struct drm_fb_helper_connector *fb_helper_conn = NULL;
int bpp = 32;
int i = 0;

/* FIXME: Add bandwidth and g200se limitations */

if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||

@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}

/* Validate the mode input by the user */
for (i = 0; i < fb_helper->connector_count; i++) {
if (fb_helper->connector_info[i]->connector == connector) {
/* Found the helper for this connector */
fb_helper_conn = fb_helper->connector_info[i];
if (fb_helper_conn->cmdline_mode.specified) {
if (fb_helper_conn->cmdline_mode.bpp_specified) {
bpp = fb_helper_conn->cmdline_mode.bpp;
}
}
}
}

if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
if (fb_helper_conn)
fb_helper_conn->cmdline_mode.specified = false;
return MODE_BAD;
}

return MODE_OK;
}
@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
}

nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
}
@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
init->offset += 2;

init_wr32(init, dreg, idata);
init_mask(init, creg, ~mask, data | idata);
init_mask(init, creg, ~mask, data | iaddr);
}
}
Some files were not shown because too many files have changed in this diff.