Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux

Pull thermal management updates from Zhang Rui:
 "Highlights:

   - introduction of Dove thermal sensor driver.
   - introduction of Kirkwood thermal sensor driver.
   - introduction of intel_powerclamp thermal cooling device driver.
   - add interrupt and DT support for rcar thermal driver.
   - add thermal emulation support which allows platform thermal driver
     to do software/hardware emulation for thermal issues."

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux: (36 commits)
  thermal: rcar: remove __devinitconst
  thermal: return an error on failure to register thermal class
  Thermal: rename thermal governor Kconfig option to avoid generic naming
  thermal: exynos: Use the new thermal trend type for quick cooling action.
  Thermal: exynos: Add support for temperature falling interrupt.
  Thermal: Dove: Add Themal sensor support for Dove.
  thermal: Add support for the thermal sensor on Kirkwood SoCs
  thermal: rcar: add Device Tree support
  thermal: rcar: remove machine_power_off() from rcar_thermal_notify()
  thermal: rcar: add interrupt support
  thermal: rcar: add read/write functions for common/priv data
  thermal: rcar: multi channel support
  thermal: rcar: use mutex lock instead of spin lock
  thermal: rcar: enable CPCTL to use hardware TSC deciding
  thermal: rcar: use parenthesis on macro
  Thermal: fix a build warning when CONFIG_THERMAL_EMULATION cleared
  Thermal: fix a wrong comment
  thermal: sysfs: Add a new sysfs node emul_temp for thermal emulation
  PM: intel_powerclamp: off by one in start_power_clamp()
  thermal: exynos: Miscellaneous fixes to support falling threshold interrupt
  ...

commit 2af78448ff
23 changed files with 2342 additions and 274 deletions
Documentation/devicetree/bindings/thermal/dove-thermal.txt (new file, 18 lines)
@@ -0,0 +1,18 @@
* Dove Thermal

This driver is for Dove SoCs which contain a thermal sensor.

Required properties:
- compatible : "marvell,dove-thermal"
- reg : Address range of the thermal registers

The reg property should contain two ranges. The first is for the
three Thermal Manager registers, while the second range contains the
Thermal Diode Control Registers.

Example:

	thermal@10078 {
		compatible = "marvell,dove-thermal";
		reg = <0xd001c 0x0c>, <0xd005c 0x08>;
	};
Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt (new file, 15 lines)
@@ -0,0 +1,15 @@
* Kirkwood Thermal

This version is for Kirkwood 88F6282 & 88F6283 SoCs. Other Kirkwoods
don't contain a thermal sensor.

Required properties:
- compatible : "marvell,kirkwood-thermal"
- reg : Address range of the thermal registers

Example:

	thermal@10078 {
		compatible = "marvell,kirkwood-thermal";
		reg = <0x10078 0x4>;
	};
Documentation/devicetree/bindings/thermal/rcar-thermal.txt (new file, 29 lines)
@@ -0,0 +1,29 @@
* Renesas R-Car Thermal

Required properties:
- compatible	: "renesas,rcar-thermal"
- reg		: Address range of the thermal registers.
		  The 1st reg will be recognized as common register
		  if it has "interrupts".

Optional properties:

- interrupts	: use interrupt

Example (non interrupt support):

	thermal@e61f0100 {
		compatible = "renesas,rcar-thermal";
		reg = <0xe61f0100 0x38>;
	};

Example (interrupt support):

	thermal@e61f0000 {
		compatible = "renesas,rcar-thermal";
		reg = <0xe61f0000 0x14
		       0xe61f0100 0x38
		       0xe61f0200 0x38
		       0xe61f0300 0x38>;
		interrupts = <0 69 4>;
	};
Documentation/thermal/exynos_thermal_emulation (new file, 53 lines)
@@ -0,0 +1,53 @@
EXYNOS EMULATION MODE
=====================

Copyright (C) 2012 Samsung Electronics

Written by Jonghwa Lee <jonghwa3.lee@samsung.com>

Description
-----------

Exynos 4x12 (4212, 4412) and 5 series provide an emulation mode for the thermal management unit.
Thermal emulation mode supports software debugging of the TMU's operation. The user can set a
temperature manually in software, and the TMU will then report that user value as the current
temperature instead of the sensor's value.

Enabling the CONFIG_EXYNOS_THERMAL_EMUL option makes this support available.
When it is enabled, a sysfs node named 'emulation' will be created under
/sys/bus/platform/devices/'exynos device name'/.

The 'emulation' sysfs node contains the value 0 in its initial state. When you write any
temperature you want to this node, it automatically enables emulation mode and the
current temperature is changed to that value.
(Exynos also supports a user-changeable delay time which would be used to delay the
change of temperature. However, this node only uses the same delay as the real sensing time, 938us.)

Exynos emulation mode requires the value change and the enable to happen together. That is, when you
want to update any value of delay or next temperature, you have to enable emulation
mode at the same time (or keep the mode enabled). If you don't, the value is not
changed to the updated one and the last successful value keeps being used. That's why
this node only gives users the ability to change the temperature; having just one interface
makes it simpler to use.

Disabling emulation mode only requires writing the value 0 to the sysfs node.

   TEMP 120 |
            |
        100 |
            |
         80 |
            |                       +-----------
         60 |                       |          |
            |       +---------------|          |
         40 |       |               |          |
            |       |               |          |
         20 |       |               |          +----------
            |       |               |          |          |
          0 |_______|_______________|__________|__________|_________
                    A               A          A          A     TIME
                    |<----->|       |<----->|  |<----->|  |
                    | 938us |       |       |  |       |  |
   emulation : 0    50      |       70      |  20      |  0
current temp : sensor  50           70         20         sensor
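As a rough illustration of the node described above, a minimal userspace sketch in C follows.
It is not part of the patch set; the platform device directory name is an assumption and must be
replaced with the real 'exynos device name' found under /sys/bus/platform/devices/.

	/*
	 * Minimal sketch: drive the Exynos TMU 'emulation' node.
	 * "exynos-tmu" below is an assumed device name.
	 */
	#include <stdio.h>

	#define EMUL_NODE "/sys/bus/platform/devices/exynos-tmu/emulation"

	static int write_emul(int millicelsius)
	{
		FILE *f = fopen(EMUL_NODE, "w");

		if (!f)
			return -1;
		/* A non-zero value enables emulation at that temperature;
		 * writing 0 disables it again. */
		fprintf(f, "%d\n", millicelsius);
		return fclose(f);
	}

	int main(void)
	{
		write_emul(70000);	/* pretend the sensor reads 70 degC */
		/* ... observe trip point handling here ... */
		write_emul(0);		/* back to the real sensor reading */
		return 0;
	}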
Documentation/thermal/intel_powerclamp.txt (new file, 307 lines)
@@ -0,0 +1,307 @@
=======================
INTEL POWERCLAMP DRIVER
=======================
By: Arjan van de Ven <arjan@linux.intel.com>
    Jacob Pan <jacob.jun.pan@linux.intel.com>

Contents:
	(*) Introduction
	    - Goals and Objectives

	(*) Theory of Operation
	    - Idle Injection
	    - Calibration

	(*) Performance Analysis
	    - Effectiveness and Limitations
	    - Power vs Performance
	    - Scalability
	    - Calibration
	    - Comparison with Alternative Techniques

	(*) Usage and Interfaces
	    - Generic Thermal Layer (sysfs)
	    - Kernel APIs (TBD)

============
INTRODUCTION
============

Consider the situation where a system's power consumption must be
reduced at runtime, due to power budget, thermal constraint, or noise
level, and where active cooling is not preferred. Software managed
passive power reduction must be performed to prevent the hardware
actions that are designed for catastrophic scenarios.

Currently, P-states, T-states (clock modulation), and CPU offlining
are used for CPU throttling.

On Intel CPUs, C-states provide effective power reduction, but so far
they're only used opportunistically, based on workload. With the
development of the intel_powerclamp driver, a method of synchronizing
idle injection across all online CPU threads was introduced. The goal
is to achieve forced and controllable C-state residency.

Test/Analysis has been made in the areas of power, performance,
scalability, and user experience. In many cases, a clear advantage is
shown over taking the CPU offline or modulating the CPU clock.


===================
THEORY OF OPERATION
===================

Idle Injection
--------------

On modern Intel processors (Nehalem or later), package level C-state
residency is available in MSRs, thus also available to the kernel.

These MSRs are:
	#define MSR_PKG_C2_RESIDENCY	0x60D
	#define MSR_PKG_C3_RESIDENCY	0x3F8
	#define MSR_PKG_C6_RESIDENCY	0x3F9
	#define MSR_PKG_C7_RESIDENCY	0x3FA

If the kernel can also inject idle time into the system, then a
closed-loop control system can be established that manages package
level C-state. The intel_powerclamp driver is conceived as such a
control system, where the target set point is a user-selected idle
ratio (based on power reduction), and the error is the difference
between the actual package level C-state residency ratio and the target
idle ratio.

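To make the "actual residency ratio" side of that loop concrete, here is a rough userspace
sketch (not part of the driver) that samples one of the MSRs listed above through
/dev/cpu/0/msr (requires root and the msr module) and reports the package C6 residency over a
one-second window. It assumes the residency counters tick at TSC rate, which holds for the
Nehalem-and-later parts this driver targets.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_TSC			0x10
	#define MSR_PKG_C6_RESIDENCY	0x3F9

	static uint64_t rdmsr(int fd, uint32_t reg)
	{
		uint64_t val = 0;

		pread(fd, &val, sizeof(val), reg);	/* offset selects the MSR */
		return val;
	}

	int main(void)
	{
		int fd = open("/dev/cpu/0/msr", O_RDONLY);
		uint64_t c6_0, c6_1, tsc_0, tsc_1;

		if (fd < 0)
			return 1;
		c6_0 = rdmsr(fd, MSR_PKG_C6_RESIDENCY);
		tsc_0 = rdmsr(fd, MSR_TSC);
		sleep(1);
		c6_1 = rdmsr(fd, MSR_PKG_C6_RESIDENCY);
		tsc_1 = rdmsr(fd, MSR_TSC);
		printf("pkg C6 residency: %.1f%%\n",
		       100.0 * (c6_1 - c6_0) / (tsc_1 - tsc_0));
		close(fd);
		return 0;
	}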
Injection is controlled by high priority kernel threads, spawned for
each online CPU.

These kernel threads, with SCHED_FIFO class, are created to perform
clamping actions of controlled duty ratio and duration. Each per-CPU
thread synchronizes its idle time and duration, based on the rounding
of jiffies, so accumulated errors can be prevented to avoid a jittery
effect. Threads are also bound to the CPU such that they cannot be
migrated, unless the CPU is taken offline. In this case, threads
belonging to the offlined CPUs will be terminated immediately.

Running as SCHED_FIFO and at relatively high priority also allows such a
scheme to work for both preemptable and non-preemptable kernels.
Alignment of idle time around jiffies ensures scalability for HZ
values. This effect can be better visualized using a Perf timechart.
The following diagram shows the behavior of the kernel thread
kidle_inject/cpu. During idle injection, it runs monitor/mwait idle
for a given "duration", then relinquishes the CPU to other tasks,
until the next time interval.

The NOHZ schedule tick is disabled during idle time, but interrupts
are not masked. Tests show that the extra wakeups from the scheduler tick
have a dramatic impact on the effectiveness of the powerclamp driver
on large scale systems (Westmere system with 80 processors).

	CPU0
			  ____________          ____________
	kidle_inject/0   |   sleep    |  mwait |  sleep     |
	        _________|            |________|            |_______
	                               duration
	CPU1
			  ____________          ____________
	kidle_inject/1   |   sleep    |  mwait |  sleep     |
	        _________|            |________|            |_______
	                              ^
	                              |
	                              |
	                              roundup(jiffies, interval)

Only one CPU is allowed to collect statistics and update global
control parameters. This CPU is referred to as the controlling CPU in
this document. The controlling CPU is elected at runtime, with a
policy that favors the BSP, taking into account the possibility of a CPU
hot-plug.

In terms of dynamics of the idle control system, package level idle
time is considered largely as a non-causal system where its behavior
cannot be based on the past or current input. Therefore, the
intel_powerclamp driver attempts to enforce the desired idle time
instantly as given input (target idle ratio). After injection,
powerclamp monitors the actual idle for a given time window and adjusts
the next injection accordingly to avoid over/under correction.

When used in a causal control system, such as a temperature control,
it is up to the user of this driver to implement algorithms where
past samples and outputs are included in the feedback. For example, a
PID-based thermal controller can use the powerclamp driver to
maintain a desired target temperature, based on integral and
derivative gains of the past samples.

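An illustrative sketch of such a userspace controller follows; it is not shipped with the
driver, and the thermal zone number, cooling device number, and gains are all assumptions. It
only shows the shape of the feedback loop: read a temperature, integrate the error, and write an
idle percentage to the powerclamp cooling device.

	#include <stdio.h>
	#include <unistd.h>

	#define TEMP_NODE  "/sys/class/thermal/thermal_zone0/temp"          /* assumed zone */
	#define STATE_NODE "/sys/class/thermal/cooling_device14/cur_state"  /* assumed device */

	static long read_long(const char *path)
	{
		long v = 0;
		FILE *f = fopen(path, "r");

		if (f) {
			fscanf(f, "%ld", &v);
			fclose(f);
		}
		return v;
	}

	static void write_long(const char *path, long v)
	{
		FILE *f = fopen(path, "w");

		if (f) {
			fprintf(f, "%ld\n", v);
			fclose(f);
		}
	}

	int main(void)
	{
		const long target = 75000;		/* 75 degC in millidegrees */
		const double kp = 0.002, ki = 0.0005;	/* made-up gains */
		double integral = 0.0, out;

		for (;;) {
			long err = read_long(TEMP_NODE) - target;

			integral += err;
			out = kp * err + ki * integral;
			if (out < 0)
				out = 0;
			if (out > 50)
				out = 50;	/* powerclamp max_state is 50 */
			write_long(STATE_NODE, (long)out);
			sleep(2);
		}
		return 0;
	}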
Calibration
-----------
During scalability testing, it is observed that synchronized actions
among CPUs become challenging as the number of cores grows. This is
also true for the ability of a system to enter package level C-states.

To make sure the intel_powerclamp driver scales well, online
calibration is implemented. The goals for doing such a calibration
are:

a) determine the effective range of idle injection ratio
b) determine the amount of compensation needed at each target ratio

Compensation to each target ratio consists of two parts:

	a) steady state error compensation
	This is to offset the error occurring when the system can
	enter idle without extra wakeups (such as external interrupts).

	b) dynamic error compensation
	When an excessive amount of wakeups occurs during idle, an
	additional idle ratio can be added to quiet interrupts, by
	slowing down CPU activities.

A debugfs file is provided for the user to examine compensation
progress and results, such as on a Westmere system.

[jacob@nex01 ~]$ cat /sys/kernel/debug/intel_powerclamp/powerclamp_calib
controlling cpu: 0
pct	confidence	steady	dynamic (compensation)
0	0	0	0
1	1	0	0
2	1	1	0
3	3	1	0
4	3	1	0
5	3	1	0
6	3	1	0
7	3	1	0
8	3	1	0
...
30	3	2	0
31	3	2	0
32	3	1	0
33	3	2	0
34	3	1	0
35	3	2	0
36	3	1	0
37	3	2	0
38	3	1	0
39	3	2	0
40	3	3	0
41	3	1	0
42	3	2	0
43	3	1	0
44	3	1	0
45	3	2	0
46	3	3	0
47	3	0	0
48	3	2	0
49	3	3	0

Calibration occurs during runtime. No offline method is available.
Steady state compensation is used only when the confidence levels of all
adjacent ratios have reached a satisfactory level. A confidence level
is accumulated based on clean data collected at runtime. Data
collected during a period without extra interrupts is considered
clean.

To compensate for excessive amounts of wakeups during idle, additional
idle time is injected when such a condition is detected. Currently,
we have a simple algorithm to double the injection ratio. A possible
enhancement might be to throttle the offending IRQ, such as delaying
EOI for level triggered interrupts. But it is a challenge to be
non-intrusive to the scheduler or the IRQ core code.


CPU Online/Offline
------------------
Per-CPU kernel threads are started/stopped upon receiving
notifications of CPU hotplug activities. The intel_powerclamp driver
keeps track of clamping kernel threads, even after they are migrated
to other CPUs, after a CPU offline event.

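A simplified model of the steady-state compensation lookup described above is sketched below
(the real implementation lives in drivers/thermal/intel_powerclamp.c); the point it illustrates
is that a compensation value is only trusted once the target ratio and its neighbours have
accumulated enough confidence, and the neighbours are then averaged. Edge handling is omitted.

	#define MAX_TARGET_RATIO	50
	#define CONFIDENCE_OK		3

	struct calib {
		unsigned long confidence;
		unsigned long steady_comp;
	};

	static unsigned int steady_compensation(const struct calib *cal, int ratio)
	{
		if (ratio <= 0 || ratio >= MAX_TARGET_RATIO - 1)
			return 0;	/* sketch: skip the edge-case handling */

		if (cal[ratio - 1].confidence >= CONFIDENCE_OK &&
		    cal[ratio].confidence     >= CONFIDENCE_OK &&
		    cal[ratio + 1].confidence >= CONFIDENCE_OK)
			return (cal[ratio - 1].steady_comp +
				cal[ratio].steady_comp +
				cal[ratio + 1].steady_comp) / 3;

		return 0;	/* not enough clean data yet */
	}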
====================
Performance Analysis
====================
This section describes the general performance data collected on
multiple systems, including Westmere (80P) and Ivy Bridge (4P, 8P).

Effectiveness and Limitations
-----------------------------
The maximum range that idle injection is allowed is capped at 50
percent. As mentioned earlier, since interrupts are allowed during
forced idle time, excessive interrupts could result in less
effectiveness. The extreme case would be doing a ping -f to generate
flooded network interrupts without much CPU acknowledgement. In this
case, little can be done from the idle injection threads. In most
normal cases, such as scp of a large file, applications can be throttled
by the powerclamp driver, since slowing down the CPU also slows down
network protocol processing, which in turn reduces interrupts.

When control parameters are changed at runtime by the controlling CPU, it
may take an additional period for the rest of the CPUs to catch up
with the changes. During this time, idle injection is out of sync,
thus not able to enter package C-states at the expected ratio. But
this effect is minor, in that in most cases a change to the target
ratio happens much less frequently than the idle injection
frequency.

Scalability
-----------
Tests also show a minor, but measurable, difference between the 4P/8P
Ivy Bridge system and the 80P Westmere server under 50% idle ratio.
More compensation is needed on Westmere for the same
target idle ratio. The compensation also increases as the idle ratio
gets larger. This is the reason the calibration code is needed.

On the IVB 8P system, compared to an offline CPU, powerclamp can
achieve up to 40% better performance per watt (measured by a spin
counter summed over per-CPU counting threads spawned for all running
CPUs).

====================
Usage and Interfaces
====================
The powerclamp driver is registered to the generic thermal layer as a
cooling device. Currently, it is not bound to any thermal zones.

jacob@chromoly:/sys/class/thermal/cooling_device14$ grep . *
cur_state:0
max_state:50
type:intel_powerclamp

Example usage:
- To inject 25% idle time
$ sudo sh -c "echo 25 > /sys/class/thermal/cooling_device80/cur_state"

If the system is not busy and has more than 25% idle time already,
then the powerclamp driver will not start idle injection. Running top
will not show idle injection kernel threads.

If the system is busy (spin test below) and has less than 25% natural
idle time, powerclamp kernel threads will do idle injection, which
appear as running tasks to the scheduler. But the overall system idle is still
reflected. In this example, 24.1% idle is shown. This helps the
system admin or user determine the cause of slowdown, when the
powerclamp driver is in action.

Tasks: 197 total,   1 running, 196 sleeping,   0 stopped,   0 zombie
Cpu(s): 71.2%us,  4.7%sy,  0.0%ni, 24.1%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
Mem:   3943228k total,  1689632k used,  2253596k free,    74960k buffers
Swap:  4087804k total,        0k used,  4087804k free,   945336k cached

  PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
 3352 jacob     20   0  262m  644  428 S  286  0.0   0:17.16 spin
 3341 root     -51   0     0    0    0 D   25  0.0   0:01.62 kidle_inject/0
 3344 root     -51   0     0    0    0 D   25  0.0   0:01.60 kidle_inject/3
 3342 root     -51   0     0    0    0 D   25  0.0   0:01.61 kidle_inject/1
 3343 root     -51   0     0    0    0 D   25  0.0   0:01.60 kidle_inject/2
 2935 jacob     20   0  696m 125m  35m S    5  3.3   0:31.11 firefox
 1546 root      20   0  158m  20m 6640 S    3  0.5   0:26.97 Xorg
 2100 jacob     20   0 1223m  88m  30m S    3  2.3   0:23.68 compiz

Tests have shown that by using the powerclamp driver as a cooling
|
||||
device, a PID based userspace thermal controller can manage to
|
||||
control CPU temperature effectively, when no other thermal influence
|
||||
is added. For example, a UltraBook user can compile the kernel under
|
||||
certain temperature (below most active trip points).
|
Documentation/thermal/sysfs-api.txt
@@ -55,6 +55,8 @@ temperature) and throttle appropriate devices.
    .get_trip_type: get the type of a certain trip point.
    .get_trip_temp: get the temperature above which the certain trip point
		    will be fired.
    .set_emul_temp: set the emulation temperature which helps in debugging
		    different threshold temperature points.

1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)

@@ -153,6 +155,7 @@ Thermal zone device sys I/F, created once it's registered:
    |---trip_point_[0-*]_temp:	Trip point temperature
    |---trip_point_[0-*]_type:	Trip point type
    |---trip_point_[0-*]_hyst:	Hysteresis value for this trip point
    |---emul_temp:		Emulated temperature set node

Thermal cooling device sys I/F, created once it's registered:
/sys/class/thermal/cooling_device[0-*]:

@@ -252,6 +255,16 @@ passive
	Valid values: 0 (disabled) or greater than 1000
	RW, Optional

emul_temp
	Interface to set the emulated temperature of a thermal zone
	(sensor). After setting this temperature, the thermal zone may pass
	this temperature to a platform emulation function if registered, or
	cache it locally. This is useful in debugging different temperature
	thresholds and their associated cooling actions. This is a write-only
	node and writing 0 to this node should disable emulation.
	Unit: millidegree Celsius
	WO, Optional

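For illustration, a minimal userspace sketch of exercising this node (the zone number is an
assumption; values are millidegrees Celsius, and writing 0 hands the zone back to the real
sensor):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/thermal/thermal_zone0/emul_temp", "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", 95000);	/* pretend 95 degC to trigger trip points */
		return fclose(f);
	}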
*****************************
* Cooling device attributes *
*****************************

@@ -329,8 +342,9 @@ The framework includes a simple notification mechanism, in the form of a
netlink event. Netlink socket initialization is done during the _init_
of the framework. Drivers which intend to use the notification mechanism
just need to call thermal_generate_netlink_event() with two arguments viz
(originator, event). Typically the originator will be an integer assigned
to a thermal_zone_device when it registers itself with the framework. The
(originator, event). The originator is a pointer to struct thermal_zone_device
from where the event has been originated. An integer which represents the
thermal zone device will be used in the message to identify the zone. The
event will be one of: {THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
THERMAL_DEV_FAULT}. Notification can be sent when the current temperature
crosses any of the configured thresholds.

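A driver-side sketch of that call, assuming the post-change prototype
thermal_generate_netlink_event(struct thermal_zone_device *, enum events), where "tz" is the
zone the driver registered earlier:

	#include <linux/thermal.h>

	static void example_report_aux0(struct thermal_zone_device *tz)
	{
		/* Tell netlink listeners that the AUX0 threshold was crossed. */
		thermal_generate_netlink_event(tz, THERMAL_AUX0);
	}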
arch/x86/kernel/nmi.c
@@ -509,3 +509,4 @@ void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);

drivers/thermal/Kconfig
@@ -29,14 +29,14 @@ choice

config THERMAL_DEFAULT_GOV_STEP_WISE
	bool "step_wise"
	select STEP_WISE
	select THERMAL_GOV_STEP_WISE
	help
	  Use the step_wise governor as default. This throttles the
	  devices one step at a time.

config THERMAL_DEFAULT_GOV_FAIR_SHARE
	bool "fair_share"
	select FAIR_SHARE
	select THERMAL_GOV_FAIR_SHARE
	help
	  Use the fair_share governor as default. This throttles the
	  devices based on their 'contribution' to a zone. The

@@ -44,24 +44,24 @@ config THERMAL_DEFAULT_GOV_FAIR_SHARE

config THERMAL_DEFAULT_GOV_USER_SPACE
	bool "user_space"
	select USER_SPACE
	select THERMAL_GOV_USER_SPACE
	help
	  Select this if you want to let the user space manage the
	  platform thermals.

endchoice

config FAIR_SHARE
config THERMAL_GOV_FAIR_SHARE
	bool "Fair-share thermal governor"
	help
	  Enable this to manage platform thermals using fair-share governor.

config STEP_WISE
config THERMAL_GOV_STEP_WISE
	bool "Step_wise thermal governor"
	help
	  Enable this to manage platform thermals using a simple linear

config USER_SPACE
config THERMAL_GOV_USER_SPACE
	bool "User_space thermal governor"
	help
	  Enable this to let the user space manage the platform thermals.

@@ -78,6 +78,14 @@ config CPU_THERMAL
	  and not the ACPI interface.
	  If you want this support, you should say Y here.

config THERMAL_EMULATION
	bool "Thermal emulation mode support"
	help
	  Enable this option to make an emul_temp sysfs node in the thermal zone
	  directory to support temperature emulation. With the emulation sysfs node,
	  users can manually input a temperature and test the different trip
	  threshold behaviour for simulation purposes.

config SPEAR_THERMAL
	bool "SPEAr thermal sensor driver"
	depends on PLAT_SPEAR

@@ -93,6 +101,14 @@ config RCAR_THERMAL
	  Enable this to plug the R-Car thermal sensor driver into the Linux
	  thermal framework

config KIRKWOOD_THERMAL
	tristate "Temperature sensor on Marvell Kirkwood SoCs"
	depends on ARCH_KIRKWOOD
	depends on OF
	help
	  Support for the Kirkwood thermal sensor driver in the Linux thermal
	  framework. Only Kirkwood 88F6282 and 88F6283 have this sensor.

config EXYNOS_THERMAL
	tristate "Temperature sensor on Samsung EXYNOS"
	depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5)

@@ -101,6 +117,23 @@ config EXYNOS_THERMAL
	  If you say yes here you get support for TMU (Thermal Management
	  Unit) on SAMSUNG EXYNOS series of SoC.

config EXYNOS_THERMAL_EMUL
	bool "EXYNOS TMU emulation mode support"
	depends on EXYNOS_THERMAL
	help
	  Exynos 4412, 4414 and 5 series have an emulation mode on the TMU.
	  Enabling this option will create a sysfs node in the exynos thermal platform
	  device directory to support emulation mode. With the emulation mode sysfs
	  node, you can manually input a temperature to the TMU for simulation purposes.

config DOVE_THERMAL
	tristate "Temperature sensor on Marvell Dove SoCs"
	depends on ARCH_DOVE
	depends on OF
	help
	  Support for the Dove thermal sensor driver in the Linux thermal
	  framework.

config DB8500_THERMAL
	bool "DB8500 thermal management"
	depends on ARCH_U8500

@@ -122,4 +155,14 @@ config DB8500_CPUFREQ_COOLING
	  bound cpufreq cooling device turns active to set CPU frequency low to
	  cool down the CPU.

config INTEL_POWERCLAMP
	tristate "Intel PowerClamp idle injection driver"
	depends on THERMAL
	depends on X86
	depends on CPU_SUP_INTEL
	help
	  Enable this to enable the Intel PowerClamp idle injection driver. This
	  enforces idle time which results in more package C-state residency. The
	  user interface is exposed via the generic thermal framework.

endif

drivers/thermal/Makefile
@@ -5,9 +5,9 @@
obj-$(CONFIG_THERMAL)		+= thermal_sys.o

# governors
obj-$(CONFIG_FAIR_SHARE)	+= fair_share.o
obj-$(CONFIG_STEP_WISE)		+= step_wise.o
obj-$(CONFIG_USER_SPACE)	+= user_space.o
obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE)	+= fair_share.o
obj-$(CONFIG_THERMAL_GOV_STEP_WISE)	+= step_wise.o
obj-$(CONFIG_THERMAL_GOV_USER_SPACE)	+= user_space.o

# cpufreq cooling
obj-$(CONFIG_CPU_THERMAL)	+= cpu_cooling.o

@@ -15,6 +15,10 @@ obj-$(CONFIG_CPU_THERMAL)	+= cpu_cooling.o
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL)	+= spear_thermal.o
obj-$(CONFIG_RCAR_THERMAL)	+= rcar_thermal.o
obj-$(CONFIG_KIRKWOOD_THERMAL)	+= kirkwood_thermal.o
obj-$(CONFIG_EXYNOS_THERMAL)	+= exynos_thermal.o
obj-$(CONFIG_DOVE_THERMAL)	+= dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL)	+= db8500_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING)	+= db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP)	+= intel_powerclamp.o

drivers/thermal/cpu_cooling.c
@@ -111,8 +111,8 @@ static int is_cpufreq_valid(int cpu)
/**
 * get_cpu_frequency - get the absolute value of frequency from level.
 * @cpu: cpu for which frequency is fetched.
 * @level: level of frequency of the CPU
 *	e.g level=1 --> 1st MAX FREQ, LEVEL=2 ---> 2nd MAX FREQ, .... etc
 * @level: level of frequency, equals cooling state of cpu cooling device
 *	e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
 */
static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
{

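A toy illustration of the corrected numbering in the comment above: cooling state (level) 0 now
selects the highest frequency in a descending table, level 1 the next one down, and so on. The
table values here are made up.

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	static const unsigned int freq_table_khz[] = { 1600000, 1200000, 800000 };

	static unsigned int freq_for_level(unsigned long level)
	{
		if (level >= ARRAY_SIZE(freq_table_khz))
			return 0;		/* invalid cooling state */
		return freq_table_khz[level];	/* level 0 --> 1st MAX FREQ */
	}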
drivers/thermal/db8500_cpufreq_cooling.c
@@ -21,6 +21,7 @@
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

@@ -73,15 +74,13 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
	{ .compatible = "stericsson,db8500-cpufreq-cooling" },
	{},
};
#else
#define db8500_cpufreq_cooling_match NULL
#endif

static struct platform_driver db8500_cpufreq_cooling_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "db8500-cpufreq-cooling",
		.of_match_table = db8500_cpufreq_cooling_match,
		.of_match_table = of_match_ptr(db8500_cpufreq_cooling_match),
	},
	.probe = db8500_cpufreq_cooling_probe,
	.suspend = db8500_cpufreq_cooling_suspend,

drivers/thermal/db8500_thermal.c
@@ -508,15 +508,13 @@ static const struct of_device_id db8500_thermal_match[] = {
	{ .compatible = "stericsson,db8500-thermal" },
	{},
};
#else
#define db8500_thermal_match NULL
#endif

static struct platform_driver db8500_thermal_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "db8500-thermal",
		.of_match_table = db8500_thermal_match,
		.of_match_table = of_match_ptr(db8500_thermal_match),
	},
	.probe = db8500_thermal_probe,
	.suspend = db8500_thermal_suspend,

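The reason the #else/#define-NULL fallback can go away in both db8500 diffs above is that
of_match_ptr() itself compiles to NULL when CONFIG_OF is not set; shown here in simplified form,
matching the <linux/of.h> helper:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif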
drivers/thermal/dove_thermal.c (new file, 209 lines)
@@ -0,0 +1,209 @@
|
|||
/*
|
||||
* Dove thermal sensor driver
|
||||
*
|
||||
* Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/thermal.h>
|
||||
|
||||
#define DOVE_THERMAL_TEMP_OFFSET 1
|
||||
#define DOVE_THERMAL_TEMP_MASK 0x1FF
|
||||
|
||||
/* Dove Thermal Manager Control and Status Register */
|
||||
#define PMU_TM_DISABLE_OFFS 0
|
||||
#define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS)
|
||||
|
||||
/* Dove Theraml Diode Control 0 Register */
|
||||
#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
|
||||
#define PMU_TDC0_SEL_VCAL_OFFS 5
|
||||
#define PMU_TDC0_SEL_VCAL_MASK (0x3 << PMU_TDC0_SEL_VCAL_OFFS)
|
||||
#define PMU_TDC0_REF_CAL_CNT_OFFS 11
|
||||
#define PMU_TDC0_REF_CAL_CNT_MASK (0x1FF << PMU_TDC0_REF_CAL_CNT_OFFS)
|
||||
#define PMU_TDC0_AVG_NUM_OFFS 25
|
||||
#define PMU_TDC0_AVG_NUM_MASK (0x7 << PMU_TDC0_AVG_NUM_OFFS)
|
||||
|
||||
/* Dove Thermal Diode Control 1 Register */
|
||||
#define PMU_TEMP_DIOD_CTRL1_REG 0x04
|
||||
#define PMU_TDC1_TEMP_VALID_MASK (0x1 << 10)
|
||||
|
||||
/* Dove Thermal Sensor Dev Structure */
|
||||
struct dove_thermal_priv {
|
||||
void __iomem *sensor;
|
||||
void __iomem *control;
|
||||
};
|
||||
|
||||
static int dove_init_sensor(const struct dove_thermal_priv *priv)
|
||||
{
|
||||
u32 reg;
|
||||
u32 i;
|
||||
|
||||
/* Configure the Diode Control Register #0 */
|
||||
reg = readl_relaxed(priv->control);
|
||||
|
||||
/* Use average of 2 */
|
||||
reg &= ~PMU_TDC0_AVG_NUM_MASK;
|
||||
reg |= (0x1 << PMU_TDC0_AVG_NUM_OFFS);
|
||||
|
||||
/* Reference calibration value */
|
||||
reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
|
||||
reg |= (0x0F1 << PMU_TDC0_REF_CAL_CNT_OFFS);
|
||||
|
||||
/* Set the high level reference for calibration */
|
||||
reg &= ~PMU_TDC0_SEL_VCAL_MASK;
|
||||
reg |= (0x2 << PMU_TDC0_SEL_VCAL_OFFS);
|
||||
writel(reg, priv->control);
|
||||
|
||||
/* Reset the sensor */
|
||||
reg = readl_relaxed(priv->control);
|
||||
writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
|
||||
writel(reg, priv->control);
|
||||
|
||||
/* Enable the sensor */
|
||||
reg = readl_relaxed(priv->sensor);
|
||||
reg &= ~PMU_TM_DISABLE_MASK;
|
||||
writel(reg, priv->sensor);
|
||||
|
||||
/* Poll the sensor for the first reading */
|
||||
for (i = 0; i < 1000000; i++) {
|
||||
reg = readl_relaxed(priv->sensor);
|
||||
if (reg & DOVE_THERMAL_TEMP_MASK)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == 1000000)
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dove_get_temp(struct thermal_zone_device *thermal,
|
||||
unsigned long *temp)
|
||||
{
|
||||
unsigned long reg;
|
||||
struct dove_thermal_priv *priv = thermal->devdata;
|
||||
|
||||
/* Valid check */
|
||||
reg = readl_relaxed(priv->control + PMU_TEMP_DIOD_CTRL1_REG);
|
||||
if ((reg & PMU_TDC1_TEMP_VALID_MASK) == 0x0) {
|
||||
dev_err(&thermal->device,
|
||||
"Temperature sensor reading not valid\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate temperature. See Section 8.10.1 of 88AP510,
|
||||
* Documentation/arm/Marvell/README
|
||||
*/
|
||||
reg = readl_relaxed(priv->sensor);
|
||||
reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK;
|
||||
*temp = ((2281638UL - (7298*reg)) / 10);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct thermal_zone_device_ops ops = {
|
||||
.get_temp = dove_get_temp,
|
||||
};
|
||||
|
||||
static const struct of_device_id dove_thermal_id_table[] = {
|
||||
{ .compatible = "marvell,dove-thermal" },
|
||||
{}
|
||||
};
|
||||
|
||||
static int dove_thermal_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct thermal_zone_device *thermal = NULL;
|
||||
struct dove_thermal_priv *priv;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res) {
|
||||
dev_err(&pdev->dev, "Failed to get platform resource\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
|
||||
if (!priv->sensor) {
|
||||
dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
|
||||
return -EADDRNOTAVAIL;
|
||||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
if (!res) {
|
||||
dev_err(&pdev->dev, "Failed to get platform resource\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
priv->control = devm_request_and_ioremap(&pdev->dev, res);
|
||||
if (!priv->control) {
|
||||
dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
|
||||
return -EADDRNOTAVAIL;
|
||||
}
|
||||
|
||||
ret = dove_init_sensor(priv);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Failed to initialize sensor\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
thermal = thermal_zone_device_register("dove_thermal", 0, 0,
|
||||
priv, &ops, NULL, 0, 0);
|
||||
if (IS_ERR(thermal)) {
|
||||
dev_err(&pdev->dev,
|
||||
"Failed to register thermal zone device\n");
|
||||
return PTR_ERR(thermal);
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, thermal);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dove_thermal_exit(struct platform_device *pdev)
|
||||
{
|
||||
struct thermal_zone_device *dove_thermal =
|
||||
platform_get_drvdata(pdev);
|
||||
|
||||
thermal_zone_device_unregister(dove_thermal);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
MODULE_DEVICE_TABLE(of, dove_thermal_id_table);
|
||||
|
||||
static struct platform_driver dove_thermal_driver = {
|
||||
.probe = dove_thermal_probe,
|
||||
.remove = dove_thermal_exit,
|
||||
.driver = {
|
||||
.name = "dove_thermal",
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = of_match_ptr(dove_thermal_id_table),
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(dove_thermal_driver);
|
||||
|
||||
MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
|
||||
MODULE_DESCRIPTION("Dove thermal driver");
|
||||
MODULE_LICENSE("GPL");
|
drivers/thermal/exynos_thermal.c
@@ -82,7 +82,7 @@
|
|||
|
||||
#define EXYNOS_TRIMINFO_RELOAD 0x1
|
||||
#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
|
||||
#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 16)
|
||||
#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12)
|
||||
#define EXYNOS_MUX_ADDR_VALUE 6
|
||||
#define EXYNOS_MUX_ADDR_SHIFT 20
|
||||
#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
|
||||
|
@ -94,11 +94,20 @@
|
|||
#define SENSOR_NAME_LEN 16
|
||||
#define MAX_TRIP_COUNT 8
|
||||
#define MAX_COOLING_DEVICE 4
|
||||
#define MAX_THRESHOLD_LEVS 4
|
||||
|
||||
#define ACTIVE_INTERVAL 500
|
||||
#define IDLE_INTERVAL 10000
|
||||
#define MCELSIUS 1000
|
||||
|
||||
#ifdef CONFIG_EXYNOS_THERMAL_EMUL
|
||||
#define EXYNOS_EMUL_TIME 0x57F0
|
||||
#define EXYNOS_EMUL_TIME_SHIFT 16
|
||||
#define EXYNOS_EMUL_DATA_SHIFT 8
|
||||
#define EXYNOS_EMUL_DATA_MASK 0xFF
|
||||
#define EXYNOS_EMUL_ENABLE 0x1
|
||||
#endif /* CONFIG_EXYNOS_THERMAL_EMUL */
|
||||
|
||||
/* CPU Zone information */
|
||||
#define PANIC_ZONE 4
|
||||
#define WARN_ZONE 3
|
||||
|
@ -125,6 +134,7 @@ struct exynos_tmu_data {
|
|||
struct thermal_trip_point_conf {
|
||||
int trip_val[MAX_TRIP_COUNT];
|
||||
int trip_count;
|
||||
u8 trigger_falling;
|
||||
};
|
||||
|
||||
struct thermal_cooling_conf {
|
||||
|
@ -174,7 +184,8 @@ static int exynos_set_mode(struct thermal_zone_device *thermal,
|
|||
|
||||
mutex_lock(&th_zone->therm_dev->lock);
|
||||
|
||||
if (mode == THERMAL_DEVICE_ENABLED)
|
||||
if (mode == THERMAL_DEVICE_ENABLED &&
|
||||
!th_zone->sensor_conf->trip_data.trigger_falling)
|
||||
th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
|
||||
else
|
||||
th_zone->therm_dev->polling_delay = 0;
|
||||
|
@ -284,7 +295,7 @@ static int exynos_bind(struct thermal_zone_device *thermal,
|
|||
case MONITOR_ZONE:
|
||||
case WARN_ZONE:
|
||||
if (thermal_zone_bind_cooling_device(thermal, i, cdev,
|
||||
level, level)) {
|
||||
level, 0)) {
|
||||
pr_err("error binding cdev inst %d\n", i);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
@ -362,10 +373,17 @@ static int exynos_get_temp(struct thermal_zone_device *thermal,
|
|||
static int exynos_get_trend(struct thermal_zone_device *thermal,
|
||||
int trip, enum thermal_trend *trend)
|
||||
{
|
||||
if (thermal->temperature >= trip)
|
||||
*trend = THERMAL_TREND_RAISING;
|
||||
int ret;
|
||||
unsigned long trip_temp;
|
||||
|
||||
ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (thermal->temperature >= trip_temp)
|
||||
*trend = THERMAL_TREND_RAISE_FULL;
|
||||
else
|
||||
*trend = THERMAL_TREND_DROPPING;
|
||||
*trend = THERMAL_TREND_DROP_FULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -413,7 +431,8 @@ static void exynos_report_trigger(void)
|
|||
break;
|
||||
}
|
||||
|
||||
if (th_zone->mode == THERMAL_DEVICE_ENABLED) {
|
||||
if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
|
||||
!th_zone->sensor_conf->trip_data.trigger_falling) {
|
||||
if (i > 0)
|
||||
th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
|
||||
else
|
||||
|
@ -452,7 +471,8 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
|
|||
|
||||
th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
|
||||
EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0,
|
||||
IDLE_INTERVAL);
|
||||
sensor_conf->trip_data.trigger_falling ?
|
||||
0 : IDLE_INTERVAL);
|
||||
|
||||
if (IS_ERR(th_zone->therm_dev)) {
|
||||
pr_err("Failed to register thermal zone device\n");
|
||||
|
@ -559,8 +579,9 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
|
|||
{
|
||||
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
|
||||
struct exynos_tmu_platform_data *pdata = data->pdata;
|
||||
unsigned int status, trim_info, rising_threshold;
|
||||
int ret = 0, threshold_code;
|
||||
unsigned int status, trim_info;
|
||||
unsigned int rising_threshold = 0, falling_threshold = 0;
|
||||
int ret = 0, threshold_code, i, trigger_levs = 0;
|
||||
|
||||
mutex_lock(&data->lock);
|
||||
clk_enable(data->clk);
|
||||
|
@ -585,6 +606,11 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
|
|||
(data->temp_error2 != 0))
|
||||
data->temp_error1 = pdata->efuse_value;
|
||||
|
||||
/* Count trigger levels to be enabled */
|
||||
for (i = 0; i < MAX_THRESHOLD_LEVS; i++)
|
||||
if (pdata->trigger_levels[i])
|
||||
trigger_levs++;
|
||||
|
||||
if (data->soc == SOC_ARCH_EXYNOS4210) {
|
||||
/* Write temperature code for threshold */
|
||||
threshold_code = temp_to_code(data, pdata->threshold);
|
||||
|
@ -594,44 +620,38 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
|
|||
}
|
||||
writeb(threshold_code,
|
||||
data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
|
||||
|
||||
writeb(pdata->trigger_levels[0],
|
||||
data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0);
|
||||
writeb(pdata->trigger_levels[1],
|
||||
data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL1);
|
||||
writeb(pdata->trigger_levels[2],
|
||||
data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL2);
|
||||
writeb(pdata->trigger_levels[3],
|
||||
data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL3);
|
||||
for (i = 0; i < trigger_levs; i++)
|
||||
writeb(pdata->trigger_levels[i],
|
||||
data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
|
||||
|
||||
writel(EXYNOS4210_TMU_INTCLEAR_VAL,
|
||||
data->base + EXYNOS_TMU_REG_INTCLEAR);
|
||||
} else if (data->soc == SOC_ARCH_EXYNOS) {
|
||||
/* Write temperature code for threshold */
|
||||
threshold_code = temp_to_code(data, pdata->trigger_levels[0]);
|
||||
if (threshold_code < 0) {
|
||||
ret = threshold_code;
|
||||
goto out;
|
||||
/* Write temperature code for rising and falling threshold */
|
||||
for (i = 0; i < trigger_levs; i++) {
|
||||
threshold_code = temp_to_code(data,
|
||||
pdata->trigger_levels[i]);
|
||||
if (threshold_code < 0) {
|
||||
ret = threshold_code;
|
||||
goto out;
|
||||
}
|
||||
rising_threshold |= threshold_code << 8 * i;
|
||||
if (pdata->threshold_falling) {
|
||||
threshold_code = temp_to_code(data,
|
||||
pdata->trigger_levels[i] -
|
||||
pdata->threshold_falling);
|
||||
if (threshold_code > 0)
|
||||
falling_threshold |=
|
||||
threshold_code << 8 * i;
|
||||
}
|
||||
}
|
||||
rising_threshold = threshold_code;
|
||||
threshold_code = temp_to_code(data, pdata->trigger_levels[1]);
|
||||
if (threshold_code < 0) {
|
||||
ret = threshold_code;
|
||||
goto out;
|
||||
}
|
||||
rising_threshold |= (threshold_code << 8);
|
||||
threshold_code = temp_to_code(data, pdata->trigger_levels[2]);
|
||||
if (threshold_code < 0) {
|
||||
ret = threshold_code;
|
||||
goto out;
|
||||
}
|
||||
rising_threshold |= (threshold_code << 16);
|
||||
|
||||
writel(rising_threshold,
|
||||
data->base + EXYNOS_THD_TEMP_RISE);
|
||||
writel(0, data->base + EXYNOS_THD_TEMP_FALL);
|
||||
writel(falling_threshold,
|
||||
data->base + EXYNOS_THD_TEMP_FALL);
|
||||
|
||||
writel(EXYNOS_TMU_CLEAR_RISE_INT|EXYNOS_TMU_CLEAR_FALL_INT,
|
||||
writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
|
||||
data->base + EXYNOS_TMU_REG_INTCLEAR);
|
||||
}
|
||||
out:
|
||||
|
@ -664,6 +684,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
|
|||
pdata->trigger_level2_en << 8 |
|
||||
pdata->trigger_level1_en << 4 |
|
||||
pdata->trigger_level0_en;
|
||||
if (pdata->threshold_falling)
|
||||
interrupt_en |= interrupt_en << 16;
|
||||
} else {
|
||||
con |= EXYNOS_TMU_CORE_OFF;
|
||||
interrupt_en = 0; /* Disable all interrupts */
|
||||
|
@ -697,20 +719,19 @@ static void exynos_tmu_work(struct work_struct *work)
|
|||
struct exynos_tmu_data *data = container_of(work,
|
||||
struct exynos_tmu_data, irq_work);
|
||||
|
||||
exynos_report_trigger();
|
||||
mutex_lock(&data->lock);
|
||||
clk_enable(data->clk);
|
||||
|
||||
|
||||
if (data->soc == SOC_ARCH_EXYNOS)
|
||||
writel(EXYNOS_TMU_CLEAR_RISE_INT,
|
||||
writel(EXYNOS_TMU_CLEAR_RISE_INT |
|
||||
EXYNOS_TMU_CLEAR_FALL_INT,
|
||||
data->base + EXYNOS_TMU_REG_INTCLEAR);
|
||||
else
|
||||
writel(EXYNOS4210_TMU_INTCLEAR_VAL,
|
||||
data->base + EXYNOS_TMU_REG_INTCLEAR);
|
||||
|
||||
clk_disable(data->clk);
|
||||
mutex_unlock(&data->lock);
|
||||
exynos_report_trigger();
|
||||
|
||||
enable_irq(data->irq);
|
||||
}
|
||||
|
||||
|
@ -759,6 +780,7 @@ static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
|
|||
|
||||
#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
|
||||
static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
|
||||
.threshold_falling = 10,
|
||||
.trigger_levels[0] = 85,
|
||||
.trigger_levels[1] = 103,
|
||||
.trigger_levels[2] = 110,
|
||||
|
@ -800,8 +822,6 @@ static const struct of_device_id exynos_tmu_match[] = {
|
|||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
|
||||
#else
|
||||
#define exynos_tmu_match NULL
|
||||
#endif
|
||||
|
||||
static struct platform_device_id exynos_tmu_driver_ids[] = {
|
||||
|
@ -832,6 +852,94 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
|
|||
return (struct exynos_tmu_platform_data *)
|
||||
platform_get_device_id(pdev)->driver_data;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_EXYNOS_THERMAL_EMUL
|
||||
static ssize_t exynos_tmu_emulation_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct platform_device *pdev = container_of(dev,
|
||||
struct platform_device, dev);
|
||||
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
|
||||
unsigned int reg;
|
||||
u8 temp_code;
|
||||
int temp = 0;
|
||||
|
||||
if (data->soc == SOC_ARCH_EXYNOS4210)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&data->lock);
|
||||
clk_enable(data->clk);
|
||||
reg = readl(data->base + EXYNOS_EMUL_CON);
|
||||
clk_disable(data->clk);
|
||||
mutex_unlock(&data->lock);
|
||||
|
||||
if (reg & EXYNOS_EMUL_ENABLE) {
|
||||
reg >>= EXYNOS_EMUL_DATA_SHIFT;
|
||||
temp_code = reg & EXYNOS_EMUL_DATA_MASK;
|
||||
temp = code_to_temp(data, temp_code);
|
||||
}
|
||||
out:
|
||||
return sprintf(buf, "%d\n", temp * MCELSIUS);
|
||||
}
|
||||
|
||||
static ssize_t exynos_tmu_emulation_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct platform_device *pdev = container_of(dev,
|
||||
struct platform_device, dev);
|
||||
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
|
||||
unsigned int reg;
|
||||
int temp;
|
||||
|
||||
if (data->soc == SOC_ARCH_EXYNOS4210)
|
||||
goto out;
|
||||
|
||||
if (!sscanf(buf, "%d\n", &temp) || temp < 0)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&data->lock);
|
||||
clk_enable(data->clk);
|
||||
|
||||
reg = readl(data->base + EXYNOS_EMUL_CON);
|
||||
|
||||
if (temp) {
|
||||
/* Both CELSIUS and MCELSIUS type are available for input */
|
||||
if (temp > MCELSIUS)
|
||||
temp /= MCELSIUS;
|
||||
|
||||
reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
|
||||
(temp_to_code(data, (temp / MCELSIUS))
|
||||
<< EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
|
||||
} else {
|
||||
reg &= ~EXYNOS_EMUL_ENABLE;
|
||||
}
|
||||
|
||||
writel(reg, data->base + EXYNOS_EMUL_CON);
|
||||
|
||||
clk_disable(data->clk);
|
||||
mutex_unlock(&data->lock);
|
||||
|
||||
out:
|
||||
return count;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show,
|
||||
exynos_tmu_emulation_store);
|
||||
static int create_emulation_sysfs(struct device *dev)
|
||||
{
|
||||
return device_create_file(dev, &dev_attr_emulation);
|
||||
}
|
||||
static void remove_emulation_sysfs(struct device *dev)
|
||||
{
|
||||
device_remove_file(dev, &dev_attr_emulation);
|
||||
}
|
||||
#else
|
||||
static inline int create_emulation_sysfs(struct device *dev) { return 0; }
|
||||
static inline void remove_emulation_sysfs(struct device *dev) {}
|
||||
#endif
|
||||
|
||||
static int exynos_tmu_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct exynos_tmu_data *data;
|
||||
|
@ -914,6 +1022,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
|
|||
exynos_sensor_conf.trip_data.trip_val[i] =
|
||||
pdata->threshold + pdata->trigger_levels[i];
|
||||
|
||||
exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling;
|
||||
|
||||
exynos_sensor_conf.cooling_data.freq_clip_count =
|
||||
pdata->freq_tab_count;
|
||||
for (i = 0; i < pdata->freq_tab_count; i++) {
|
||||
|
@ -928,6 +1038,11 @@ static int exynos_tmu_probe(struct platform_device *pdev)
|
|||
dev_err(&pdev->dev, "Failed to register thermal interface\n");
|
||||
goto err_clk;
|
||||
}
|
||||
|
||||
ret = create_emulation_sysfs(&pdev->dev);
|
||||
if (ret)
|
||||
dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n");
|
||||
|
||||
return 0;
|
||||
err_clk:
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
@ -939,6 +1054,8 @@ static int exynos_tmu_remove(struct platform_device *pdev)
|
|||
{
|
||||
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
|
||||
|
||||
remove_emulation_sysfs(&pdev->dev);
|
||||
|
||||
exynos_tmu_control(pdev, false);
|
||||
|
||||
exynos_unregister_thermal();
|
||||
|
@ -980,7 +1097,7 @@ static struct platform_driver exynos_tmu_driver = {
|
|||
.name = "exynos-tmu",
|
||||
.owner = THIS_MODULE,
|
||||
.pm = EXYNOS_TMU_PM,
|
||||
.of_match_table = exynos_tmu_match,
|
||||
.of_match_table = of_match_ptr(exynos_tmu_match),
|
||||
},
|
||||
.probe = exynos_tmu_probe,
|
||||
.remove = exynos_tmu_remove,
|
||||
|
|
drivers/thermal/intel_powerclamp.c (new file, 794 lines)
@@ -0,0 +1,794 @@
|
|||
/*
|
||||
* intel_powerclamp.c - package c-state idle injection
|
||||
*
|
||||
* Copyright (c) 2012, Intel Corporation.
|
||||
*
|
||||
* Authors:
|
||||
* Arjan van de Ven <arjan@linux.intel.com>
|
||||
* Jacob Pan <jacob.jun.pan@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
*
|
||||
* TODO:
|
||||
* 1. better handle wakeup from external interrupts, currently a fixed
|
||||
* compensation is added to clamping duration when excessive amount
|
||||
* of wakeups are observed during idle time. the reason is that in
|
||||
* case of external interrupts without need for ack, clamping down
|
||||
* cpu in non-irq context does not reduce irq. for majority of the
|
||||
* cases, clamping down cpu does help reduce irq as well, we should
|
||||
* be able to differenciate the two cases and give a quantitative
|
||||
* solution for the irqs that we can control. perhaps based on
|
||||
* get_cpu_iowait_time_us()
|
||||
*
|
||||
* 2. synchronization with other hw blocks
|
||||
*
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/thermal.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/nmi.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/mwait.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/idle.h>
|
||||
#include <asm/hardirq.h>
|
||||
|
||||
#define MAX_TARGET_RATIO (50U)
|
||||
/* For each undisturbed clamping period (no extra wake ups during idle time),
|
||||
* we increment the confidence counter for the given target ratio.
|
||||
* CONFIDENCE_OK defines the level where runtime calibration results are
|
||||
* valid.
|
||||
*/
|
||||
#define CONFIDENCE_OK (3)
|
||||
/* Default idle injection duration, driver adjust sleep time to meet target
|
||||
* idle ratio. Similar to frequency modulation.
|
||||
*/
|
||||
#define DEFAULT_DURATION_JIFFIES (6)
|
||||
|
||||
static unsigned int target_mwait;
|
||||
static struct dentry *debug_dir;
|
||||
|
||||
/* user selected target */
|
||||
static unsigned int set_target_ratio;
|
||||
static unsigned int current_ratio;
|
||||
static bool should_skip;
|
||||
static bool reduce_irq;
|
||||
static atomic_t idle_wakeup_counter;
|
||||
static unsigned int control_cpu; /* The cpu assigned to collect stat and update
|
||||
* control parameters. default to BSP but BSP
|
||||
* can be offlined.
|
||||
*/
|
||||
static bool clamping;
|
||||
|
||||
|
||||
static struct task_struct * __percpu *powerclamp_thread;
|
||||
static struct thermal_cooling_device *cooling_dev;
|
||||
static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
|
||||
* clamping thread
|
||||
*/
|
||||
|
||||
static unsigned int duration;
|
||||
static unsigned int pkg_cstate_ratio_cur;
|
||||
static unsigned int window_size;
|
||||
|
||||
static int duration_set(const char *arg, const struct kernel_param *kp)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned long new_duration;
|
||||
|
||||
ret = kstrtoul(arg, 10, &new_duration);
|
||||
if (ret)
|
||||
goto exit;
|
||||
if (new_duration > 25 || new_duration < 6) {
|
||||
pr_err("Out of recommended range %lu, between 6-25ms\n",
|
||||
new_duration);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
duration = clamp(new_duration, 6ul, 25ul);
|
||||
smp_mb();
|
||||
|
||||
exit:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct kernel_param_ops duration_ops = {
|
||||
.set = duration_set,
|
||||
.get = param_get_int,
|
||||
};
|
||||
|
||||
|
||||
module_param_cb(duration, &duration_ops, &duration, 0644);
|
||||
MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");
|
||||
|
||||
struct powerclamp_calibration_data {
|
||||
unsigned long confidence; /* used for calibration, basically a counter
|
||||
* gets incremented each time a clamping
|
||||
* period is completed without extra wakeups
|
||||
* once that counter is reached given level,
|
||||
* compensation is deemed usable.
|
||||
*/
|
||||
unsigned long steady_comp; /* steady state compensation used when
|
||||
* no extra wakeups occurred.
|
||||
*/
|
||||
unsigned long dynamic_comp; /* compensate excessive wakeup from idle
|
||||
* mostly from external interrupts.
|
||||
*/
|
||||
};
|
||||
|
||||
static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO];
|
||||
|
||||
static int window_size_set(const char *arg, const struct kernel_param *kp)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned long new_window_size;
|
||||
|
||||
ret = kstrtoul(arg, 10, &new_window_size);
|
||||
if (ret)
|
||||
goto exit_win;
|
||||
if (new_window_size > 10 || new_window_size < 2) {
|
||||
pr_err("Out of recommended window size %lu, between 2-10\n",
|
||||
new_window_size);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
window_size = clamp(new_window_size, 2ul, 10ul);
|
||||
smp_mb();
|
||||
|
||||
exit_win:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct kernel_param_ops window_size_ops = {
|
||||
.set = window_size_set,
|
||||
.get = param_get_int,
|
||||
};
|
||||
|
||||
module_param_cb(window_size, &window_size_ops, &window_size, 0644);
|
||||
MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
|
||||
"\tpowerclamp controls idle ratio within this window. larger\n"
|
||||
"\twindow size results in slower response time but more smooth\n"
|
||||
"\tclamping results. default to 2.");
|
||||
|
||||
static void find_target_mwait(void)
|
||||
{
|
||||
unsigned int eax, ebx, ecx, edx;
|
||||
unsigned int highest_cstate = 0;
|
||||
unsigned int highest_subcstate = 0;
|
||||
int i;
|
||||
|
||||
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
|
||||
return;
|
||||
|
||||
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
|
||||
|
||||
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
|
||||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
|
||||
return;
|
||||
|
||||
edx >>= MWAIT_SUBSTATE_SIZE;
|
||||
for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
|
||||
if (edx & MWAIT_SUBSTATE_MASK) {
|
||||
highest_cstate = i;
|
||||
highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
|
||||
}
|
||||
}
|
||||
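/* encode the MWAIT hint: deepest C-state in bits 7:4, (sub-state - 1) in bits 3:0 */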
target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
|
||||
(highest_subcstate - 1);
|
||||
|
||||
}
|
||||
|
||||
static u64 pkg_state_counter(void)
|
||||
{
|
||||
u64 val;
|
||||
u64 count = 0;
|
||||
|
||||
static bool skip_c2;
|
||||
static bool skip_c3;
|
||||
static bool skip_c6;
|
||||
static bool skip_c7;
|
||||
|
||||
if (!skip_c2) {
|
||||
if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val))
|
||||
count += val;
|
||||
else
|
||||
skip_c2 = true;
|
||||
}
|
||||
|
||||
if (!skip_c3) {
|
||||
if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val))
|
||||
count += val;
|
||||
else
|
||||
skip_c3 = true;
|
||||
}
|
||||
|
||||
if (!skip_c6) {
|
||||
if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val))
|
||||
count += val;
|
||||
else
|
||||
skip_c6 = true;
|
||||
}
|
||||
|
||||
if (!skip_c7) {
|
||||
if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val))
|
||||
count += val;
|
||||
else
|
||||
skip_c7 = true;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static void noop_timer(unsigned long foo)
|
||||
{
|
||||
/* empty... just the fact that we get the interrupt wakes us up */
|
||||
}
|
||||
|
||||
static unsigned int get_compensation(int ratio)
|
||||
{
|
||||
unsigned int comp = 0;
|
||||
|
||||
/* we only use compensation if all adjacent ones are good */
|
||||
if (ratio == 1 &&
|
||||
cal_data[ratio].confidence >= CONFIDENCE_OK &&
|
||||
cal_data[ratio + 1].confidence >= CONFIDENCE_OK &&
|
||||
cal_data[ratio + 2].confidence >= CONFIDENCE_OK) {
|
||||
comp = (cal_data[ratio].steady_comp +
|
||||
cal_data[ratio + 1].steady_comp +
|
||||
cal_data[ratio + 2].steady_comp) / 3;
|
||||
} else if (ratio == MAX_TARGET_RATIO - 1 &&
|
||||
cal_data[ratio].confidence >= CONFIDENCE_OK &&
|
||||
cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
|
||||
cal_data[ratio - 2].confidence >= CONFIDENCE_OK) {
|
||||
comp = (cal_data[ratio].steady_comp +
|
||||
cal_data[ratio - 1].steady_comp +
|
||||
cal_data[ratio - 2].steady_comp) / 3;
|
||||
} else if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
|
||||
cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
|
||||
cal_data[ratio + 1].confidence >= CONFIDENCE_OK) {
|
||||
comp = (cal_data[ratio].steady_comp +
|
||||
cal_data[ratio - 1].steady_comp +
|
||||
cal_data[ratio + 1].steady_comp) / 3;
|
||||
}
|
||||
|
||||
/* REVISIT: simple penalty of double idle injection */
|
||||
if (reduce_irq)
|
||||
comp = ratio;
|
||||
/* do not exceed limit */
|
||||
if (comp + ratio >= MAX_TARGET_RATIO)
|
||||
comp = MAX_TARGET_RATIO - ratio - 1;
|
||||
|
||||
return comp;
|
||||
}
|
||||
|
||||
static void adjust_compensation(int target_ratio, unsigned int win)
|
||||
{
|
||||
int delta;
|
||||
struct powerclamp_calibration_data *d = &cal_data[target_ratio];
|
||||
|
||||
/*
|
||||
* adjust compensations only if confidence level has not been reached;
* if there are too many wakeups during the last idle injection period,
* we cannot trust the data for compensation, so skip.
|
||||
*/
|
||||
if (d->confidence >= CONFIDENCE_OK ||
|
||||
atomic_read(&idle_wakeup_counter) >
|
||||
win * num_online_cpus())
|
||||
return;
|
||||
|
||||
delta = set_target_ratio - current_ratio;
|
||||
/* filter out bad data */
|
||||
if (delta >= 0 && delta <= (1+target_ratio/10)) {
|
||||
if (d->steady_comp)
|
||||
d->steady_comp =
|
||||
roundup(delta+d->steady_comp, 2)/2;
|
||||
else
|
||||
d->steady_comp = delta;
|
||||
d->confidence++;
|
||||
}
|
||||
}
|
||||
|
||||
static bool powerclamp_adjust_controls(unsigned int target_ratio,
|
||||
unsigned int guard, unsigned int win)
|
||||
{
|
||||
static u64 msr_last, tsc_last;
|
||||
u64 msr_now, tsc_now;
|
||||
u64 val64;
|
||||
|
||||
/* check result for the last window */
|
||||
msr_now = pkg_state_counter();
|
||||
rdtscll(tsc_now);
|
||||
|
||||
/* calculate pkg cstate vs tsc ratio */
|
||||
if (!msr_last || !tsc_last)
|
||||
current_ratio = 1;
|
||||
else if (tsc_now-tsc_last) {
|
||||
val64 = 100*(msr_now-msr_last);
|
||||
do_div(val64, (tsc_now-tsc_last));
|
||||
current_ratio = val64;
|
||||
}
|
||||
|
||||
/* update record */
|
||||
msr_last = msr_now;
|
||||
tsc_last = tsc_now;
|
||||
|
||||
adjust_compensation(target_ratio, win);
|
||||
/*
|
||||
* too many external interrupts, set flag such
|
||||
* that we can take measure later.
|
||||
*/
|
||||
reduce_irq = atomic_read(&idle_wakeup_counter) >=
|
||||
2 * win * num_online_cpus();
|
||||
|
||||
atomic_set(&idle_wakeup_counter, 0);
|
||||
/* if we are above target+guard, skip */
|
||||
return set_target_ratio + guard <= current_ratio;
|
||||
}
|
||||
|
||||
static int clamp_thread(void *arg)
|
||||
{
|
||||
int cpunr = (unsigned long)arg;
|
||||
DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
|
||||
static const struct sched_param param = {
|
||||
.sched_priority = MAX_USER_RT_PRIO/2,
|
||||
};
|
||||
unsigned int count = 0;
|
||||
unsigned int target_ratio;
|
||||
|
||||
set_bit(cpunr, cpu_clamping_mask);
|
||||
set_freezable();
|
||||
init_timer_on_stack(&wakeup_timer);
|
||||
sched_setscheduler(current, SCHED_FIFO, ¶m);
|
||||
|
||||
while (true == clamping && !kthread_should_stop() &&
|
||||
cpu_online(cpunr)) {
|
||||
int sleeptime;
|
||||
unsigned long target_jiffies;
|
||||
unsigned int guard;
|
||||
unsigned int compensation = 0;
|
||||
int interval; /* jiffies to sleep for each attempt */
|
||||
unsigned int duration_jiffies = msecs_to_jiffies(duration);
|
||||
unsigned int window_size_now;
|
||||
|
||||
try_to_freeze();
|
||||
/*
|
||||
* make sure user selected ratio does not take effect until
|
||||
* the next round. adjust target_ratio if user has changed
|
||||
* target such that we can converge quickly.
|
||||
*/
|
||||
target_ratio = set_target_ratio;
|
||||
guard = 1 + target_ratio/20;
|
||||
window_size_now = window_size;
|
||||
count++;
|
||||
|
||||
/*
|
||||
* systems may have different ability to enter package level
|
||||
* c-states, thus we need to compensate the injected idle ratio
|
||||
* to achieve the actual target reported by the HW.
|
||||
*/
|
||||
compensation = get_compensation(target_ratio);
|
||||
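/* size each cycle so duration_jiffies of idle per interval gives the target percentage */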
interval = duration_jiffies*100/(target_ratio+compensation);
|
||||
|
||||
/* align idle time */
|
||||
target_jiffies = roundup(jiffies, interval);
|
||||
sleeptime = target_jiffies - jiffies;
|
||||
if (sleeptime <= 0)
|
||||
sleeptime = 1;
|
||||
schedule_timeout_interruptible(sleeptime);
|
||||
/*
|
||||
* only elected controlling cpu can collect stats and update
|
||||
* control parameters.
|
||||
*/
|
||||
if (cpunr == control_cpu && !(count%window_size_now)) {
|
||||
should_skip =
|
||||
powerclamp_adjust_controls(target_ratio,
|
||||
guard, window_size_now);
|
||||
smp_mb();
|
||||
}
|
||||
|
||||
if (should_skip)
|
||||
continue;
|
||||
|
||||
target_jiffies = jiffies + duration_jiffies;
|
||||
mod_timer(&wakeup_timer, target_jiffies);
|
||||
if (unlikely(local_softirq_pending()))
|
||||
continue;
|
||||
/*
|
||||
* stop tick sched during idle time, interrupts are still
|
||||
* allowed. thus jiffies are updated properly.
|
||||
*/
|
||||
preempt_disable();
|
||||
tick_nohz_idle_enter();
|
||||
/* mwait until target jiffies is reached */
|
||||
while (time_before(jiffies, target_jiffies)) {
|
||||
unsigned long ecx = 1;
|
||||
unsigned long eax = target_mwait;
|
||||
|
||||
/*
|
||||
* REVISIT: may call enter_idle() to notify drivers who
|
||||
* can save power during cpu idle. same for exit_idle()
|
||||
*/
|
||||
local_touch_nmi();
|
||||
stop_critical_timings();
|
||||
__monitor((void *)¤t_thread_info()->flags, 0, 0);
|
||||
cpu_relax(); /* allow HT sibling to run */
|
||||
__mwait(eax, ecx);
|
||||
start_critical_timings();
|
||||
atomic_inc(&idle_wakeup_counter);
|
||||
}
|
||||
tick_nohz_idle_exit();
|
||||
preempt_enable_no_resched();
|
||||
}
|
||||
del_timer_sync(&wakeup_timer);
|
||||
clear_bit(cpunr, cpu_clamping_mask);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* 1 HZ polling while clamping is active, useful for userspace
|
||||
* to monitor actual idle ratio.
|
||||
*/
|
||||
static void poll_pkg_cstate(struct work_struct *dummy);
|
||||
static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
|
||||
static void poll_pkg_cstate(struct work_struct *dummy)
|
||||
{
|
||||
static u64 msr_last;
|
||||
static u64 tsc_last;
|
||||
static unsigned long jiffies_last;
|
||||
|
||||
u64 msr_now;
|
||||
unsigned long jiffies_now;
|
||||
u64 tsc_now;
|
||||
u64 val64;
|
||||
|
||||
msr_now = pkg_state_counter();
|
||||
rdtscll(tsc_now);
|
||||
jiffies_now = jiffies;
|
||||
|
||||
/* calculate pkg cstate vs tsc ratio */
|
||||
if (!msr_last || !tsc_last)
|
||||
pkg_cstate_ratio_cur = 1;
|
||||
else {
|
||||
if (tsc_now - tsc_last) {
|
||||
val64 = 100 * (msr_now - msr_last);
|
||||
do_div(val64, (tsc_now - tsc_last));
|
||||
pkg_cstate_ratio_cur = val64;
|
||||
}
|
||||
}
|
||||
|
||||
/* update record */
|
||||
msr_last = msr_now;
|
||||
jiffies_last = jiffies_now;
|
||||
tsc_last = tsc_now;
|
||||
|
||||
if (true == clamping)
|
||||
schedule_delayed_work(&poll_pkg_cstate_work, HZ);
|
||||
}
|
||||
|
||||
static int start_power_clamp(void)
|
||||
{
|
||||
unsigned long cpu;
|
||||
struct task_struct *thread;
|
||||
|
||||
/* check if pkg cstate counter is completely 0, abort in this case */
|
||||
if (!pkg_state_counter()) {
|
||||
pr_err("pkg cstate counter not functional, abort\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
|
||||
/* prevent cpu hotplug */
|
||||
get_online_cpus();
|
||||
|
||||
/* prefer BSP */
|
||||
control_cpu = 0;
|
||||
if (!cpu_online(control_cpu))
|
||||
control_cpu = smp_processor_id();
|
||||
|
||||
clamping = true;
|
||||
schedule_delayed_work(&poll_pkg_cstate_work, 0);
|
||||
|
||||
/* start one thread per online cpu */
|
||||
for_each_online_cpu(cpu) {
|
||||
struct task_struct **p =
|
||||
per_cpu_ptr(powerclamp_thread, cpu);
|
||||
|
||||
thread = kthread_create_on_node(clamp_thread,
|
||||
(void *) cpu,
|
||||
cpu_to_node(cpu),
|
||||
"kidle_inject/%ld", cpu);
|
||||
/* bind to cpu here */
|
||||
if (likely(!IS_ERR(thread))) {
|
||||
kthread_bind(thread, cpu);
|
||||
wake_up_process(thread);
|
||||
*p = thread;
|
||||
}
|
||||
|
||||
}
|
||||
put_online_cpus();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void end_power_clamp(void)
|
||||
{
|
||||
int i;
|
||||
struct task_struct *thread;
|
||||
|
||||
clamping = false;
|
||||
/*
|
||||
* make clamping visible to other cpus and give per cpu clamping threads
|
||||
* some time to exit, or they get killed later.
|
||||
*/
|
||||
smp_mb();
|
||||
msleep(20);
|
||||
if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
|
||||
for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
|
||||
pr_debug("clamping thread for cpu %d alive, kill\n", i);
|
||||
thread = *per_cpu_ptr(powerclamp_thread, i);
|
||||
kthread_stop(thread);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int powerclamp_cpu_callback(struct notifier_block *nfb,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
unsigned long cpu = (unsigned long)hcpu;
|
||||
struct task_struct *thread;
|
||||
struct task_struct **percpu_thread =
|
||||
per_cpu_ptr(powerclamp_thread, cpu);
|
||||
|
||||
if (false == clamping)
|
||||
goto exit_ok;
|
||||
|
||||
switch (action) {
|
||||
case CPU_ONLINE:
|
||||
thread = kthread_create_on_node(clamp_thread,
|
||||
(void *) cpu,
|
||||
cpu_to_node(cpu),
|
||||
"kidle_inject/%lu", cpu);
|
||||
if (likely(!IS_ERR(thread))) {
|
||||
kthread_bind(thread, cpu);
|
||||
wake_up_process(thread);
|
||||
*percpu_thread = thread;
|
||||
}
|
||||
/* prefer BSP as controlling CPU */
|
||||
if (cpu == 0) {
|
||||
control_cpu = 0;
|
||||
smp_mb();
|
||||
}
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
if (test_bit(cpu, cpu_clamping_mask)) {
|
||||
pr_err("cpu %lu dead but powerclamping thread is not\n",
|
||||
cpu);
|
||||
kthread_stop(*percpu_thread);
|
||||
}
|
||||
if (cpu == control_cpu) {
|
||||
control_cpu = smp_processor_id();
|
||||
smp_mb();
|
||||
}
|
||||
}
|
||||
|
||||
exit_ok:
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block powerclamp_cpu_notifier = {
|
||||
.notifier_call = powerclamp_cpu_callback,
|
||||
};
|
||||
|
||||
static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
|
||||
unsigned long *state)
|
||||
{
|
||||
*state = MAX_TARGET_RATIO;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
|
||||
unsigned long *state)
|
||||
{
|
||||
if (true == clamping)
|
||||
*state = pkg_cstate_ratio_cur;
|
||||
else
|
||||
/* to save power, do not poll idle ratio while not clamping */
|
||||
*state = -1; /* indicates invalid state */
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
|
||||
unsigned long new_target_ratio)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
new_target_ratio = clamp(new_target_ratio, 0UL,
|
||||
(unsigned long) (MAX_TARGET_RATIO-1));
|
||||
if (set_target_ratio == 0 && new_target_ratio > 0) {
|
||||
pr_info("Start idle injection to reduce power\n");
|
||||
set_target_ratio = new_target_ratio;
|
||||
ret = start_power_clamp();
|
||||
goto exit_set;
|
||||
} else if (set_target_ratio > 0 && new_target_ratio == 0) {
|
||||
pr_info("Stop forced idle injection\n");
|
||||
set_target_ratio = 0;
|
||||
end_power_clamp();
|
||||
} else /* adjust currently running */ {
|
||||
set_target_ratio = new_target_ratio;
|
||||
/* make new set_target_ratio visible to other cpus */
|
||||
smp_mb();
|
||||
}
|
||||
|
||||
exit_set:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* bind to generic thermal layer as cooling device*/
|
||||
static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
|
||||
.get_max_state = powerclamp_get_max_state,
|
||||
.get_cur_state = powerclamp_get_cur_state,
|
||||
.set_cur_state = powerclamp_set_cur_state,
|
||||
};
|
||||
|
||||
/* runs on Nehalem and later */
|
||||
static const struct x86_cpu_id intel_powerclamp_ids[] = {
|
||||
{ X86_VENDOR_INTEL, 6, 0x1a},
|
||||
{ X86_VENDOR_INTEL, 6, 0x1c},
|
||||
{ X86_VENDOR_INTEL, 6, 0x1e},
|
||||
{ X86_VENDOR_INTEL, 6, 0x1f},
|
||||
{ X86_VENDOR_INTEL, 6, 0x25},
|
||||
{ X86_VENDOR_INTEL, 6, 0x26},
|
||||
{ X86_VENDOR_INTEL, 6, 0x2a},
|
||||
{ X86_VENDOR_INTEL, 6, 0x2c},
|
||||
{ X86_VENDOR_INTEL, 6, 0x2d},
|
||||
{ X86_VENDOR_INTEL, 6, 0x2e},
|
||||
{ X86_VENDOR_INTEL, 6, 0x2f},
|
||||
{ X86_VENDOR_INTEL, 6, 0x3a},
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
|
||||
|
||||
static int powerclamp_probe(void)
|
||||
{
|
||||
if (!x86_match_cpu(intel_powerclamp_ids)) {
|
||||
pr_err("Intel powerclamp does not run on family %d model %d\n",
|
||||
boot_cpu_data.x86, boot_cpu_data.x86_model);
|
||||
return -ENODEV;
|
||||
}
|
||||
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
|
||||
!boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
|
||||
!boot_cpu_has(X86_FEATURE_MWAIT) ||
|
||||
!boot_cpu_has(X86_FEATURE_ARAT))
|
||||
return -ENODEV;
|
||||
|
||||
/* find the deepest mwait value */
|
||||
find_target_mwait();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int powerclamp_debug_show(struct seq_file *m, void *unused)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
seq_printf(m, "controlling cpu: %d\n", control_cpu);
|
||||
seq_printf(m, "pct confidence steady dynamic (compensation)\n");
|
||||
for (i = 0; i < MAX_TARGET_RATIO; i++) {
|
||||
seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
|
||||
i,
|
||||
cal_data[i].confidence,
|
||||
cal_data[i].steady_comp,
|
||||
cal_data[i].dynamic_comp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int powerclamp_debug_open(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
return single_open(file, powerclamp_debug_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations powerclamp_debug_fops = {
|
||||
.open = powerclamp_debug_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static inline void powerclamp_create_debug_files(void)
|
||||
{
|
||||
debug_dir = debugfs_create_dir("intel_powerclamp", NULL);
|
||||
if (!debug_dir)
|
||||
return;
|
||||
|
||||
if (!debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir,
|
||||
cal_data, &powerclamp_debug_fops))
|
||||
goto file_error;
|
||||
|
||||
return;
|
||||
|
||||
file_error:
|
||||
debugfs_remove_recursive(debug_dir);
|
||||
}
|
||||
|
||||
static int powerclamp_init(void)
|
||||
{
|
||||
int retval;
|
||||
int bitmap_size;
|
||||
|
||||
bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
|
||||
cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
|
||||
if (!cpu_clamping_mask)
|
||||
return -ENOMEM;
|
||||
|
||||
/* probe cpu features and ids here */
|
||||
retval = powerclamp_probe();
|
||||
if (retval)
|
||||
return retval;
|
||||
/* set default limit, maybe adjusted during runtime based on feedback */
|
||||
window_size = 2;
|
||||
register_hotcpu_notifier(&powerclamp_cpu_notifier);
|
||||
powerclamp_thread = alloc_percpu(struct task_struct *);
|
||||
cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
|
||||
&powerclamp_cooling_ops);
|
||||
if (IS_ERR(cooling_dev))
|
||||
return -ENODEV;
|
||||
|
||||
if (!duration)
|
||||
duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
|
||||
powerclamp_create_debug_files();
|
||||
|
||||
return 0;
|
||||
}
|
||||
module_init(powerclamp_init);
|
||||
|
||||
static void powerclamp_exit(void)
|
||||
{
|
||||
unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
|
||||
end_power_clamp();
|
||||
free_percpu(powerclamp_thread);
|
||||
thermal_cooling_device_unregister(cooling_dev);
|
||||
kfree(cpu_clamping_mask);
|
||||
|
||||
cancel_delayed_work_sync(&poll_pkg_cstate_work);
|
||||
debugfs_remove_recursive(debug_dir);
|
||||
}
|
||||
module_exit(powerclamp_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
|
||||
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
|
||||
MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");
|
134
drivers/thermal/kirkwood_thermal.c
Normal file
@ -0,0 +1,134 @@
|||
/*
|
||||
* Kirkwood thermal sensor driver
|
||||
*
|
||||
* Copyright (C) 2012 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/thermal.h>
|
||||
|
||||
#define KIRKWOOD_THERMAL_VALID_OFFSET 9
|
||||
#define KIRKWOOD_THERMAL_VALID_MASK 0x1
|
||||
#define KIRKWOOD_THERMAL_TEMP_OFFSET 10
|
||||
#define KIRKWOOD_THERMAL_TEMP_MASK 0x1FF
|
||||
|
||||
/* Kirkwood Thermal Sensor Dev Structure */
|
||||
struct kirkwood_thermal_priv {
|
||||
void __iomem *sensor;
|
||||
};
|
||||
|
||||
static int kirkwood_get_temp(struct thermal_zone_device *thermal,
|
||||
unsigned long *temp)
|
||||
{
|
||||
unsigned long reg;
|
||||
struct kirkwood_thermal_priv *priv = thermal->devdata;
|
||||
|
||||
reg = readl_relaxed(priv->sensor);
|
||||
|
||||
/* Valid check */
|
||||
if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
KIRKWOOD_THERMAL_VALID_MASK)) {
|
||||
dev_err(&thermal->device,
|
||||
"Temperature sensor reading not valid\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate temperature. See Section 8.10.1 of the 88AP510
* datasheet, which has the same sensor.
|
||||
* Documentation/arm/Marvell/README
|
||||
*/
|
||||
reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
|
||||
KIRKWOOD_THERMAL_TEMP_MASK;
|
||||
*temp = ((2281638UL - (7298*reg)) / 10);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct thermal_zone_device_ops ops = {
|
||||
.get_temp = kirkwood_get_temp,
|
||||
};
|
||||
|
||||
static const struct of_device_id kirkwood_thermal_id_table[] = {
|
||||
{ .compatible = "marvell,kirkwood-thermal" },
|
||||
{}
|
||||
};
|
||||
|
||||
static int kirkwood_thermal_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct thermal_zone_device *thermal = NULL;
|
||||
struct kirkwood_thermal_priv *priv;
|
||||
struct resource *res;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res) {
|
||||
dev_err(&pdev->dev, "Failed to get platform resource\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
|
||||
if (!priv->sensor) {
|
||||
dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
|
||||
return -EADDRNOTAVAIL;
|
||||
}
|
||||
|
||||
thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
|
||||
priv, &ops, NULL, 0, 0);
|
||||
if (IS_ERR(thermal)) {
|
||||
dev_err(&pdev->dev,
|
||||
"Failed to register thermal zone device\n");
|
||||
return PTR_ERR(thermal);
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, thermal);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kirkwood_thermal_exit(struct platform_device *pdev)
|
||||
{
|
||||
struct thermal_zone_device *kirkwood_thermal =
|
||||
platform_get_drvdata(pdev);
|
||||
|
||||
thermal_zone_device_unregister(kirkwood_thermal);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
MODULE_DEVICE_TABLE(of, kirkwood_thermal_id_table);
|
||||
|
||||
static struct platform_driver kirkwood_thermal_driver = {
|
||||
.probe = kirkwood_thermal_probe,
|
||||
.remove = kirkwood_thermal_exit,
|
||||
.driver = {
|
||||
.name = "kirkwood_thermal",
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = of_match_ptr(kirkwood_thermal_id_table),
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(kirkwood_thermal_driver);
|
||||
|
||||
MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu@nigauri.org>");
|
||||
MODULE_DESCRIPTION("kirkwood thermal driver");
|
||||
MODULE_LICENSE("GPL");
|
|
@ -19,225 +19,473 @@
|
|||
*/
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/thermal.h>
|
||||
|
||||
#define THSCR 0x2c
|
||||
#define THSSR 0x30
|
||||
#define IDLE_INTERVAL 5000
|
||||
|
||||
#define COMMON_STR 0x00
|
||||
#define COMMON_ENR 0x04
|
||||
#define COMMON_INTMSK 0x0c
|
||||
|
||||
#define REG_POSNEG 0x20
|
||||
#define REG_FILONOFF 0x28
|
||||
#define REG_THSCR 0x2c
|
||||
#define REG_THSSR 0x30
|
||||
#define REG_INTCTRL 0x34
|
||||
|
||||
/* THSCR */
|
||||
#define CPTAP 0xf
|
||||
#define CPCTL (1 << 12)
|
||||
|
||||
/* THSSR */
|
||||
#define CTEMP 0x3f
|
||||
|
||||
struct rcar_thermal_common {
|
||||
void __iomem *base;
|
||||
struct device *dev;
|
||||
struct list_head head;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct rcar_thermal_priv {
|
||||
void __iomem *base;
|
||||
struct device *dev;
|
||||
spinlock_t lock;
|
||||
u32 comp;
|
||||
struct rcar_thermal_common *common;
|
||||
struct thermal_zone_device *zone;
|
||||
struct delayed_work work;
|
||||
struct mutex lock;
|
||||
struct list_head list;
|
||||
int id;
|
||||
int ctemp;
|
||||
};
|
||||
|
||||
#define rcar_thermal_for_each_priv(pos, common) \
|
||||
list_for_each_entry(pos, &common->head, list)
|
||||
|
||||
#define MCELSIUS(temp) ((temp) * 1000)
|
||||
#define rcar_zone_to_priv(zone) (zone->devdata)
|
||||
#define rcar_zone_to_priv(zone) ((zone)->devdata)
|
||||
#define rcar_priv_to_dev(priv) ((priv)->common->dev)
|
||||
#define rcar_has_irq_support(priv) ((priv)->common->base)
|
||||
#define rcar_id_to_shift(priv) ((priv)->id * 8)
|
||||
|
||||
#ifdef DEBUG
|
||||
# define rcar_force_update_temp(priv) 1
|
||||
#else
|
||||
# define rcar_force_update_temp(priv) 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* basic functions
|
||||
*/
|
||||
static u32 rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
|
||||
#define rcar_thermal_common_read(c, r) \
|
||||
_rcar_thermal_common_read(c, COMMON_ ##r)
|
||||
static u32 _rcar_thermal_common_read(struct rcar_thermal_common *common,
|
||||
u32 reg)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 ret;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
ret = ioread32(priv->base + reg);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
return ret;
|
||||
return ioread32(common->base + reg);
|
||||
}
|
||||
|
||||
#if 0 /* no user at this point */
|
||||
static void rcar_thermal_write(struct rcar_thermal_priv *priv,
|
||||
u32 reg, u32 data)
|
||||
#define rcar_thermal_common_write(c, r, d) \
|
||||
_rcar_thermal_common_write(c, COMMON_ ##r, d)
|
||||
static void _rcar_thermal_common_write(struct rcar_thermal_common *common,
|
||||
u32 reg, u32 data)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
iowrite32(data, priv->base + reg);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
iowrite32(data, common->base + reg);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
|
||||
u32 mask, u32 data)
|
||||
#define rcar_thermal_common_bset(c, r, m, d) \
|
||||
_rcar_thermal_common_bset(c, COMMON_ ##r, m, d)
|
||||
static void _rcar_thermal_common_bset(struct rcar_thermal_common *common,
|
||||
u32 reg, u32 mask, u32 data)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
val = ioread32(common->base + reg);
|
||||
val &= ~mask;
|
||||
val |= (data & mask);
|
||||
iowrite32(val, common->base + reg);
|
||||
}
|
||||
|
||||
#define rcar_thermal_read(p, r) _rcar_thermal_read(p, REG_ ##r)
|
||||
static u32 _rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
|
||||
{
|
||||
return ioread32(priv->base + reg);
|
||||
}
|
||||
|
||||
#define rcar_thermal_write(p, r, d) _rcar_thermal_write(p, REG_ ##r, d)
|
||||
static void _rcar_thermal_write(struct rcar_thermal_priv *priv,
|
||||
u32 reg, u32 data)
|
||||
{
|
||||
iowrite32(data, priv->base + reg);
|
||||
}
|
||||
|
||||
#define rcar_thermal_bset(p, r, m, d) _rcar_thermal_bset(p, REG_ ##r, m, d)
|
||||
static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
|
||||
u32 mask, u32 data)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = ioread32(priv->base + reg);
|
||||
val &= ~mask;
|
||||
val |= (data & mask);
|
||||
iowrite32(val, priv->base + reg);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* zone device functions
|
||||
*/
|
||||
static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
|
||||
unsigned long *temp)
|
||||
static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
|
||||
{
|
||||
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
|
||||
int val, min, max, tmp;
|
||||
struct device *dev = rcar_priv_to_dev(priv);
|
||||
int i;
|
||||
int ctemp, old, new;
|
||||
|
||||
tmp = -200; /* default */
|
||||
while (1) {
|
||||
if (priv->comp < 1 || priv->comp > 12) {
|
||||
dev_err(priv->dev,
|
||||
"THSSR invalid data (%d)\n", priv->comp);
|
||||
priv->comp = 4; /* for next thermal */
|
||||
return -EINVAL;
|
||||
}
|
||||
mutex_lock(&priv->lock);
|
||||
|
||||
/*
|
||||
* THS comparator offset and the reference temperature
|
||||
*
|
||||
* Comparator | reference | Temperature field
|
||||
* offset | temperature | measurement
|
||||
* | (degrees C) | (degrees C)
|
||||
* -------------+---------------+-------------------
|
||||
* 1 | -45 | -45 to -30
|
||||
* 2 | -30 | -30 to -15
|
||||
* 3 | -15 | -15 to 0
|
||||
* 4 | 0 | 0 to +15
|
||||
* 5 | +15 | +15 to +30
|
||||
* 6 | +30 | +30 to +45
|
||||
* 7 | +45 | +45 to +60
|
||||
* 8 | +60 | +60 to +75
|
||||
* 9 | +75 | +75 to +90
|
||||
* 10 | +90 | +90 to +105
|
||||
* 11 | +105 | +105 to +120
|
||||
* 12 | +120 | +120 to +135
|
||||
*/
|
||||
|
||||
/* calculate thermal limitation */
|
||||
min = (priv->comp * 15) - 60;
|
||||
max = min + 15;
|
||||
/*
|
||||
* TSC decides a value of CPTAP automatically,
|
||||
* and this is the condition which validates the interrupt.
|
||||
*/
|
||||
rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL);
|
||||
|
||||
ctemp = 0;
|
||||
old = ~0;
|
||||
for (i = 0; i < 128; i++) {
|
||||
/*
|
||||
* we need to wait 300us after changing comparator offset
|
||||
* to get stable temperature.
|
||||
* see "Usage Notes" on datasheet
|
||||
*/
|
||||
rcar_thermal_bset(priv, THSCR, CPTAP, priv->comp);
|
||||
udelay(300);
|
||||
|
||||
/* calculate current temperature */
|
||||
val = rcar_thermal_read(priv, THSSR) & CTEMP;
|
||||
val = (val * 5) - 65;
|
||||
|
||||
dev_dbg(priv->dev, "comp/min/max/val = %d/%d/%d/%d\n",
|
||||
priv->comp, min, max, val);
|
||||
|
||||
/*
|
||||
* If val is same as min/max, then,
|
||||
* it should try again on next comparator.
|
||||
* But the val might be correct temperature.
|
||||
* Keep it on "tmp" and compare with next val.
|
||||
*/
|
||||
if (tmp == val)
|
||||
break;
|
||||
|
||||
if (val <= min) {
|
||||
tmp = min;
|
||||
priv->comp--; /* try again */
|
||||
} else if (val >= max) {
|
||||
tmp = max;
|
||||
priv->comp++; /* try again */
|
||||
} else {
|
||||
tmp = val;
|
||||
new = rcar_thermal_read(priv, THSSR) & CTEMP;
|
||||
if (new == old) {
|
||||
ctemp = new;
|
||||
break;
|
||||
}
|
||||
old = new;
|
||||
}
|
||||
|
||||
if (!ctemp) {
|
||||
dev_err(dev, "thermal sensor was broken\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* enable IRQ
|
||||
*/
|
||||
if (rcar_has_irq_support(priv)) {
|
||||
rcar_thermal_write(priv, FILONOFF, 0);
|
||||
|
||||
/* enable Rising/Falling edge interrupt */
|
||||
rcar_thermal_write(priv, POSNEG, 0x1);
|
||||
rcar_thermal_write(priv, INTCTRL, (((ctemp - 0) << 8) |
|
||||
((ctemp - 1) << 0)));
|
||||
}
|
||||
|
||||
dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp);
|
||||
|
||||
priv->ctemp = ctemp;
|
||||
|
||||
mutex_unlock(&priv->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
|
||||
unsigned long *temp)
|
||||
{
|
||||
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
|
||||
|
||||
if (!rcar_has_irq_support(priv) || rcar_force_update_temp(priv))
|
||||
rcar_thermal_update_temp(priv);
|
||||
|
||||
mutex_lock(&priv->lock);
|
||||
*temp = MCELSIUS((priv->ctemp * 5) - 65);
|
||||
mutex_unlock(&priv->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
|
||||
int trip, enum thermal_trip_type *type)
|
||||
{
|
||||
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
|
||||
struct device *dev = rcar_priv_to_dev(priv);
|
||||
|
||||
/* see rcar_thermal_get_temp() */
|
||||
switch (trip) {
|
||||
case 0: /* +90 <= temp */
|
||||
*type = THERMAL_TRIP_CRITICAL;
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "rcar driver trip error\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
|
||||
int trip, unsigned long *temp)
|
||||
{
|
||||
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
|
||||
struct device *dev = rcar_priv_to_dev(priv);
|
||||
|
||||
/* see rcar_thermal_get_temp() */
|
||||
switch (trip) {
|
||||
case 0: /* +90 <= temp */
|
||||
*temp = MCELSIUS(90);
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "rcar driver trip error\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rcar_thermal_notify(struct thermal_zone_device *zone,
|
||||
int trip, enum thermal_trip_type type)
|
||||
{
|
||||
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
|
||||
struct device *dev = rcar_priv_to_dev(priv);
|
||||
|
||||
switch (type) {
|
||||
case THERMAL_TRIP_CRITICAL:
|
||||
/* FIXME */
|
||||
dev_warn(dev, "Thermal reached to critical temperature\n");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
*temp = MCELSIUS(tmp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
|
||||
.get_temp = rcar_thermal_get_temp,
|
||||
.get_temp = rcar_thermal_get_temp,
|
||||
.get_trip_type = rcar_thermal_get_trip_type,
|
||||
.get_trip_temp = rcar_thermal_get_trip_temp,
|
||||
.notify = rcar_thermal_notify,
|
||||
};
|
||||
|
||||
/*
|
||||
* interrupt
|
||||
*/
|
||||
#define rcar_thermal_irq_enable(p) _rcar_thermal_irq_ctrl(p, 1)
|
||||
#define rcar_thermal_irq_disable(p) _rcar_thermal_irq_ctrl(p, 0)
|
||||
static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
|
||||
{
|
||||
struct rcar_thermal_common *common = priv->common;
|
||||
unsigned long flags;
|
||||
u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */
|
||||
|
||||
spin_lock_irqsave(&common->lock, flags);
|
||||
|
||||
rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask);
|
||||
|
||||
spin_unlock_irqrestore(&common->lock, flags);
|
||||
}
|
||||
|
||||
static void rcar_thermal_work(struct work_struct *work)
|
||||
{
|
||||
struct rcar_thermal_priv *priv;
|
||||
|
||||
priv = container_of(work, struct rcar_thermal_priv, work.work);
|
||||
|
||||
rcar_thermal_update_temp(priv);
|
||||
rcar_thermal_irq_enable(priv);
|
||||
thermal_zone_device_update(priv->zone);
|
||||
}
|
||||
|
||||
static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
|
||||
{
|
||||
struct device *dev = rcar_priv_to_dev(priv);
|
||||
|
||||
status = (status >> rcar_id_to_shift(priv)) & 0x3;
|
||||
|
||||
if (status & 0x3) {
|
||||
dev_dbg(dev, "thermal%d %s%s\n",
|
||||
priv->id,
|
||||
(status & 0x2) ? "Rising " : "",
|
||||
(status & 0x1) ? "Falling" : "");
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static irqreturn_t rcar_thermal_irq(int irq, void *data)
|
||||
{
|
||||
struct rcar_thermal_common *common = data;
|
||||
struct rcar_thermal_priv *priv;
|
||||
unsigned long flags;
|
||||
u32 status, mask;
|
||||
|
||||
spin_lock_irqsave(&common->lock, flags);
|
||||
|
||||
mask = rcar_thermal_common_read(common, INTMSK);
|
||||
status = rcar_thermal_common_read(common, STR);
|
||||
rcar_thermal_common_write(common, STR, 0x000F0F0F & mask);
|
||||
|
||||
spin_unlock_irqrestore(&common->lock, flags);
|
||||
|
||||
status = status & ~mask;
|
||||
|
||||
/*
|
||||
* check the status
|
||||
*/
|
||||
rcar_thermal_for_each_priv(priv, common) {
|
||||
if (rcar_thermal_had_changed(priv, status)) {
|
||||
rcar_thermal_irq_disable(priv);
|
||||
schedule_delayed_work(&priv->work,
|
||||
msecs_to_jiffies(300));
|
||||
}
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/*
|
||||
* platform functions
|
||||
*/
|
||||
static int rcar_thermal_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct thermal_zone_device *zone;
|
||||
struct rcar_thermal_common *common;
|
||||
struct rcar_thermal_priv *priv;
|
||||
struct resource *res;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct resource *res, *irq;
|
||||
int mres = 0;
|
||||
int i;
|
||||
int idle = IDLE_INTERVAL;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res) {
|
||||
dev_err(&pdev->dev, "Could not get platform resource\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv) {
|
||||
dev_err(&pdev->dev, "Could not allocate priv\n");
|
||||
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
|
||||
if (!common) {
|
||||
dev_err(dev, "Could not allocate common\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
priv->comp = 4; /* basic setup */
|
||||
priv->dev = &pdev->dev;
|
||||
spin_lock_init(&priv->lock);
|
||||
priv->base = devm_ioremap_nocache(&pdev->dev,
|
||||
res->start, resource_size(res));
|
||||
if (!priv->base) {
|
||||
dev_err(&pdev->dev, "Unable to ioremap thermal register\n");
|
||||
return -ENOMEM;
|
||||
INIT_LIST_HEAD(&common->head);
|
||||
spin_lock_init(&common->lock);
|
||||
common->dev = dev;
|
||||
|
||||
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
||||
if (irq) {
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* platform has IRQ support.
|
||||
* Then, the driver uses the common registers
|
||||
*/
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
|
||||
if (!res) {
|
||||
dev_err(dev, "Could not get platform resource\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
|
||||
dev_name(dev), common);
|
||||
if (ret) {
|
||||
dev_err(dev, "irq request failed\n ");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* rcar_has_irq_support() will be enabled
|
||||
*/
|
||||
common->base = devm_request_and_ioremap(dev, res);
|
||||
if (!common->base) {
|
||||
dev_err(dev, "Unable to ioremap thermal register\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* enable temperature comparison */
|
||||
rcar_thermal_common_write(common, ENR, 0x00030303);
|
||||
|
||||
idle = 0; /* polling delay is not needed */
|
||||
}
|
||||
|
||||
zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv,
|
||||
&rcar_thermal_zone_ops, NULL, 0, 0);
|
||||
if (IS_ERR(zone)) {
|
||||
dev_err(&pdev->dev, "thermal zone device is NULL\n");
|
||||
return PTR_ERR(zone);
|
||||
for (i = 0;; i++) {
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
|
||||
if (!res)
|
||||
break;
|
||||
|
||||
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv) {
|
||||
dev_err(dev, "Could not allocate priv\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
priv->base = devm_request_and_ioremap(dev, res);
|
||||
if (!priv->base) {
|
||||
dev_err(dev, "Unable to ioremap priv register\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
priv->common = common;
|
||||
priv->id = i;
|
||||
mutex_init(&priv->lock);
|
||||
INIT_LIST_HEAD(&priv->list);
|
||||
INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
|
||||
rcar_thermal_update_temp(priv);
|
||||
|
||||
priv->zone = thermal_zone_device_register("rcar_thermal",
|
||||
1, 0, priv,
|
||||
&rcar_thermal_zone_ops, NULL, 0,
|
||||
idle);
|
||||
if (IS_ERR(priv->zone)) {
|
||||
dev_err(dev, "can't register thermal zone\n");
|
||||
goto error_unregister;
|
||||
}
|
||||
|
||||
list_move_tail(&priv->list, &common->head);
|
||||
|
||||
if (rcar_has_irq_support(priv))
|
||||
rcar_thermal_irq_enable(priv);
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, zone);
|
||||
platform_set_drvdata(pdev, common);
|
||||
|
||||
dev_info(&pdev->dev, "proved\n");
|
||||
dev_info(dev, "%d sensor proved\n", i);
|
||||
|
||||
return 0;
|
||||
|
||||
error_unregister:
|
||||
rcar_thermal_for_each_priv(priv, common)
|
||||
thermal_zone_device_unregister(priv->zone);
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static int rcar_thermal_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct thermal_zone_device *zone = platform_get_drvdata(pdev);
|
||||
struct rcar_thermal_common *common = platform_get_drvdata(pdev);
|
||||
struct rcar_thermal_priv *priv;
|
||||
|
||||
rcar_thermal_for_each_priv(priv, common)
|
||||
thermal_zone_device_unregister(priv->zone);
|
||||
|
||||
thermal_zone_device_unregister(zone);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id rcar_thermal_dt_ids[] = {
|
||||
{ .compatible = "renesas,rcar-thermal", },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
|
||||
|
||||
static struct platform_driver rcar_thermal_driver = {
|
||||
.driver = {
|
||||
.name = "rcar_thermal",
|
||||
.of_match_table = rcar_thermal_dt_ids,
|
||||
},
|
||||
.probe = rcar_thermal_probe,
|
||||
.remove = rcar_thermal_remove,
|
||||
|
|
|
@ -131,7 +131,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
stdev->clk = clk_get(&pdev->dev, NULL);
|
||||
stdev->clk = devm_clk_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(stdev->clk)) {
|
||||
dev_err(&pdev->dev, "Can't get clock\n");
|
||||
return PTR_ERR(stdev->clk);
|
||||
|
@ -140,7 +140,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
|
|||
ret = clk_enable(stdev->clk);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Can't enable clock\n");
|
||||
goto put_clk;
|
||||
return ret;
|
||||
}
|
||||
|
||||
stdev->flags = val;
|
||||
|
@ -163,8 +163,6 @@ static int spear_thermal_probe(struct platform_device *pdev)
|
|||
|
||||
disable_clk:
|
||||
clk_disable(stdev->clk);
|
||||
put_clk:
|
||||
clk_put(stdev->clk);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -183,7 +181,6 @@ static int spear_thermal_exit(struct platform_device *pdev)
|
|||
writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
|
||||
|
||||
clk_disable(stdev->clk);
|
||||
clk_put(stdev->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -35,21 +35,54 @@
|
|||
* state for this trip point
|
||||
* b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
|
||||
* state for this trip point
|
||||
* c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
|
||||
* for this trip point
|
||||
* d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
|
||||
* for this trip point
|
||||
* If the temperature is lower than a trip point,
|
||||
* a. if the trend is THERMAL_TREND_RAISING, do nothing
|
||||
* b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
|
||||
* state for this trip point, if the cooling state already
|
||||
* equals lower limit, deactivate the thermal instance
|
||||
* c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
|
||||
* d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
|
||||
* if the cooling state already equals lower limit,
|
||||
* deactivate the thermal instance
|
||||
*/
|
||||
static unsigned long get_target_state(struct thermal_instance *instance,
|
||||
enum thermal_trend trend)
|
||||
enum thermal_trend trend, bool throttle)
|
||||
{
|
||||
struct thermal_cooling_device *cdev = instance->cdev;
|
||||
unsigned long cur_state;
|
||||
|
||||
cdev->ops->get_cur_state(cdev, &cur_state);
|
||||
|
||||
if (trend == THERMAL_TREND_RAISING) {
|
||||
cur_state = cur_state < instance->upper ?
|
||||
(cur_state + 1) : instance->upper;
|
||||
} else if (trend == THERMAL_TREND_DROPPING) {
|
||||
cur_state = cur_state > instance->lower ?
|
||||
(cur_state - 1) : instance->lower;
|
||||
switch (trend) {
|
||||
case THERMAL_TREND_RAISING:
|
||||
if (throttle)
|
||||
cur_state = cur_state < instance->upper ?
|
||||
(cur_state + 1) : instance->upper;
|
||||
break;
|
||||
case THERMAL_TREND_RAISE_FULL:
|
||||
if (throttle)
|
||||
cur_state = instance->upper;
|
||||
break;
|
||||
case THERMAL_TREND_DROPPING:
|
||||
if (cur_state == instance->lower) {
|
||||
if (!throttle)
|
||||
cur_state = -1;
|
||||
} else
|
||||
cur_state -= 1;
|
||||
break;
|
||||
case THERMAL_TREND_DROP_FULL:
|
||||
if (cur_state == instance->lower) {
|
||||
if (!throttle)
|
||||
cur_state = -1;
|
||||
} else
|
||||
cur_state = instance->lower;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return cur_state;
|
||||
|
@ -66,57 +99,14 @@ static void update_passive_instance(struct thermal_zone_device *tz,
|
|||
tz->passive += value;
|
||||
}
|
||||
|
||||
static void update_instance_for_throttle(struct thermal_zone_device *tz,
|
||||
int trip, enum thermal_trip_type trip_type,
|
||||
enum thermal_trend trend)
|
||||
{
|
||||
struct thermal_instance *instance;
|
||||
|
||||
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
|
||||
if (instance->trip != trip)
|
||||
continue;
|
||||
|
||||
instance->target = get_target_state(instance, trend);
|
||||
|
||||
/* Activate a passive thermal instance */
|
||||
if (instance->target == THERMAL_NO_TARGET)
|
||||
update_passive_instance(tz, trip_type, 1);
|
||||
|
||||
instance->cdev->updated = false; /* cdev needs update */
|
||||
}
|
||||
}
|
||||
|
||||
static void update_instance_for_dethrottle(struct thermal_zone_device *tz,
|
||||
int trip, enum thermal_trip_type trip_type)
|
||||
{
|
||||
struct thermal_instance *instance;
|
||||
struct thermal_cooling_device *cdev;
|
||||
unsigned long cur_state;
|
||||
|
||||
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
|
||||
if (instance->trip != trip ||
|
||||
instance->target == THERMAL_NO_TARGET)
|
||||
continue;
|
||||
|
||||
cdev = instance->cdev;
|
||||
cdev->ops->get_cur_state(cdev, &cur_state);
|
||||
|
||||
instance->target = cur_state > instance->lower ?
|
||||
(cur_state - 1) : THERMAL_NO_TARGET;
|
||||
|
||||
/* Deactivate a passive thermal instance */
|
||||
if (instance->target == THERMAL_NO_TARGET)
|
||||
update_passive_instance(tz, trip_type, -1);
|
||||
|
||||
cdev->updated = false; /* cdev needs update */
|
||||
}
|
||||
}
|
||||
|
||||
static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
|
||||
{
|
||||
long trip_temp;
|
||||
enum thermal_trip_type trip_type;
|
||||
enum thermal_trend trend;
|
||||
struct thermal_instance *instance;
|
||||
bool throttle = false;
|
||||
int old_target;
|
||||
|
||||
if (trip == THERMAL_TRIPS_NONE) {
|
||||
trip_temp = tz->forced_passive;
|
||||
|
@ -128,12 +118,30 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
|
|||
|
||||
trend = get_tz_trend(tz, trip);
|
||||
|
||||
if (tz->temperature >= trip_temp)
|
||||
throttle = true;
|
||||
|
||||
mutex_lock(&tz->lock);
|
||||
|
||||
if (tz->temperature >= trip_temp)
|
||||
update_instance_for_throttle(tz, trip, trip_type, trend);
|
||||
else
|
||||
update_instance_for_dethrottle(tz, trip, trip_type);
|
||||
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
|
||||
if (instance->trip != trip)
|
||||
continue;
|
||||
|
||||
old_target = instance->target;
|
||||
instance->target = get_target_state(instance, trend, throttle);
|
||||
|
||||
/* Activate a passive thermal instance */
|
||||
if (old_target == THERMAL_NO_TARGET &&
|
||||
instance->target != THERMAL_NO_TARGET)
|
||||
update_passive_instance(tz, trip_type, 1);
|
||||
/* Deactivate a passive thermal instance */
|
||||
else if (old_target != THERMAL_NO_TARGET &&
|
||||
instance->target == THERMAL_NO_TARGET)
|
||||
update_passive_instance(tz, trip_type, -1);
|
||||
|
||||
|
||||
instance->cdev->updated = false; /* cdev needs update */
|
||||
}
|
||||
|
||||
mutex_unlock(&tz->lock);
|
||||
}
|
||||
|
|
|
@ -32,7 +32,6 @@
|
|||
#include <linux/kdev_t.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/thermal.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <net/netlink.h>
|
||||
#include <net/genetlink.h>
|
||||
|
@ -348,8 +347,9 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
|
|||
tz->ops->notify(tz, trip, trip_type);
|
||||
|
||||
if (trip_type == THERMAL_TRIP_CRITICAL) {
|
||||
pr_emerg("Critical temperature reached(%d C),shutting down\n",
|
||||
tz->temperature / 1000);
|
||||
dev_emerg(&tz->device,
|
||||
"critical temperature reached(%d C),shutting down\n",
|
||||
tz->temperature / 1000);
|
||||
orderly_poweroff(true);
|
||||
}
|
||||
}
|
||||
|
@ -371,23 +371,57 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
|
|||
monitor_thermal_zone(tz);
|
||||
}
|
||||
|
||||
static int thermal_zone_get_temp(struct thermal_zone_device *tz,
|
||||
unsigned long *temp)
|
||||
{
|
||||
int ret = 0;
|
||||
#ifdef CONFIG_THERMAL_EMULATION
|
||||
int count;
|
||||
unsigned long crit_temp = -1UL;
|
||||
enum thermal_trip_type type;
|
||||
#endif
|
||||
|
||||
mutex_lock(&tz->lock);
|
||||
|
||||
ret = tz->ops->get_temp(tz, temp);
|
||||
#ifdef CONFIG_THERMAL_EMULATION
|
||||
if (!tz->emul_temperature)
|
||||
goto skip_emul;
|
||||
|
||||
for (count = 0; count < tz->trips; count++) {
|
||||
ret = tz->ops->get_trip_type(tz, count, &type);
|
||||
if (!ret && type == THERMAL_TRIP_CRITICAL) {
|
||||
ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret)
|
||||
goto skip_emul;
|
||||
|
||||
if (*temp < crit_temp)
|
||||
*temp = tz->emul_temperature;
|
||||
skip_emul:
|
||||
#endif
|
||||
mutex_unlock(&tz->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void update_temperature(struct thermal_zone_device *tz)
|
||||
{
|
||||
long temp;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&tz->lock);
|
||||
|
||||
ret = tz->ops->get_temp(tz, &temp);
|
||||
ret = thermal_zone_get_temp(tz, &temp);
|
||||
if (ret) {
|
||||
pr_warn("failed to read out thermal zone %d\n", tz->id);
|
||||
goto exit;
|
||||
dev_warn(&tz->device, "failed to read out thermal zone %d\n",
|
||||
tz->id);
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&tz->lock);
|
||||
tz->last_temperature = tz->temperature;
|
||||
tz->temperature = temp;
|
||||
|
||||
exit:
|
||||
mutex_unlock(&tz->lock);
|
||||
}
|
||||
|
||||
|
@ -430,10 +464,7 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf)
|
|||
long temperature;
|
||||
int ret;
|
||||
|
||||
if (!tz->ops->get_temp)
|
||||
return -EPERM;
|
||||
|
||||
ret = tz->ops->get_temp(tz, &temperature);
|
||||
ret = thermal_zone_get_temp(tz, &temperature);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -693,6 +724,31 @@ policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
|
|||
return sprintf(buf, "%s\n", tz->governor->name);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_THERMAL_EMULATION
|
||||
static ssize_t
|
||||
emul_temp_store(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct thermal_zone_device *tz = to_thermal_zone(dev);
|
||||
int ret = 0;
|
||||
unsigned long temperature;
|
||||
|
||||
if (kstrtoul(buf, 10, &temperature))
|
||||
return -EINVAL;
|
||||
|
||||
if (!tz->ops->set_emul_temp) {
|
||||
mutex_lock(&tz->lock);
|
||||
tz->emul_temperature = temperature;
|
||||
mutex_unlock(&tz->lock);
|
||||
} else {
|
||||
ret = tz->ops->set_emul_temp(tz, temperature);
|
||||
}
|
||||
|
||||
return ret ? ret : count;
|
||||
}
|
||||
static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
|
||||
#endif/*CONFIG_THERMAL_EMULATION*/
|
||||
|
||||
static DEVICE_ATTR(type, 0444, type_show, NULL);
|
||||
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
|
||||
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
|
||||
|
@ -835,7 +891,7 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
|
|||
temp_input);
|
||||
struct thermal_zone_device *tz = temp->tz;
|
||||
|
||||
ret = tz->ops->get_temp(tz, &temperature);
|
||||
ret = thermal_zone_get_temp(tz, &temperature);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -1522,6 +1578,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
|
|||
if (!ops || !ops->get_temp)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (trips > 0 && !ops->get_trip_type)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
|
||||
if (!tz)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
@ -1585,6 +1644,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
|
|||
goto unregister;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_THERMAL_EMULATION
|
||||
result = device_create_file(&tz->device, &dev_attr_emul_temp);
|
||||
if (result)
|
||||
goto unregister;
|
||||
#endif
|
||||
/* Create policy attribute */
|
||||
result = device_create_file(&tz->device, &dev_attr_policy);
|
||||
if (result)
|
||||
|
@ -1704,7 +1768,8 @@ static struct genl_multicast_group thermal_event_mcgrp = {
|
|||
.name = THERMAL_GENL_MCAST_GROUP_NAME,
|
||||
};
|
||||
|
||||
int thermal_generate_netlink_event(u32 orig, enum events event)
|
||||
int thermal_generate_netlink_event(struct thermal_zone_device *tz,
|
||||
enum events event)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct nlattr *attr;
|
||||
|
@ -1714,6 +1779,9 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
|
|||
int result;
|
||||
static unsigned int thermal_event_seqnum;
|
||||
|
||||
if (!tz)
|
||||
return -EINVAL;
|
||||
|
||||
/* allocate memory */
|
||||
size = nla_total_size(sizeof(struct thermal_genl_event)) +
|
||||
nla_total_size(0);
|
||||
|
@ -1748,7 +1816,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
|
|||
|
||||
memset(thermal_event, 0, sizeof(struct thermal_genl_event));
|
||||
|
||||
thermal_event->orig = orig;
|
||||
thermal_event->orig = tz->id;
|
||||
thermal_event->event = event;
|
||||
|
||||
/* send multicast genetlink message */
|
||||
|
@ -1760,7 +1828,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
|
|||
|
||||
result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
|
||||
if (result)
|
||||
pr_info("failed to send netlink event:%d\n", result);
|
||||
dev_err(&tz->device, "Failed to send netlink event:%d", result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -1800,6 +1868,7 @@ static int __init thermal_init(void)
|
|||
idr_destroy(&thermal_cdev_idr);
|
||||
mutex_destroy(&thermal_idr_lock);
|
||||
mutex_destroy(&thermal_list_lock);
|
||||
return result;
|
||||
}
|
||||
result = genetlink_init();
|
||||
return result;
|
||||
|
|
|
@ -53,6 +53,8 @@ struct freq_clip_table {
|
|||
* struct exynos_tmu_platform_data
|
||||
* @threshold: basic temperature for generating interrupt
|
||||
* 25 <= threshold <= 125 [unit: degree Celsius]
|
||||
* @threshold_falling: differential value for setting threshold
|
||||
* of temperature falling interrupt.
|
||||
* @trigger_levels: array for each interrupt levels
|
||||
* [unit: degree Celsius]
|
||||
* 0: temperature for trigger_level0 interrupt
|
||||
|
@ -97,6 +99,7 @@ struct freq_clip_table {
|
|||
*/
|
||||
struct exynos_tmu_platform_data {
|
||||
u8 threshold;
|
||||
u8 threshold_falling;
|
||||
u8 trigger_levels[4];
|
||||
bool trigger_level0_en;
|
||||
bool trigger_level1_en;
|
||||
|
|
|
@ -74,6 +74,8 @@ enum thermal_trend {
|
|||
THERMAL_TREND_STABLE, /* temperature is stable */
|
||||
THERMAL_TREND_RAISING, /* temperature is raising */
|
||||
THERMAL_TREND_DROPPING, /* temperature is dropping */
|
||||
THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */
|
||||
THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
|
||||
};
|
||||
|
||||
/* Events supported by Thermal Netlink */
|
||||
|
@ -121,6 +123,7 @@ struct thermal_zone_device_ops {
|
|||
int (*set_trip_hyst) (struct thermal_zone_device *, int,
|
||||
unsigned long);
|
||||
int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
|
||||
int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
|
||||
int (*get_trend) (struct thermal_zone_device *, int,
|
||||
enum thermal_trend *);
|
||||
int (*notify) (struct thermal_zone_device *, int,
|
||||
|
@ -163,6 +166,7 @@ struct thermal_zone_device {
|
|||
int polling_delay;
|
||||
int temperature;
|
||||
int last_temperature;
|
||||
int emul_temperature;
|
||||
int passive;
|
||||
unsigned int forced_passive;
|
||||
const struct thermal_zone_device_ops *ops;
|
||||
|
@ -244,9 +248,11 @@ int thermal_register_governor(struct thermal_governor *);
|
|||
void thermal_unregister_governor(struct thermal_governor *);
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
extern int thermal_generate_netlink_event(u32 orig, enum events event);
|
||||
extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
|
||||
enum events event);
|
||||
#else
|
||||
static inline int thermal_generate_netlink_event(u32 orig, enum events event)
|
||||
static int thermal_generate_netlink_event(struct thermal_zone_device *tz,
|
||||
enum events event)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -554,6 +554,7 @@ void tick_nohz_idle_enter(void)
|
|||
|
||||
local_irq_enable();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
|
||||
|
||||
/**
|
||||
* tick_nohz_irq_exit - update next tick event from interrupt exit
|
||||
|
@ -685,6 +686,7 @@ void tick_nohz_idle_exit(void)
|
|||
|
||||
local_irq_enable();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
|
||||
|
||||
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
|
||||
{
|
||||
|
|