Merge "Merge android-4.19.30 (4afd59719
) into msm-4.19"
This commit is contained in:
commit
7adab7b9ef
286 changed files with 4313 additions and 1173 deletions
16 Documentation/ABI/testing/procfs-concurrent_time Normal file
|
@ -0,0 +1,16 @@
|
|||
What: /proc/uid_concurrent_active_time
|
||||
Date: December 2018
|
||||
Contact: Connor O'Brien <connoro@google.com>
|
||||
Description:
|
||||
The /proc/uid_concurrent_active_time file displays aggregated cputime
|
||||
numbers for each uid, broken down by the total number of cores that were
|
||||
active while the uid's task was running.
|
||||
|
||||
What: /proc/uid_concurrent_policy_time
|
||||
Date: December 2018
|
||||
Contact: Connor O'Brien <connoro@google.com>
|
||||
Description:
|
||||
The /proc/uid_concurrent_policy_time file displays aggregated cputime
|
||||
numbers for each uid, broken down based on the cpufreq policy
|
||||
of the core used by the uid's task and the number of cores associated
|
||||
with that policy that were active while the uid's task was running.
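A minimal userspace sketch that simply dumps one of these tables (the per-line
format is defined by the kernel and not repeated here):

    #include <stdio.h>

    int main(void)
    {
            char line[4096];
            FILE *f = fopen("/proc/uid_concurrent_active_time", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* Print the table verbatim; parsing is left to the reader. */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }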
|
|
@ -102,6 +102,29 @@ Only the lists of names from directories are merged. Other content
|
|||
such as metadata and extended attributes are reported for the upper
|
||||
directory only. These attributes of the lower directory are hidden.
|
||||
|
||||
credentials
|
||||
-----------
|
||||
|
||||
By default, all access to the upper, lower and work directories is performed with the
|
||||
recorded mounter's MAC and DAC credentials. The incoming accesses are
|
||||
checked against the caller's credentials.
|
||||
|
||||
In cases where the caller's MAC or DAC credentials do not overlap with the
mounter's, a use case available in older versions of the driver, the
override_creds mount flag can be turned off. This helps when the caller has
legitimate credentials that the mounter does not. Several unintended side
effects will occur though. The
|
||||
caller without certain key capabilities or lower privilege will not
|
||||
always be able to delete files or directories, create nodes, or
|
||||
search some restricted directories. The ability to search and read
|
||||
a directory entry is spotty as a result of the cache mechanism not
|
||||
retesting the credentials, on the assumption that a privileged
caller can fill the cache and a lower-privileged caller can then read the directory
|
||||
cache. The uneven security model where cache, upperdir and workdir
|
||||
are opened at privilege, but accessed without creating a form of
|
||||
privilege escalation, should only be used with strict understanding
|
||||
of the side effects and of the security policies.
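As a sketch only (assuming the flag is exposed as an 'override_creds=off'
mount option, as in the Android common kernel; the paths are placeholders),
turning the flag off from a C program would look like:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* 'override_creds=off' and the paths below are assumptions. */
            const char *opts = "lowerdir=/lower,upperdir=/upper,"
                               "workdir=/work,override_creds=off";

            if (mount("overlay", "/merged", "overlay", 0, opts)) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }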
|
||||
|
||||
whiteouts and opaque directories
|
||||
--------------------------------
|
||||
|
||||
|
|
169 Documentation/power/energy-model.txt Normal file
|
@ -0,0 +1,169 @@
|
|||
====================
|
||||
Energy Model of CPUs
|
||||
====================
|
||||
|
||||
1. Overview
|
||||
-----------
|
||||
|
||||
The Energy Model (EM) framework serves as an interface between drivers knowing
|
||||
the power consumed by CPUs at various performance levels, and the kernel
|
||||
subsystems willing to use that information to make energy-aware decisions.
|
||||
|
||||
The source of the information about the power consumed by CPUs can vary greatly
|
||||
from one platform to another. These power costs can be estimated using
|
||||
devicetree data in some cases. In others, the firmware will know better.
|
||||
Alternatively, userspace might be best positioned. And so on. To avoid having
each and every client subsystem re-implement support for each and every
possible source of information on its own, the EM framework intervenes as an
abstraction layer which standardizes the format of power cost tables in the
kernel, hence avoiding redundant work.
|
||||
|
||||
The figure below depicts an example of drivers (Arm-specific here, but the
|
||||
approach is applicable to any architecture) providing power costs to the EM
|
||||
framework, and interested clients reading the data from it.
|
||||
|
||||
+---------------+ +-----------------+ +---------------+
|
||||
| Thermal (IPA) | | Scheduler (EAS) | | Other |
|
||||
+---------------+ +-----------------+ +---------------+
|
||||
| | em_pd_energy() |
|
||||
| | em_cpu_get() |
|
||||
+---------+ | +---------+
|
||||
| | |
|
||||
v v v
|
||||
+---------------------+
|
||||
| Energy Model |
|
||||
| Framework |
|
||||
+---------------------+
|
||||
^ ^ ^
|
||||
| | | em_register_perf_domain()
|
||||
+----------+ | +---------+
|
||||
| | |
|
||||
+---------------+ +---------------+ +--------------+
|
||||
| cpufreq-dt | | arm_scmi | | Other |
|
||||
+---------------+ +---------------+ +--------------+
|
||||
^ ^ ^
|
||||
| | |
|
||||
+--------------+ +---------------+ +--------------+
|
||||
| Device Tree | | Firmware | | ? |
|
||||
+--------------+ +---------------+ +--------------+
|
||||
|
||||
The EM framework manages power cost tables per 'performance domain' in the
|
||||
system. A performance domain is a group of CPUs whose performance is scaled
|
||||
together. Performance domains generally have a 1-to-1 mapping with CPUFreq
|
||||
policies. All CPUs in a performance domain are required to have the same
|
||||
micro-architecture. CPUs in different performance domains can have different
|
||||
micro-architectures.
|
||||
|
||||
|
||||
2. Core APIs
|
||||
------------
|
||||
|
||||
2.1 Config options
|
||||
|
||||
CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
|
||||
|
||||
|
||||
2.2 Registration of performance domains
|
||||
|
||||
Drivers are expected to register performance domains into the EM framework by
|
||||
calling the following API:
|
||||
|
||||
int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
|
||||
struct em_data_callback *cb);
|
||||
|
||||
Drivers must specify the CPUs of the performance domains using the cpumask
|
||||
argument, and provide a callback function returning <frequency, power> tuples
|
||||
for each capacity state. The callback function provided by the driver is free
|
||||
to fetch data from any relevant location (DT, firmware, ...), and by any means
|
||||
deemed necessary. See Section 3. for an example of a driver implementing this
|
||||
callback, and kernel/power/energy_model.c for further documentation on this
|
||||
API.
|
||||
|
||||
|
||||
2.3 Accessing performance domains
|
||||
|
||||
Subsystems interested in the energy model of a CPU can retrieve it using the
|
||||
em_cpu_get() API. The energy model tables are allocated once upon creation of
|
||||
the performance domains, and kept in memory untouched.
|
||||
|
||||
The energy consumed by a performance domain can be estimated using the
|
||||
em_pd_energy() API. The estimation is performed assuming that the schedutil
|
||||
CPUfreq governor is in use.
|
||||
|
||||
More details about the above APIs can be found in include/linux/energy_model.h.
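As an illustrative sketch (using the em_cpu_get() and em_pd_energy()
prototypes from include/linux/energy_model.h), a client could query the model
as follows:

    #include <linux/energy_model.h>

    /*
     * Sketch only: estimate the energy of the performance domain containing
     * 'cpu'. 'max_util' is the highest per-CPU utilization in the domain and
     * 'sum_util' the sum of the utilizations of all its CPUs.
     */
    static unsigned long estimate_pd_energy(int cpu, unsigned long max_util,
                                            unsigned long sum_util)
    {
            struct em_perf_domain *pd = em_cpu_get(cpu);

            if (!pd)
                    return 0;

            return em_pd_energy(pd, max_util, sum_util);
    }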
|
||||
|
||||
|
||||
3. Example driver
|
||||
-----------------
|
||||
|
||||
This section provides a simple example of a CPUFreq driver registering a
|
||||
performance domain in the Energy Model framework using the (fake) 'foo'
|
||||
protocol. The driver implements an est_power() function to be provided to the
|
||||
EM framework.
|
||||
|
||||
-> drivers/cpufreq/foo_cpufreq.c
|
||||
|
||||
01 static int est_power(unsigned long *mW, unsigned long *KHz, int cpu)
|
||||
02 {
|
||||
03 long freq, power;
|
||||
04
|
||||
05 /* Use the 'foo' protocol to ceil the frequency */
|
||||
06 freq = foo_get_freq_ceil(cpu, *KHz);
|
||||
07 if (freq < 0)
|
||||
08 return freq;
|
||||
09
|
||||
10 /* Estimate the power cost for the CPU at the relevant freq. */
|
||||
11 power = foo_estimate_power(cpu, freq);
|
||||
12 if (power < 0)
|
||||
13 return power;
|
||||
14
|
||||
15 /* Return the values to the EM framework */
|
||||
16 *mW = power;
|
||||
17 *KHz = freq;
|
||||
18
|
||||
19 return 0;
|
||||
20 }
|
||||
21
|
||||
22 static int foo_cpufreq_init(struct cpufreq_policy *policy)
|
||||
23 {
|
||||
24 struct em_data_callback em_cb = EM_DATA_CB(est_power);
|
||||
25 int nr_opp, ret;
|
||||
26
|
||||
27 /* Do the actual CPUFreq init work ... */
|
||||
28 ret = do_foo_cpufreq_init(policy);
|
||||
29 if (ret)
|
||||
30 return ret;
|
||||
31
|
||||
32 /* Find the number of OPPs for this policy */
|
||||
33 nr_opp = foo_get_nr_opp(policy);
|
||||
34
|
||||
35 /* And register the new performance domain */
|
||||
36 em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
|
||||
37
|
||||
38 return 0;
|
||||
39 }
|
||||
|
||||
|
||||
4. Support for legacy Energy Models (DEPRECATED)
|
||||
------------------------------------------------
|
||||
|
||||
The Android kernel version 4.14 and before used a different type of EM for EAS,
|
||||
referred to as the 'legacy' EM. The legacy EM relies on the out-of-tree
|
||||
'sched-energy-costs' devicetree bindings to provide the kernel with power costs.
|
||||
The usage of such bindings in Android has now been DEPRECATED in favour of the
|
||||
mainline equivalents.
|
||||
|
||||
The currently supported alternatives to populate the EM include:
|
||||
- using a firmware-based solution such as Arm SCMI (supported in
|
||||
drivers/cpufreq/scmi-cpufreq.c);
|
||||
- using the 'dynamic-power-coefficient' devicetree binding together with
|
||||
PM_OPP. See the of_dev_pm_opp_get_cpu_power() helper in PM_OPP, and the
|
||||
reference implementation in drivers/cpufreq/cpufreq-dt.c.
|
||||
|
||||
In order to ease the transition to the new EM format, Android 4.19 also provides
|
||||
a compatibility driver able to load a legacy EM from DT into the EM framework.
|
||||
*** Please note that THIS FEATURE WILL NOT BE AVAILABLE in future Android
|
||||
kernels, and as such it must be considered only as a temporary workaround. ***
|
||||
|
||||
If you know what you're doing and still want to use this driver, you need to set
|
||||
CONFIG_LEGACY_ENERGY_MODEL_DT=y in your kernel configuration to enable it.
|
425 Documentation/scheduler/sched-energy.txt Normal file
|
@ -0,0 +1,425 @@
|
|||
=======================
|
||||
Energy Aware Scheduling
|
||||
=======================
|
||||
|
||||
1. Introduction
|
||||
---------------
|
||||
|
||||
Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
|
||||
the impact of its decisions on the energy consumed by CPUs. EAS relies on an
|
||||
Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
|
||||
with a minimal impact on throughput. This document aims to provide an
introduction to how EAS works, what the main design decisions behind it are,
and what is needed to get it to run.
|
||||
|
||||
Before going any further, please note that at the time of writing:
|
||||
|
||||
/!\ EAS does not support platforms with symmetric CPU topologies /!\
|
||||
|
||||
EAS operates only on heterogeneous CPU topologies (such as Arm big.LITTLE)
|
||||
because this is where the potential for saving energy through scheduling is
|
||||
the highest.
|
||||
|
||||
The actual EM used by EAS is _not_ maintained by the scheduler, but by a
|
||||
dedicated framework. For details about this framework and what it provides,
|
||||
please refer to its documentation (see Documentation/power/energy-model.txt).
|
||||
|
||||
|
||||
2. Background and Terminology
|
||||
-----------------------------
|
||||
|
||||
To make it clear from the start:
|
||||
- energy = [joule] (resource like a battery on powered devices)
|
||||
- power = energy/time = [joule/second] = [watt]
|
||||
|
||||
The goal of EAS is to minimize energy, while still getting the job done. That
|
||||
is, we want to maximize:
|
||||
|
||||
performance [inst/s]
|
||||
--------------------
|
||||
power [W]
|
||||
|
||||
which is equivalent to minimizing:
|
||||
|
||||
energy [J]
|
||||
-----------
|
||||
instruction
|
||||
|
||||
while still getting 'good' performance. It is essentially an alternative
|
||||
optimization objective to the current performance-only objective for the
|
||||
scheduler. This alternative considers two objectives: energy-efficiency and
|
||||
performance.
|
||||
|
||||
The idea behind introducing an EM is to allow the scheduler to evaluate the
|
||||
implications of its decisions rather than blindly applying energy-saving
|
||||
techniques that may have positive effects only on some platforms. At the same
|
||||
time, the EM must be as simple as possible to minimize the scheduler latency
|
||||
impact.
|
||||
|
||||
In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
|
||||
for the scheduler to decide where a task should run (during wake-up), the EM
|
||||
is used to break the tie between several good CPU candidates and pick the one
|
||||
that is predicted to yield the best energy consumption without harming the
|
||||
system's throughput. The predictions made by EAS rely on specific elements of
|
||||
knowledge about the platform's topology, which include the 'capacity' of CPUs,
|
||||
and their respective energy costs.
|
||||
|
||||
|
||||
3. Topology information
|
||||
-----------------------
|
||||
|
||||
EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
|
||||
differentiate CPUs with different computing throughput. The 'capacity' of a CPU
|
||||
represents the amount of work it can absorb when running at its highest
|
||||
frequency compared to the most capable CPU of the system. Capacity values are
|
||||
normalized in a 1024 range, and are comparable with the utilization signals of
|
||||
tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
|
||||
to capacity and utilization values, EAS is able to estimate how big/busy a
|
||||
task/CPU is, and to take this into consideration when evaluating performance vs
|
||||
energy trade-offs. The capacity of CPUs is provided via arch-specific code
|
||||
through the arch_scale_cpu_capacity() callback.
|
||||
|
||||
The rest of platform knowledge used by EAS is directly read from the Energy
|
||||
Model (EM) framework. The EM of a platform is composed of a power cost table
|
||||
per 'performance domain' in the system (see Documentation/power/energy-model.txt
|
||||
for further details about performance domains).
|
||||
|
||||
The scheduler manages references to the EM objects in the topology code when the
|
||||
scheduling domains are built, or re-built. For each root domain (rd), the
|
||||
scheduler maintains a singly linked list of all performance domains intersecting
|
||||
the current rd->span. Each node in the list contains a pointer to a struct
|
||||
em_perf_domain as provided by the EM framework.
|
||||
|
||||
The lists are attached to the root domains in order to cope with exclusive
|
||||
cpuset configurations. Since the boundaries of exclusive cpusets do not
|
||||
necessarily match those of performance domains, the lists of different root
|
||||
domains can contain duplicate elements.
|
||||
|
||||
Example 1.
|
||||
Let us consider a platform with 12 CPUs, split in 3 performance domains
|
||||
(pd0, pd4 and pd8), organized as follows:
|
||||
|
||||
CPUs: 0 1 2 3 4 5 6 7 8 9 10 11
|
||||
PDs: |--pd0--|--pd4--|---pd8---|
|
||||
RDs: |----rd1----|-----rd2-----|
|
||||
|
||||
Now, consider that userspace decided to split the system with two
|
||||
exclusive cpusets, hence creating two independent root domains, each
|
||||
containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
|
||||
above figure. Since pd4 intersects with both rd1 and rd2, it will be
|
||||
present in the linked list '->pd' attached to each of them:
|
||||
* rd1->pd: pd0 -> pd4
|
||||
* rd2->pd: pd4 -> pd8
|
||||
|
||||
Please note that the scheduler will create two duplicate list nodes for
|
||||
pd4 (one for each list). However, both just hold a pointer to the same
|
||||
shared data structure of the EM framework.
|
||||
|
||||
Since the access to these lists can happen concurrently with hotplug and other
|
||||
things, they are protected by RCU, like the rest of topology structures
|
||||
manipulated by the scheduler.
|
||||
|
||||
EAS also maintains a static key (sched_energy_present) which is enabled when at
|
||||
least one root domain meets all conditions for EAS to start. Those conditions
|
||||
are summarized in Section 6.
|
||||
|
||||
|
||||
4. Energy-Aware task placement
|
||||
------------------------------
|
||||
|
||||
EAS overrides the CFS task wake-up balancing code. It uses the EM of the
|
||||
platform and the PELT signals to choose an energy-efficient target CPU during
|
||||
wake-up balance. When EAS is enabled, select_task_rq_fair() calls
|
||||
find_energy_efficient_cpu() to do the placement decision. This function looks
|
||||
for the CPU with the highest spare capacity (CPU capacity - CPU utilization) in
|
||||
each performance domain since it is the one which will allow us to keep the
|
||||
frequency the lowest. Then, the function checks if placing the task there could
|
||||
save energy compared to leaving it on prev_cpu, i.e. the CPU where the task ran
|
||||
in its previous activation.
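The per-domain scan described above essentially looks for the CPU with the
maximum spare capacity; a simplified, self-contained sketch (not the actual
kernel code):

    /* Simplified sketch of the per-domain candidate scan. */
    static int max_spare_cap_cpu(const unsigned long *cap,
                                 const unsigned long *util,
                                 const int *cpus, int nr_cpus)
    {
            unsigned long best_spare = 0;
            int i, best_cpu = -1;

            for (i = 0; i < nr_cpus; i++) {
                    int cpu = cpus[i];
                    unsigned long spare = cap[cpu] > util[cpu] ?
                                          cap[cpu] - util[cpu] : 0;

                    if (spare > best_spare) {
                            best_spare = spare;
                            best_cpu = cpu;
                    }
            }
            return best_cpu;
    }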
|
||||
|
||||
find_energy_efficient_cpu() uses compute_energy() to estimate what will be the
|
||||
energy consumed by the system if the waking task was migrated. compute_energy()
|
||||
looks at the current utilization landscape of the CPUs and adjusts it to
|
||||
'simulate' the task migration. The EM framework provides the em_pd_energy() API
|
||||
which computes the expected energy consumption of each performance domain for
|
||||
the given utilization landscape.
|
||||
|
||||
An example of energy-optimized task placement decision is detailed below.
|
||||
|
||||
Example 2.
|
||||
Let us consider a (fake) platform with 2 independent performance domains
|
||||
composed of two CPUs each. CPU0 and CPU1 are little CPUs; CPU2 and CPU3
|
||||
are big.
|
||||
|
||||
The scheduler must decide where to place a task P whose util_avg = 200
|
||||
and prev_cpu = 0.
|
||||
|
||||
The current utilization landscape of the CPUs is depicted on the graph
|
||||
below. CPUs 0-3 have a util_avg of 400, 100, 600 and 500 respectively.
|
||||
Each performance domain has three Operating Performance Points (OPPs).
|
||||
The CPU capacity and power cost associated with each OPP is listed in
|
||||
the Energy Model table. The util_avg of P is shown on the figures
|
||||
below as 'PP'.
|
||||
|
||||
CPU util.
|
||||
1024 - - - - - - - Energy Model
|
||||
+-----------+-------------+
|
||||
| Little | Big |
|
||||
768 ============= +-----+-----+------+------+
|
||||
| Cap | Pwr | Cap | Pwr |
|
||||
+-----+-----+------+------+
|
||||
512 =========== - ##- - - - - | 170 | 50 | 512 | 400 |
|
||||
## ## | 341 | 150 | 768 | 800 |
|
||||
341 -PP - - - - ## ## | 512 | 300 | 1024 | 1700 |
|
||||
PP ## ## +-----+-----+------+------+
|
||||
170 -## - - - - ## ##
|
||||
## ## ## ##
|
||||
------------ -------------
|
||||
CPU0 CPU1 CPU2 CPU3
|
||||
|
||||
Current OPP: ===== Other OPP: - - - util_avg (100 each): ##
|
||||
|
||||
|
||||
find_energy_efficient_cpu() will first look for the CPUs with the
|
||||
maximum spare capacity in the two performance domains. In this example,
|
||||
CPU1 and CPU3. Then it will estimate the energy of the system if P was
|
||||
placed on either of them, and check if that would save some energy
|
||||
compared to leaving P on CPU0. EAS assumes that OPPs follow utilization
|
||||
(which is coherent with the behaviour of the schedutil CPUFreq
|
||||
governor, see Section 6. for more details on this topic).
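In other words, for the cases below each performance domain is assumed to run
at the lowest OPP whose capacity covers the highest utilization in that
domain; as a small sketch:

    /*
     * Sketch of the 'OPPs follow utilization' assumption: return the index of
     * the lowest OPP whose capacity covers 'util' (capacities sorted ascending).
     */
    static int opp_for_util(const long *opp_cap, int nr_opps, long util)
    {
            int i;

            for (i = 0; i < nr_opps; i++)
                    if (opp_cap[i] >= util)
                            return i;

            return nr_opps - 1;     /* clamp to the highest OPP */
    }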
|
||||
|
||||
Case 1. P is migrated to CPU1
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1024 - - - - - - -
|
||||
|
||||
Energy calculation:
|
||||
768 ============= * CPU0: 200 / 341 * 150 = 88
|
||||
* CPU1: 300 / 341 * 150 = 131
|
||||
* CPU2: 600 / 768 * 800 = 625
|
||||
512 - - - - - - - ##- - - - - * CPU3: 500 / 768 * 800 = 520
|
||||
## ## => total_energy = 1364
|
||||
341 =========== ## ##
|
||||
PP ## ##
|
||||
170 -## - - PP- ## ##
|
||||
## ## ## ##
|
||||
------------ -------------
|
||||
CPU0 CPU1 CPU2 CPU3
|
||||
|
||||
|
||||
Case 2. P is migrated to CPU3
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1024 - - - - - - -
|
||||
|
||||
Energy calculation:
|
||||
768 ============= * CPU0: 200 / 341 * 150 = 88
|
||||
* CPU1: 100 / 341 * 150 = 43
|
||||
PP * CPU2: 600 / 768 * 800 = 625
|
||||
512 - - - - - - - ##- - -PP - * CPU3: 700 / 768 * 800 = 729
|
||||
## ## => total_energy = 1485
|
||||
341 =========== ## ##
|
||||
## ##
|
||||
170 -## - - - - ## ##
|
||||
## ## ## ##
|
||||
------------ -------------
|
||||
CPU0 CPU1 CPU2 CPU3
|
||||
|
||||
|
||||
Case 3. P stays on prev_cpu / CPU 0
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1024 - - - - - - -
|
||||
|
||||
Energy calculation:
|
||||
768 ============= * CPU0: 400 / 512 * 300 = 234
|
||||
* CPU1: 100 / 512 * 300 = 58
|
||||
* CPU2: 600 / 768 * 800 = 625
|
||||
512 =========== - ##- - - - - * CPU3: 500 / 768 * 800 = 520
|
||||
## ## => total_energy = 1437
|
||||
341 -PP - - - - ## ##
|
||||
PP ## ##
|
||||
170 -## - - - - ## ##
|
||||
## ## ## ##
|
||||
------------ -------------
|
||||
CPU0 CPU1 CPU2 CPU3
|
||||
|
||||
|
||||
From these calculations, Case 1 has the lowest total energy. So CPU 1
|
||||
is the best candidate from an energy-efficiency standpoint.
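The per-CPU terms above follow from energy ~ (util / capacity at the chosen
OPP) * power at that OPP; a standalone sketch reproducing Case 1 (values match
the table up to per-term rounding):

    #include <stdio.h>

    /*
     * Reproduce the Case 1 arithmetic from the example above.
     * The example rounds each term, so small differences are expected;
     * it reports a total of 1364.
     */
    int main(void)
    {
            /* util, capacity at the chosen OPP, power at that OPP */
            const double util[] = { 200, 300, 600, 500 };
            const double cap[]  = { 341, 341, 768, 768 };
            const double pwr[]  = { 150, 150, 800, 800 };
            double total = 0;
            int cpu;

            for (cpu = 0; cpu < 4; cpu++) {
                    double e = util[cpu] / cap[cpu] * pwr[cpu];

                    printf("CPU%d: %.1f\n", cpu, e);
                    total += e;
            }
            printf("total_energy = %.1f\n", total);
            return 0;
    }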
|
||||
|
||||
Big CPUs are generally more power hungry than the little ones and are thus used
|
||||
mainly when a task doesn't fit the littles. However, little CPUs aren't always
|
||||
necessarily more energy-efficient than big CPUs. For some systems, the high OPPs
|
||||
of the little CPUs can be less energy-efficient than the lowest OPPs of the
|
||||
bigs, for example. So, if the little CPUs happen to have enough utilization at
|
||||
a specific point in time, a small task waking up at that moment could be better
|
||||
off executing on the big side in order to save energy, even though it would fit
|
||||
on the little side.
|
||||
|
||||
And even in the case where all OPPs of the big CPUs are less energy-efficient
|
||||
than those of the little, using the big CPUs for a small task might still, under
|
||||
specific conditions, save energy. Indeed, placing a task on a little CPU can
|
||||
result in raising the OPP of the entire performance domain, and that will
|
||||
increase the cost of the tasks already running there. If the waking task is
|
||||
placed on a big CPU, its own execution cost might be higher than if it was
|
||||
running on a little, but it won't impact the other tasks of the little CPUs
|
||||
which will keep running at a lower OPP. So, when considering the total energy
|
||||
consumed by CPUs, the extra cost of running that one task on a big core can be
|
||||
smaller than the cost of raising the OPP on the little CPUs for all the other
|
||||
tasks.
|
||||
|
||||
The examples above would be nearly impossible to get right in a generic way, and
|
||||
for all platforms, without knowing the cost of running at different OPPs on all
|
||||
CPUs of the system. Thanks to its EM-based design, EAS should cope with them
|
||||
correctly without too much trouble. However, in order to ensure a minimal
|
||||
impact on throughput for high-utilization scenarios, EAS also implements another
|
||||
mechanism called 'over-utilization'.
|
||||
|
||||
|
||||
5. Over-utilization
|
||||
-------------------
|
||||
|
||||
From a general standpoint, the use-cases where EAS can help the most are those
|
||||
involving a light/medium CPU utilization. Whenever long CPU-bound tasks are
|
||||
being run, they will require all of the available CPU capacity, and there isn't
|
||||
much that can be done by the scheduler to save energy without severely harming
|
||||
throughput. In order to avoid hurting performance with EAS, CPUs are flagged as
|
||||
'over-utilized' as soon as they are used at more than 80% of their compute
|
||||
capacity. As long as no CPUs are over-utilized in a root domain, load balancing
|
||||
is disabled and EAS overrides the wake-up balancing code. EAS is likely to load
|
||||
the most energy efficient CPUs of the system more than the others if that can be
|
||||
done without harming throughput. So, the load-balancer is disabled to prevent
|
||||
it from breaking the energy-efficient task placement found by EAS. It is safe to
|
||||
do so when the system isn't overutilized since being below the 80% tipping point
|
||||
implies that:
|
||||
|
||||
a. there is some idle time on all CPUs, so the utilization signals used by
|
||||
EAS are likely to accurately represent the 'size' of the various tasks
|
||||
in the system;
|
||||
b. all tasks should already be provided with enough CPU capacity,
|
||||
regardless of their nice values;
|
||||
c. since there is spare capacity all tasks must be blocking/sleeping
|
||||
regularly and balancing at wake-up is sufficient.
|
||||
|
||||
As soon as one CPU goes above the 80% tipping point, at least one of the three
|
||||
assumptions above becomes incorrect. In this scenario, the 'overutilized' flag
|
||||
is raised for the entire root domain, EAS is disabled, and the load-balancer is
|
||||
re-enabled. By doing so, the scheduler falls back onto load-based algorithms for
|
||||
wake-up and load balance under CPU-bound conditions. This better respects
the nice values of tasks.
|
||||
|
||||
Since the notion of overutilization largely relies on detecting whether or not
|
||||
there is some idle time in the system, the CPU capacity 'stolen' by higher
|
||||
(than CFS) scheduling classes (as well as IRQ) must be taken into account. As
|
||||
such, the detection of overutilization accounts for the capacity used not only
|
||||
by CFS tasks, but also by the other scheduling classes and IRQ.
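Using the 80% figure quoted above (the exact margin used by the kernel is an
implementation detail), the per-CPU test boils down to:

    /* Sketch of the per-CPU over-utilization test described in this section. */
    static int cpu_overutilized(unsigned long util, unsigned long capacity)
    {
            /* 'util' accounts for CFS, higher classes and IRQ, as noted above. */
            return util * 10 > capacity * 8;        /* util > 80% of capacity */
    }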
|
||||
|
||||
|
||||
6. Dependencies and requirements for EAS
|
||||
----------------------------------------
|
||||
|
||||
Energy Aware Scheduling depends on the CPUs of the system having specific
|
||||
hardware properties and on other features of the kernel being enabled. This
|
||||
section lists these dependencies and provides hints as to how they can be met.
|
||||
|
||||
|
||||
6.1 - Asymmetric CPU topology
|
||||
|
||||
As mentioned in the introduction, EAS is only supported on platforms with
|
||||
asymmetric CPU topologies for now. This requirement is checked at run-time by
|
||||
looking for the presence of the SD_ASYM_CPUCAPACITY flag when the scheduling
|
||||
domains are built.
|
||||
|
||||
The flag is set/cleared automatically by the scheduler topology code whenever
|
||||
there are CPUs with different capacities in a root domain. The capacities of
|
||||
CPUs are provided by arch-specific code through the arch_scale_cpu_capacity()
|
||||
callback. As an example, arm and arm64 share an implementation of this callback
|
||||
which uses a combination of CPUFreq data and device-tree bindings to compute the
|
||||
capacity of CPUs (see drivers/base/arch_topology.c for more details).
|
||||
|
||||
So, in order to use EAS on your platform your architecture must implement the
|
||||
arch_scale_cpu_capacity() callback, and some of the CPUs must have a lower
|
||||
capacity than others.
|
||||
|
||||
Please note that EAS is not fundamentally incompatible with SMP, but no
|
||||
significant savings on SMP platforms have been observed yet. This restriction
|
||||
could be amended in the future if proven otherwise.
|
||||
|
||||
|
||||
6.2 - Energy Model presence
|
||||
|
||||
EAS uses the EM of a platform to estimate the impact of scheduling decisions on
|
||||
energy. So, your platform must provide power cost tables to the EM framework in
|
||||
order to make EAS start. To do so, please refer to documentation of the
|
||||
independent EM framework in Documentation/power/energy-model.txt.
|
||||
|
||||
Please also note that the scheduling domains need to be re-built after the
|
||||
EM has been registered in order to start EAS.
|
||||
|
||||
|
||||
6.3 - Energy Model complexity
|
||||
|
||||
The task wake-up path is very latency-sensitive. When the EM of a platform is
|
||||
too complex (too many CPUs, too many performance domains, too many performance
|
||||
states, ...), the cost of using it in the wake-up path can become prohibitive.
|
||||
The energy-aware wake-up algorithm has a complexity of:
|
||||
|
||||
C = Nd * (Nc + Ns)
|
||||
|
||||
with: Nd the number of performance domains; Nc the number of CPUs; and Ns the
|
||||
total number of OPPs (ex: for two perf. domains with 4 OPPs each, Ns = 8).
|
||||
|
||||
A complexity check is performed at the root domain level, when scheduling
|
||||
domains are built. EAS will not start on a root domain if its C happens to be
|
||||
higher than the completely arbitrary EM_MAX_COMPLEXITY threshold (2048 at the
|
||||
time of writing).
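For example, a platform with 2 performance domains, 8 CPUs and 4 OPPs per
domain (Ns = 8) gives C = 2 * (8 + 8) = 32, well below the threshold. The
check itself is simply:

    #define EM_MAX_COMPLEXITY       2048    /* arbitrary threshold quoted above */

    /* C = Nd * (Nc + Ns), as defined above. */
    static int eas_complexity_ok(int nd, int nc, int ns)
    {
            return nd * (nc + ns) <= EM_MAX_COMPLEXITY;
    }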
|
||||
|
||||
If you really want to use EAS but the complexity of your platform's Energy
|
||||
Model is too high to be used with a single root domain, you're left with only
|
||||
two possible options:
|
||||
|
||||
1. split your system into separate, smaller, root domains using exclusive
|
||||
cpusets and enable EAS locally on each of them. This option has the
|
||||
benefit of working out of the box but the drawback of preventing load
|
||||
balance between root domains, which can result in an unbalanced system
|
||||
overall;
|
||||
2. submit patches to reduce the complexity of the EAS wake-up algorithm,
|
||||
hence enabling it to cope with larger EMs in reasonable time.
|
||||
|
||||
|
||||
6.4 - Schedutil governor
|
||||
|
||||
EAS tries to predict which OPP the CPUs will be running at in the near future
|
||||
in order to estimate their energy consumption. To do so, it is assumed that OPPs
|
||||
of CPUs follow their utilization.
|
||||
|
||||
Although it is very difficult to provide hard guarantees regarding the accuracy
|
||||
of this assumption in practice (because the hardware might not do what it is
|
||||
told to do, for example), schedutil as opposed to other CPUFreq governors at
|
||||
least _requests_ frequencies calculated using the utilization signals.
|
||||
Consequently, the only sane governor to use together with EAS is schedutil,
|
||||
because it is the only one providing some degree of consistency between
|
||||
frequency requests and energy predictions.
|
||||
|
||||
Using EAS with any other governor than schedutil is not supported.
|
||||
|
||||
|
||||
6.5 Scale-invariant utilization signals
|
||||
|
||||
In order to make accurate predictions across CPUs and for all performance
|
||||
states, EAS needs frequency-invariant and CPU-invariant PELT signals. These can
|
||||
be obtained using the architecture-defined arch_scale{cpu,freq}_capacity()
|
||||
callbacks.
|
||||
|
||||
Using EAS on a platform that doesn't implement these two callbacks is not
|
||||
supported.
|
||||
|
||||
|
||||
6.6 Multithreading (SMT)
|
||||
|
||||
EAS in its current form is SMT unaware and is not able to leverage
|
||||
multithreaded hardware to save energy. EAS considers threads as independent
|
||||
CPUs, which can actually be counter-productive for both performance and energy.
|
||||
|
||||
EAS on SMT is not supported.
|
2 Makefile
|
@ -1,7 +1,7 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 19
|
||||
SUBLEVEL = 27
|
||||
SUBLEVEL = 30
|
||||
EXTRAVERSION =
|
||||
NAME = "People's Front"
|
||||
|
||||
|
|
|
@ -168,6 +168,9 @@
|
|||
interrupt-controller;
|
||||
#interrupt-cells = <3>;
|
||||
interrupt-parent = <&gic>;
|
||||
clock-names = "clkout8";
|
||||
clocks = <&cmu CLK_FIN_PLL>;
|
||||
#clock-cells = <1>;
|
||||
};
|
||||
|
||||
mipi_phy: video-phy {
|
||||
|
|
|
@ -49,7 +49,7 @@
|
|||
};
|
||||
|
||||
emmc_pwrseq: pwrseq {
|
||||
pinctrl-0 = <&sd1_cd>;
|
||||
pinctrl-0 = <&emmc_rstn>;
|
||||
pinctrl-names = "default";
|
||||
compatible = "mmc-pwrseq-emmc";
|
||||
reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
|
||||
|
@ -161,12 +161,6 @@
|
|||
cpu0-supply = <&buck2_reg>;
|
||||
};
|
||||
|
||||
/* RSTN signal for eMMC */
|
||||
&sd1_cd {
|
||||
samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
|
||||
samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
|
||||
};
|
||||
|
||||
&pinctrl_1 {
|
||||
gpio_power_key: power_key {
|
||||
samsung,pins = "gpx1-3";
|
||||
|
@ -184,6 +178,11 @@
|
|||
samsung,pins = "gpx3-7";
|
||||
samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
|
||||
};
|
||||
|
||||
emmc_rstn: emmc-rstn {
|
||||
samsung,pins = "gpk1-2";
|
||||
samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
|
||||
};
|
||||
};
|
||||
|
||||
&ehci {
|
||||
|
|
|
@ -334,7 +334,7 @@
|
|||
buck8_reg: BUCK8 {
|
||||
regulator-name = "vdd_1.8v_ldo";
|
||||
regulator-min-microvolt = <800000>;
|
||||
regulator-max-microvolt = <1500000>;
|
||||
regulator-max-microvolt = <2000000>;
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
};
|
||||
|
|
|
@ -462,7 +462,7 @@
|
|||
};
|
||||
|
||||
gpt: gpt@2098000 {
|
||||
compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
|
||||
compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
|
||||
reg = <0x02098000 0x4000>;
|
||||
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clks IMX6SX_CLK_GPT_BUS>,
|
||||
|
|
|
@ -263,7 +263,7 @@
|
|||
compatible = "amlogic,meson6-dwmac", "snps,dwmac";
|
||||
reg = <0xc9410000 0x10000
|
||||
0xc1108108 0x4>;
|
||||
interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
|
||||
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "macirq";
|
||||
status = "disabled";
|
||||
};
|
||||
|
|
|
@ -125,7 +125,6 @@
|
|||
/* Realtek RTL8211F (0x001cc916) */
|
||||
eth_phy: ethernet-phy@0 {
|
||||
reg = <0>;
|
||||
eee-broken-1000t;
|
||||
interrupt-parent = <&gpio_intc>;
|
||||
/* GPIOH_3 */
|
||||
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
|
||||
|
@ -172,8 +171,7 @@
|
|||
cap-sd-highspeed;
|
||||
disable-wp;
|
||||
|
||||
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
|
||||
cd-inverted;
|
||||
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
|
||||
|
||||
vmmc-supply = <&tflash_vdd>;
|
||||
vqmmc-supply = <&tf_io>;
|
||||
|
|
|
@ -206,8 +206,7 @@
|
|||
cap-sd-highspeed;
|
||||
disable-wp;
|
||||
|
||||
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
|
||||
cd-inverted;
|
||||
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
|
||||
|
||||
vmmc-supply = <&vcc_3v3>;
|
||||
};
|
||||
|
|
|
@ -105,7 +105,7 @@
|
|||
interrupts-extended = <
|
||||
&cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
|
||||
&cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
|
||||
&cpcap 48 1
|
||||
&cpcap 48 0
|
||||
>;
|
||||
interrupt-names =
|
||||
"id_ground", "id_float", "se0conn", "vbusvld",
|
||||
|
|
|
@ -370,6 +370,19 @@
|
|||
compatible = "ti,omap2-onenand";
|
||||
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
|
||||
|
||||
/*
|
||||
* These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
|
||||
* bootloader set values when booted with v4.19 using both N950
|
||||
* and N9 devices (OneNAND Manufacturer: Samsung):
|
||||
*
|
||||
* gpmc cs0 before gpmc_cs_program_settings:
|
||||
* cs0 GPMC_CS_CONFIG1: 0xfd001202
|
||||
* cs0 GPMC_CS_CONFIG2: 0x00181800
|
||||
* cs0 GPMC_CS_CONFIG3: 0x00030300
|
||||
* cs0 GPMC_CS_CONFIG4: 0x18001804
|
||||
* cs0 GPMC_CS_CONFIG5: 0x03171d1d
|
||||
* cs0 GPMC_CS_CONFIG6: 0x97080000
|
||||
*/
|
||||
gpmc,sync-read;
|
||||
gpmc,sync-write;
|
||||
gpmc,burst-length = <16>;
|
||||
|
@ -379,26 +392,27 @@
|
|||
gpmc,device-width = <2>;
|
||||
gpmc,mux-add-data = <2>;
|
||||
gpmc,cs-on-ns = <0>;
|
||||
gpmc,cs-rd-off-ns = <87>;
|
||||
gpmc,cs-wr-off-ns = <87>;
|
||||
gpmc,cs-rd-off-ns = <122>;
|
||||
gpmc,cs-wr-off-ns = <122>;
|
||||
gpmc,adv-on-ns = <0>;
|
||||
gpmc,adv-rd-off-ns = <10>;
|
||||
gpmc,adv-wr-off-ns = <10>;
|
||||
gpmc,oe-on-ns = <15>;
|
||||
gpmc,oe-off-ns = <87>;
|
||||
gpmc,adv-rd-off-ns = <15>;
|
||||
gpmc,adv-wr-off-ns = <15>;
|
||||
gpmc,oe-on-ns = <20>;
|
||||
gpmc,oe-off-ns = <122>;
|
||||
gpmc,we-on-ns = <0>;
|
||||
gpmc,we-off-ns = <87>;
|
||||
gpmc,rd-cycle-ns = <112>;
|
||||
gpmc,wr-cycle-ns = <112>;
|
||||
gpmc,access-ns = <81>;
|
||||
gpmc,we-off-ns = <122>;
|
||||
gpmc,rd-cycle-ns = <148>;
|
||||
gpmc,wr-cycle-ns = <148>;
|
||||
gpmc,access-ns = <117>;
|
||||
gpmc,page-burst-access-ns = <15>;
|
||||
gpmc,bus-turnaround-ns = <0>;
|
||||
gpmc,cycle2cycle-delay-ns = <0>;
|
||||
gpmc,wait-monitoring-ns = <0>;
|
||||
gpmc,clk-activation-ns = <5>;
|
||||
gpmc,wr-data-mux-bus-ns = <30>;
|
||||
gpmc,wr-access-ns = <81>;
|
||||
gpmc,sync-clk-ps = <15000>;
|
||||
gpmc,clk-activation-ns = <10>;
|
||||
gpmc,wr-data-mux-bus-ns = <40>;
|
||||
gpmc,wr-access-ns = <117>;
|
||||
|
||||
gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */
|
||||
|
||||
/*
|
||||
* MTD partition table corresponding to Nokia's MeeGo 1.2
|
||||
|
|
|
@ -53,7 +53,7 @@
|
|||
|
||||
aliases {
|
||||
serial0 = &uart0;
|
||||
/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
|
||||
ethernet0 = &emac;
|
||||
ethernet1 = &sdiowifi;
|
||||
};
|
||||
|
||||
|
|
|
@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
|
|||
if (ssp == NULL)
|
||||
return -ENODEV;
|
||||
|
||||
iounmap(ssp->mmio_base);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
release_mem_region(res->start, resource_size(res));
|
||||
|
||||
|
@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
|
|||
list_del(&ssp->node);
|
||||
mutex_unlock(&ssp_lock);
|
||||
|
||||
kfree(ssp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -118,6 +118,7 @@
|
|||
reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
|
||||
clocks = <&pmic>;
|
||||
clock-names = "ext_clock";
|
||||
post-power-on-delay-ms = <10>;
|
||||
power-off-delay-us = <10>;
|
||||
};
|
||||
|
||||
|
@ -300,7 +301,6 @@
|
|||
|
||||
dwmmc_0: dwmmc0@f723d000 {
|
||||
cap-mmc-highspeed;
|
||||
mmc-hs200-1_8v;
|
||||
non-removable;
|
||||
bus-width = <0x8>;
|
||||
vmmc-supply = <&ldo19>;
|
||||
|
|
|
@ -399,7 +399,7 @@
|
|||
};
|
||||
|
||||
intc: interrupt-controller@9bc0000 {
|
||||
compatible = "arm,gic-v3";
|
||||
compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
|
||||
#interrupt-cells = <3>;
|
||||
interrupt-controller;
|
||||
#redistributor-regions = <1>;
|
||||
|
|
|
@ -1161,6 +1161,9 @@
|
|||
<&cpg CPG_CORE R8A7796_CLK_S3D1>,
|
||||
<&scif_clk>;
|
||||
clock-names = "fck", "brg_int", "scif_clk";
|
||||
dmas = <&dmac1 0x13>, <&dmac1 0x12>,
|
||||
<&dmac2 0x13>, <&dmac2 0x12>;
|
||||
dma-names = "tx", "rx", "tx", "rx";
|
||||
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
|
||||
resets = <&cpg 310>;
|
||||
status = "disabled";
|
||||
|
|
|
@ -951,6 +951,9 @@
|
|||
<&cpg CPG_CORE R8A77965_CLK_S3D1>,
|
||||
<&scif_clk>;
|
||||
clock-names = "fck", "brg_int", "scif_clk";
|
||||
dmas = <&dmac1 0x13>, <&dmac1 0x12>,
|
||||
<&dmac2 0x13>, <&dmac2 0x12>;
|
||||
dma-names = "tx", "rx", "tx", "rx";
|
||||
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
|
||||
resets = <&cpg 310>;
|
||||
status = "disabled";
|
||||
|
|
|
@ -101,6 +101,7 @@
|
|||
sdio_pwrseq: sdio_pwrseq {
|
||||
compatible = "mmc-pwrseq-simple";
|
||||
reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
|
||||
post-power-on-delay-ms = <10>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -58,6 +58,7 @@ CONFIG_ENERGY_MODEL=y
|
|||
CONFIG_CPU_IDLE=y
|
||||
CONFIG_ARM_CPUIDLE=y
|
||||
CONFIG_CPU_FREQ=y
|
||||
CONFIG_CPU_FREQ_TIMES=y
|
||||
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
|
||||
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
|
||||
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
|
||||
|
@ -124,6 +125,7 @@ CONFIG_NF_CT_NETLINK=y
|
|||
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CT=y
|
||||
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
|
||||
CONFIG_NETFILTER_XT_TARGET_MARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
|
||||
|
@ -229,6 +231,7 @@ CONFIG_PPP_DEFLATE=y
|
|||
CONFIG_PPP_MPPE=y
|
||||
CONFIG_PPTP=y
|
||||
CONFIG_PPPOL2TP=y
|
||||
CONFIG_USB_RTL8152=y
|
||||
CONFIG_USB_USBNET=y
|
||||
# CONFIG_USB_NET_AX8817X is not set
|
||||
# CONFIG_USB_NET_AX88179_178A is not set
|
||||
|
@ -299,6 +302,12 @@ CONFIG_DRM=y
|
|||
CONFIG_DRM_VIRTIO_GPU=y
|
||||
CONFIG_SOUND=y
|
||||
CONFIG_SND=y
|
||||
CONFIG_SND_HRTIMER=y
|
||||
# CONFIG_SND_SUPPORT_OLD_API is not set
|
||||
# CONFIG_SND_VERBOSE_PROCFS is not set
|
||||
# CONFIG_SND_DRIVERS is not set
|
||||
CONFIG_SND_INTEL8X0=y
|
||||
# CONFIG_SND_USB is not set
|
||||
CONFIG_HIDRAW=y
|
||||
CONFIG_UHID=y
|
||||
CONFIG_HID_A4TECH=y
|
||||
|
|
|
@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
|
|||
addr < (unsigned long)__entry_text_end) ||
|
||||
(addr >= (unsigned long)__idmap_text_start &&
|
||||
addr < (unsigned long)__idmap_text_end) ||
|
||||
(addr >= (unsigned long)__hyp_text_start &&
|
||||
addr < (unsigned long)__hyp_text_end) ||
|
||||
!!search_exception_tables(addr))
|
||||
return true;
|
||||
|
||||
if (!is_kernel_in_hyp_mode()) {
|
||||
if ((addr >= (unsigned long)__hyp_text_start &&
|
||||
addr < (unsigned long)__hyp_text_end) ||
|
||||
(addr >= (unsigned long)__hyp_idmap_text_start &&
|
||||
if ((addr >= (unsigned long)__hyp_idmap_text_start &&
|
||||
addr < (unsigned long)__hyp_idmap_text_end))
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@
|
|||
status = "okay";
|
||||
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pins_uart2>;
|
||||
pinctrl-0 = <&pins_uart3>;
|
||||
};
|
||||
|
||||
&uart4 {
|
||||
|
@ -196,9 +196,9 @@
|
|||
bias-disable;
|
||||
};
|
||||
|
||||
pins_uart2: uart2 {
|
||||
function = "uart2";
|
||||
groups = "uart2-data", "uart2-hwflow";
|
||||
pins_uart3: uart3 {
|
||||
function = "uart3";
|
||||
groups = "uart3-data", "uart3-hwflow";
|
||||
bias-disable;
|
||||
};
|
||||
|
||||
|
|
|
@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
|
|||
void __init init_IRQ(void)
|
||||
{
|
||||
int i;
|
||||
unsigned int order = get_order(IRQ_STACK_SIZE);
|
||||
|
||||
for (i = 0; i < NR_IRQS; i++)
|
||||
irq_set_noprobe(i);
|
||||
|
@ -62,8 +63,7 @@ void __init init_IRQ(void)
|
|||
arch_init_irq();
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
|
||||
void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
|
||||
void *s = (void *)__get_free_pages(GFP_KERNEL, order);
|
||||
|
||||
irq_stack[i] = s;
|
||||
pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
|
||||
|
|
|
@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
|
|||
static int get_frame_info(struct mips_frame_info *info)
|
||||
{
|
||||
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
|
||||
union mips_instruction insn, *ip, *ip_end;
|
||||
union mips_instruction insn, *ip;
|
||||
const unsigned int max_insns = 128;
|
||||
unsigned int last_insn_size = 0;
|
||||
unsigned int i;
|
||||
|
@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
|
|||
if (!ip)
|
||||
goto err;
|
||||
|
||||
ip_end = (void *)ip + info->func_size;
|
||||
|
||||
for (i = 0; i < max_insns && ip < ip_end; i++) {
|
||||
for (i = 0; i < max_insns; i++) {
|
||||
ip = (void *)ip + last_insn_size;
|
||||
|
||||
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
|
||||
insn.word = ip->halfword[0] << 16;
|
||||
last_insn_size = 2;
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
* This decides where the kernel will search for a free chunk of vm
|
||||
* space during mmap's.
|
||||
*/
|
||||
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
|
||||
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
|
||||
|
||||
#define STACK_TOP TASK_SIZE
|
||||
#define STACK_TOP_MAX STACK_TOP
|
||||
|
|
|
@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
|
|||
BUG_ON(mem_size == 0);
|
||||
|
||||
set_max_mapnr(PFN_DOWN(mem_size));
|
||||
max_low_pfn = memblock_end_of_DRAM();
|
||||
max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
setup_initrd();
|
||||
|
|
|
@ -29,7 +29,8 @@ static void __init zone_sizes_init(void)
|
|||
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
|
||||
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
|
||||
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
|
||||
(unsigned long) PFN_PHYS(max_low_pfn)));
|
||||
#endif
|
||||
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
|
||||
|
||||
|
|
|
@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
|
|||
leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
|
||||
movl %eax, %cr3
|
||||
3:
|
||||
/* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
|
||||
pushl %ecx
|
||||
movl $MSR_EFER, %ecx
|
||||
rdmsr
|
||||
btsl $_EFER_LME, %eax
|
||||
wrmsr
|
||||
popl %ecx
|
||||
|
||||
/* Enable PAE and LA57 (if required) paging modes */
|
||||
movl $X86_CR4_PAE, %eax
|
||||
cmpl $0, %edx
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
|
||||
|
||||
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
|
||||
#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
|
||||
#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
|
||||
|
||||
#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
|
||||
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
#include <linux/efi.h>
|
||||
#include <asm/e820/types.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/efi.h>
|
||||
#include "pgtable.h"
|
||||
#include "../string.h"
|
||||
|
||||
|
@ -37,9 +39,10 @@ int cmdline_find_option_bool(const char *option);
|
|||
|
||||
static unsigned long find_trampoline_placement(void)
|
||||
{
|
||||
unsigned long bios_start, ebda_start;
|
||||
unsigned long bios_start = 0, ebda_start = 0;
|
||||
unsigned long trampoline_start;
|
||||
struct boot_e820_entry *entry;
|
||||
char *signature;
|
||||
int i;
|
||||
|
||||
/*
|
||||
|
@ -47,8 +50,18 @@ static unsigned long find_trampoline_placement(void)
|
|||
* This code is based on reserve_bios_regions().
|
||||
*/
|
||||
|
||||
ebda_start = *(unsigned short *)0x40e << 4;
|
||||
bios_start = *(unsigned short *)0x413 << 10;
|
||||
/*
|
||||
* EFI systems may not provide legacy ROM. The memory may not be mapped
|
||||
* at all.
|
||||
*
|
||||
* Only look for values in the legacy ROM for non-EFI system.
|
||||
*/
|
||||
signature = (char *)&boot_params->efi_info.efi_loader_signature;
|
||||
if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
|
||||
strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
|
||||
ebda_start = *(unsigned short *)0x40e << 4;
|
||||
bios_start = *(unsigned short *)0x413 << 10;
|
||||
}
|
||||
|
||||
if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
|
||||
bios_start = BIOS_START_MAX;
|
||||
|
|
|
@ -58,6 +58,7 @@ CONFIG_ACPI_PROCFS_POWER=y
|
|||
# CONFIG_ACPI_FAN is not set
|
||||
# CONFIG_ACPI_THERMAL is not set
|
||||
# CONFIG_X86_PM_TIMER is not set
|
||||
CONFIG_CPU_FREQ_TIMES=y
|
||||
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
|
||||
CONFIG_X86_ACPI_CPUFREQ=y
|
||||
CONFIG_PCI_MSI=y
|
||||
|
@ -96,6 +97,7 @@ CONFIG_SYN_COOKIES=y
|
|||
CONFIG_NET_IPVTI=y
|
||||
CONFIG_INET_ESP=y
|
||||
# CONFIG_INET_XFRM_MODE_BEET is not set
|
||||
CONFIG_INET_UDP_DIAG=y
|
||||
CONFIG_INET_DIAG_DESTROY=y
|
||||
CONFIG_TCP_CONG_ADVANCED=y
|
||||
# CONFIG_TCP_CONG_BIC is not set
|
||||
|
@ -128,6 +130,7 @@ CONFIG_NF_CT_NETLINK=y
|
|||
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CT=y
|
||||
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
|
||||
CONFIG_NETFILTER_XT_TARGET_MARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
|
||||
|
@ -234,6 +237,7 @@ CONFIG_PPP=y
|
|||
CONFIG_PPP_BSDCOMP=y
|
||||
CONFIG_PPP_DEFLATE=y
|
||||
CONFIG_PPP_MPPE=y
|
||||
CONFIG_USB_RTL8152=y
|
||||
CONFIG_USB_USBNET=y
|
||||
# CONFIG_USB_NET_AX8817X is not set
|
||||
# CONFIG_USB_NET_AX88179_178A is not set
|
||||
|
@ -311,6 +315,12 @@ CONFIG_DRM=y
|
|||
CONFIG_DRM_VIRTIO_GPU=y
|
||||
CONFIG_SOUND=y
|
||||
CONFIG_SND=y
|
||||
CONFIG_SND_HRTIMER=y
|
||||
# CONFIG_SND_SUPPORT_OLD_API is not set
|
||||
# CONFIG_SND_VERBOSE_PROCFS is not set
|
||||
# CONFIG_SND_DRIVERS is not set
|
||||
CONFIG_SND_INTEL8X0=y
|
||||
# CONFIG_SND_USB is not set
|
||||
CONFIG_HIDRAW=y
|
||||
CONFIG_UHID=y
|
||||
CONFIG_HID_A4TECH=y
|
||||
|
|
|
@ -1970,7 +1970,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
|
|||
*/
|
||||
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
|
||||
{
|
||||
kfree(cpuc->shared_regs);
|
||||
intel_cpuc_finish(cpuc);
|
||||
kfree(cpuc);
|
||||
}
|
||||
|
||||
|
@ -1982,14 +1982,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
|
|||
cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
|
||||
if (!cpuc)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* only needed, if we have extra_regs */
|
||||
if (x86_pmu.extra_regs) {
|
||||
cpuc->shared_regs = allocate_shared_regs(cpu);
|
||||
if (!cpuc->shared_regs)
|
||||
goto error;
|
||||
}
|
||||
cpuc->is_fake = 1;
|
||||
|
||||
if (intel_cpuc_prepare(cpuc, cpu))
|
||||
goto error;
|
||||
|
||||
return cpuc;
|
||||
error:
|
||||
free_fake_cpuc(cpuc);
|
||||
|
|
|
@ -1995,6 +1995,39 @@ static void intel_pmu_nhm_enable_all(int added)
|
|||
intel_pmu_enable_all(added);
|
||||
}
|
||||
|
||||
static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
|
||||
{
|
||||
u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
|
||||
|
||||
if (cpuc->tfa_shadow != val) {
|
||||
cpuc->tfa_shadow = val;
|
||||
wrmsrl(MSR_TSX_FORCE_ABORT, val);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
|
||||
{
|
||||
/*
|
||||
* We're going to use PMC3, make sure TFA is set before we touch it.
|
||||
*/
|
||||
if (cntr == 3 && !cpuc->is_fake)
|
||||
intel_set_tfa(cpuc, true);
|
||||
}
|
||||
|
||||
static void intel_tfa_pmu_enable_all(int added)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
|
||||
/*
|
||||
* If we find PMC3 is no longer used when we enable the PMU, we can
|
||||
* clear TFA.
|
||||
*/
|
||||
if (!test_bit(3, cpuc->active_mask))
|
||||
intel_set_tfa(cpuc, false);
|
||||
|
||||
intel_pmu_enable_all(added);
|
||||
}
|
||||
|
||||
static inline u64 intel_pmu_get_status(void)
|
||||
{
|
||||
u64 status;
|
||||
|
@ -2652,6 +2685,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
|
|||
raw_spin_unlock(&excl_cntrs->lock);
|
||||
}
|
||||
|
||||
static struct event_constraint *
|
||||
dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
|
||||
{
|
||||
WARN_ON_ONCE(!cpuc->constraint_list);
|
||||
|
||||
if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
|
||||
struct event_constraint *cx;
|
||||
|
||||
/*
|
||||
* grab pre-allocated constraint entry
|
||||
*/
|
||||
cx = &cpuc->constraint_list[idx];
|
||||
|
||||
/*
|
||||
* initialize dynamic constraint
|
||||
* with static constraint
|
||||
*/
|
||||
*cx = *c;
|
||||
|
||||
/*
|
||||
* mark constraint as dynamic
|
||||
*/
|
||||
cx->flags |= PERF_X86_EVENT_DYNAMIC;
|
||||
c = cx;
|
||||
}
|
||||
|
||||
return c;
|
||||
}
|
||||
|
||||
static struct event_constraint *
|
||||
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
|
||||
int idx, struct event_constraint *c)
|
||||
|
@ -2682,27 +2744,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
|
|||
* only needed when constraint has not yet
|
||||
* been cloned (marked dynamic)
|
||||
*/
|
||||
if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
|
||||
struct event_constraint *cx;
|
||||
|
||||
/*
|
||||
* grab pre-allocated constraint entry
|
||||
*/
|
||||
cx = &cpuc->constraint_list[idx];
|
||||
|
||||
/*
|
||||
* initialize dynamic constraint
|
||||
* with static constraint
|
||||
*/
|
||||
*cx = *c;
|
||||
|
||||
/*
|
||||
* mark constraint as dynamic, so we
|
||||
* can free it later on
|
||||
*/
|
||||
cx->flags |= PERF_X86_EVENT_DYNAMIC;
|
||||
c = cx;
|
||||
}
|
||||
c = dyn_constraint(cpuc, c, idx);
|
||||
|
||||
/*
|
||||
* From here on, the constraint is dynamic.
|
||||
|
@ -3229,6 +3271,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
|
|||
return c;
|
||||
}
|
||||
|
||||
static bool allow_tsx_force_abort = true;
|
||||
|
||||
static struct event_constraint *
|
||||
tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
|
||||
struct perf_event *event)
|
||||
{
|
||||
struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
|
||||
|
||||
/*
|
||||
* Without TFA we must not use PMC3.
|
||||
*/
|
||||
if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
|
||||
c = dyn_constraint(cpuc, c, idx);
|
||||
c->idxmsk64 &= ~(1ULL << 3);
|
||||
c->weight--;
|
||||
}
|
||||
|
||||
return c;
|
||||
}
|
||||
|
||||
/*
|
||||
* Broadwell:
|
||||
*
|
||||
|
@ -3282,7 +3344,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
|
|||
return x86_event_sysfs_show(page, config, event);
|
||||
}
|
||||
|
||||
struct intel_shared_regs *allocate_shared_regs(int cpu)
|
||||
static struct intel_shared_regs *allocate_shared_regs(int cpu)
|
||||
{
|
||||
struct intel_shared_regs *regs;
|
||||
int i;
|
||||
|
@ -3314,23 +3376,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
|
|||
return c;
|
||||
}
|
||||
|
||||
static int intel_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
                        goto err;
        }

        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
        if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
                size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);

                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
                cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!cpuc->constraint_list)
                        goto err_shared_regs;
        }

        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
                if (!cpuc->excl_cntrs)
                        goto err_constraint_list;

@@ -3352,6 +3415,11 @@ static int intel_pmu_cpu_prepare(int cpu)
        return -ENOMEM;
}

static int intel_pmu_cpu_prepare(int cpu)
{
        return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
}

static void flip_smm_bit(void *data)
{
        unsigned long set = *(unsigned long *)data;

@@ -3423,9 +3491,8 @@ static void intel_pmu_cpu_starting(int cpu)
        }
}

static void free_excl_cntrs(int cpu)
static void free_excl_cntrs(struct cpu_hw_events *cpuc)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct intel_excl_cntrs *c;

        c = cpuc->excl_cntrs;

@@ -3433,9 +3500,10 @@ static void free_excl_cntrs(int cpu)
                if (c->core_id == -1 || --c->refcnt == 0)
                        kfree(c);
                cpuc->excl_cntrs = NULL;
                kfree(cpuc->constraint_list);
                cpuc->constraint_list = NULL;
        }

        kfree(cpuc->constraint_list);
        cpuc->constraint_list = NULL;
}

static void intel_pmu_cpu_dying(int cpu)

@@ -3443,9 +3511,8 @@ static void intel_pmu_cpu_dying(int cpu)
        fini_debug_store_on_cpu(cpu);
}

static void intel_pmu_cpu_dead(int cpu)
void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct intel_shared_regs *pc;

        pc = cpuc->shared_regs;

@@ -3455,7 +3522,12 @@ static void intel_pmu_cpu_dead(int cpu)
                cpuc->shared_regs = NULL;
        }

        free_excl_cntrs(cpu);
        free_excl_cntrs(cpuc);
}

static void intel_pmu_cpu_dead(int cpu)
{
        intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
}

static void intel_pmu_sched_task(struct perf_event_context *ctx,

@@ -3917,8 +3989,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
        NULL
};

static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);

static struct attribute *intel_pmu_attrs[] = {
        &dev_attr_freeze_on_smi.attr,
        NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
        NULL,
};

@@ -4374,6 +4449,15 @@ __init int intel_pmu_init(void)
                x86_pmu.cpu_events = get_hsw_events_attrs();
                intel_pmu_pebs_data_source_skl(
                        boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);

                if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
                        x86_pmu.flags |= PMU_FL_TFA;
                        x86_pmu.get_event_constraints = tfa_get_event_constraints;
                        x86_pmu.enable_all = intel_tfa_pmu_enable_all;
                        x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
                        intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
                }

                pr_cont("Skylake events, ");
                name = "skylake";
                break;

@@ -4515,7 +4599,7 @@ static __init int fixup_ht_bug(void)
        hardlockup_detector_perf_restart();

        for_each_online_cpu(c)
                free_excl_cntrs(c);
                free_excl_cntrs(&per_cpu(cpu_hw_events, c));

        cpus_read_unlock();
        pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");

@@ -242,6 +242,11 @@ struct cpu_hw_events {
        struct intel_excl_cntrs *excl_cntrs;
        int excl_thread_id; /* 0 or 1 */

        /*
         * SKL TSX_FORCE_ABORT shadow
         */
        u64 tfa_shadow;

        /*
         * AMD specific bits
         */

@@ -679,6 +684,7 @@ do { \
#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA 0x20 /* deal with TSX force abort */

#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

@@ -887,7 +893,8 @@ struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);
extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);

@@ -1023,9 +1030,13 @@ static inline int intel_pmu_init(void)
        return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
        return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
        return NULL;
}

static inline int is_ht_workaround_enabled(void)

@@ -340,6 +340,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */

@@ -629,6 +629,12 @@

#define MSR_IA32_TSC_DEADLINE 0x000006E0


#define MSR_TSX_FORCE_ABORT 0x0000010F

#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)

/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
#define MSR_IA32_MCG_EBX 0x00000181

@@ -7,7 +7,11 @@
#endif

#ifdef CONFIG_KASAN
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_STACK_ORDER 2
#else
#define KASAN_STACK_ORDER 1
#endif
#else
#define KASAN_STACK_ORDER 0
#endif

@@ -818,11 +818,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static void init_amd_zn(struct cpuinfo_x86 *c)
{
        set_cpu_cap(c, X86_FEATURE_ZEN);
        /*
         * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
         * all up to and including B1.
         */
        if (c->x86_model <= 1 && c->x86_stepping <= 1)

        /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
        if (!cpu_has(c, X86_FEATURE_CPB))
                set_cpu_cap(c, X86_FEATURE_CPB);
}

@@ -707,7 +707,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
        if (!p) {
                return ret;
        } else {
                if (boot_cpu_data.microcode == p->patch_id)
                if (boot_cpu_data.microcode >= p->patch_id)
                        return ret;

                ret = UCODE_NEW;

@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
        struct efi_info *current_ei = &boot_params.efi_info;
        struct efi_info *ei = &params->efi_info;

        if (!efi_enabled(EFI_RUNTIME_SERVICES))
                return 0;

        if (!current_ei->efi_memmap_size)
                return 0;

@@ -641,6 +641,22 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);

static void quirk_intel_th_dnv(struct pci_dev *dev)
{
        struct resource *r = &dev->resource[4];

        /*
         * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
         * appears to be 4 MB in reality.
         */
        if (r->end == r->start + 0x7ff) {
                r->start = 0;
                r->end = 0x3fffff;
                r->flags |= IORESOURCE_UNSET;
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);

#ifdef CONFIG_PHYS_ADDR_T_64BIT

#define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)

@ -33,6 +33,7 @@ CONFIG_SMP=y
|
|||
CONFIG_HOTPLUG_CPU=y
|
||||
# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
|
||||
# CONFIG_PCI is not set
|
||||
CONFIG_VECTORS_OFFSET=0x00002000
|
||||
CONFIG_XTENSA_PLATFORM_XTFPGA=y
|
||||
CONFIG_CMDLINE_BOOL=y
|
||||
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
|
||||
|
|
|
@ -280,12 +280,13 @@ should_never_return:
|
|||
|
||||
movi a2, cpu_start_ccount
|
||||
1:
|
||||
memw
|
||||
l32i a3, a2, 0
|
||||
beqi a3, 0, 1b
|
||||
movi a3, 0
|
||||
s32i a3, a2, 0
|
||||
memw
|
||||
1:
|
||||
memw
|
||||
l32i a3, a2, 0
|
||||
beqi a3, 0, 1b
|
||||
wsr a3, ccount
|
||||
|
@ -321,11 +322,13 @@ ENTRY(cpu_restart)
|
|||
rsr a0, prid
|
||||
neg a2, a0
|
||||
movi a3, cpu_start_id
|
||||
memw
|
||||
s32i a2, a3, 0
|
||||
#if XCHAL_DCACHE_IS_WRITEBACK
|
||||
dhwbi a3, 0
|
||||
#endif
|
||||
1:
|
||||
memw
|
||||
l32i a2, a3, 0
|
||||
dhi a3, 0
|
||||
bne a2, a0, 1b
|
||||
|
|
|
@ -320,8 +320,8 @@ unsigned long get_wchan(struct task_struct *p)
|
|||
|
||||
/* Stack layout: sp-4: ra, sp-3: sp' */
|
||||
|
||||
pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
|
||||
sp = *(unsigned long *)sp - 3;
|
||||
pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
|
||||
sp = SPILL_SLOT(sp, 1);
|
||||
} while (count++ < 16);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
|||
{
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < max_cpus; ++i)
|
||||
for_each_possible_cpu(i)
|
||||
set_cpu_present(i, true);
|
||||
}
|
||||
|
||||
|
@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
|
|||
pr_info("%s: Core Count = %d\n", __func__, ncpus);
|
||||
pr_info("%s: Core Id = %d\n", __func__, core_id);
|
||||
|
||||
if (ncpus > NR_CPUS) {
|
||||
ncpus = NR_CPUS;
|
||||
pr_info("%s: limiting core count by %d\n", __func__, ncpus);
|
||||
}
|
||||
|
||||
for (i = 0; i < ncpus; ++i)
|
||||
set_cpu_possible(i, true);
|
||||
}
|
||||
|
@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
|
|||
int i;
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
cpu_start_id = cpu;
|
||||
system_flush_invalidate_dcache_range(
|
||||
(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
|
||||
WRITE_ONCE(cpu_start_id, cpu);
|
||||
/* Pairs with the third memw in the cpu_restart */
|
||||
mb();
|
||||
system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
|
||||
sizeof(cpu_start_id));
|
||||
#endif
|
||||
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
|
||||
|
||||
|
@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
|
|||
ccount = get_ccount();
|
||||
while (!ccount);
|
||||
|
||||
cpu_start_ccount = ccount;
|
||||
WRITE_ONCE(cpu_start_ccount, ccount);
|
||||
|
||||
while (time_before(jiffies, timeout)) {
|
||||
do {
|
||||
/*
|
||||
* Pairs with the first two memws in the
|
||||
* .Lboot_secondary.
|
||||
*/
|
||||
mb();
|
||||
if (!cpu_start_ccount)
|
||||
break;
|
||||
}
|
||||
ccount = READ_ONCE(cpu_start_ccount);
|
||||
} while (ccount && time_before(jiffies, timeout));
|
||||
|
||||
if (cpu_start_ccount) {
|
||||
if (ccount) {
|
||||
smp_call_function_single(0, mx_cpu_stop,
|
||||
(void *)cpu, 1);
|
||||
cpu_start_ccount = 0;
|
||||
(void *)cpu, 1);
|
||||
WRITE_ONCE(cpu_start_ccount, 0);
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
|
|||
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
|
||||
__func__, cpu, idle, start_info.stack);
|
||||
|
||||
init_completion(&cpu_running);
|
||||
ret = boot_secondary(cpu, idle);
|
||||
if (ret == 0) {
|
||||
wait_for_completion_timeout(&cpu_running,
|
||||
|
@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
|
|||
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
|
||||
while (time_before(jiffies, timeout)) {
|
||||
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
|
||||
sizeof(cpu_start_id));
|
||||
if (cpu_start_id == -cpu) {
|
||||
sizeof(cpu_start_id));
|
||||
/* Pairs with the second memw in the cpu_restart */
|
||||
mb();
|
||||
if (READ_ONCE(cpu_start_id) == -cpu) {
|
||||
platform_cpu_kill(cpu);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
|
|||
container_of(evt, struct ccount_timer, evt);
|
||||
|
||||
if (timer->irq_enabled) {
|
||||
disable_irq(evt->irq);
|
||||
disable_irq_nosync(evt->irq);
|
||||
timer->irq_enabled = 0;
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -72,6 +72,7 @@
|
|||
#include <linux/sched/loadavg.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <trace/events/block.h>
|
||||
#include <linux/blk-mq.h>
|
||||
#include "blk-rq-qos.h"
|
||||
#include "blk-stat.h"
|
||||
|
||||
|
@ -568,6 +569,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
|
|||
return;
|
||||
|
||||
enabled = blk_iolatency_enabled(iolat->blkiolat);
|
||||
if (!enabled)
|
||||
return;
|
||||
|
||||
while (blkg && blkg->parent) {
|
||||
iolat = blkg_to_lat(blkg);
|
||||
if (!iolat) {
|
||||
|
@ -577,7 +581,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
|
|||
rqw = &iolat->rq_wait;
|
||||
|
||||
atomic_dec(&rqw->inflight);
|
||||
if (!enabled || iolat->min_lat_nsec == 0)
|
||||
if (iolat->min_lat_nsec == 0)
|
||||
goto next;
|
||||
iolatency_record_time(iolat, &bio->bi_issue, now,
|
||||
issue_as_root);
|
||||
|
@ -721,10 +725,13 @@ int blk_iolatency_init(struct request_queue *q)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
|
||||
/*
|
||||
* return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
|
||||
* return 0.
|
||||
*/
|
||||
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
|
||||
{
|
||||
struct iolatency_grp *iolat = blkg_to_lat(blkg);
|
||||
struct blk_iolatency *blkiolat = iolat->blkiolat;
|
||||
u64 oldval = iolat->min_lat_nsec;
|
||||
|
||||
iolat->min_lat_nsec = val;
|
||||
|
@ -733,9 +740,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
|
|||
BLKIOLATENCY_MAX_WIN_SIZE);
|
||||
|
||||
if (!oldval && val)
|
||||
atomic_inc(&blkiolat->enabled);
|
||||
return 1;
|
||||
if (oldval && !val)
|
||||
atomic_dec(&blkiolat->enabled);
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
|
||||
|
@ -768,6 +776,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
|
|||
u64 lat_val = 0;
|
||||
u64 oldval;
|
||||
int ret;
|
||||
int enable = 0;
|
||||
|
||||
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
|
||||
if (ret)
|
||||
|
@ -803,7 +812,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
|
|||
blkg = ctx.blkg;
|
||||
oldval = iolat->min_lat_nsec;
|
||||
|
||||
iolatency_set_min_lat_nsec(blkg, lat_val);
|
||||
enable = iolatency_set_min_lat_nsec(blkg, lat_val);
|
||||
if (enable) {
|
||||
WARN_ON_ONCE(!blk_get_queue(blkg->q));
|
||||
blkg_get(blkg);
|
||||
}
|
||||
|
||||
if (oldval != iolat->min_lat_nsec) {
|
||||
iolatency_clear_scaling(blkg);
|
||||
}
|
||||
|
@ -811,6 +825,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
|
|||
ret = 0;
|
||||
out:
|
||||
blkg_conf_finish(&ctx);
|
||||
if (ret == 0 && enable) {
|
||||
struct iolatency_grp *tmp = blkg_to_lat(blkg);
|
||||
struct blk_iolatency *blkiolat = tmp->blkiolat;
|
||||
|
||||
blk_mq_freeze_queue(blkg->q);
|
||||
|
||||
if (enable == 1)
|
||||
atomic_inc(&blkiolat->enabled);
|
||||
else if (enable == -1)
|
||||
atomic_dec(&blkiolat->enabled);
|
||||
else
|
||||
WARN_ON_ONCE(1);
|
||||
|
||||
blk_mq_unfreeze_queue(blkg->q);
|
||||
|
||||
blkg_put(blkg);
|
||||
blk_put_queue(blkg->q);
|
||||
}
|
||||
return ret ?: nbytes;
|
||||
}
|
||||
|
||||
|
@ -910,8 +942,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
|
|||
{
|
||||
struct iolatency_grp *iolat = pd_to_lat(pd);
|
||||
struct blkcg_gq *blkg = lat_to_blkg(iolat);
|
||||
struct blk_iolatency *blkiolat = iolat->blkiolat;
|
||||
int ret;
|
||||
|
||||
iolatency_set_min_lat_nsec(blkg, 0);
|
||||
ret = iolatency_set_min_lat_nsec(blkg, 0);
|
||||
if (ret == 1)
|
||||
atomic_inc(&blkiolat->enabled);
|
||||
if (ret == -1)
|
||||
atomic_dec(&blkiolat->enabled);
|
||||
iolatency_clear_scaling(blkg);
|
||||
}
|
||||
|
||||
|
|
|
@ -984,9 +984,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
|
|||
drv->remove(dev);
|
||||
|
||||
device_links_driver_cleanup(dev);
|
||||
dma_deconfigure(dev);
|
||||
|
||||
devres_release_all(dev);
|
||||
dma_deconfigure(dev);
|
||||
dev->driver = NULL;
|
||||
dev_set_drvdata(dev, NULL);
|
||||
if (dev->pm_domain && dev->pm_domain->dismiss)
|
||||
|
|
|
@ -544,10 +544,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
|
|||
hdev->bus);
|
||||
|
||||
if (!btrtl_dev->ic_info) {
|
||||
rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
|
||||
rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
|
||||
lmp_subver, hci_rev, hci_ver);
|
||||
ret = -EINVAL;
|
||||
goto err_free;
|
||||
return btrtl_dev;
|
||||
}
|
||||
|
||||
if (btrtl_dev->ic_info->has_rom_version) {
|
||||
|
@ -602,6 +601,11 @@ int btrtl_download_firmware(struct hci_dev *hdev,
|
|||
* standard btusb. Once that firmware is uploaded, the subver changes
|
||||
* to a different value.
|
||||
*/
|
||||
if (!btrtl_dev->ic_info) {
|
||||
rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch (btrtl_dev->ic_info->lmp_subver) {
|
||||
case RTL_ROM_LMP_8723A:
|
||||
case RTL_ROM_LMP_3499:
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
#include <linux/wait.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/nospec.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
|
|||
TicCard = st_loc.tic_des_from_pc; /* tic number to send */
|
||||
IndexCard = NumCard - 1;
|
||||
|
||||
if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
|
||||
if (IndexCard >= MAX_BOARD)
|
||||
return -EINVAL;
|
||||
IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
|
||||
|
||||
if (!apbs[IndexCard].RamIO)
|
||||
return -EINVAL;
|
||||
|
||||
#ifdef DEBUG
|
||||
|
@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|||
unsigned char IndexCard;
|
||||
void __iomem *pmem;
|
||||
int ret = 0;
|
||||
static int warncount = 10;
|
||||
volatile unsigned char byte_reset_it;
|
||||
struct st_ram_io *adgl;
|
||||
void __user *argp = (void __user *)arg;
|
||||
|
@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|||
mutex_lock(&ac_mutex);
|
||||
IndexCard = adgl->num_card-1;
|
||||
|
||||
if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
|
||||
static int warncount = 10;
|
||||
if (warncount) {
|
||||
printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
|
||||
warncount--;
|
||||
}
|
||||
kfree(adgl);
|
||||
mutex_unlock(&ac_mutex);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (cmd != 6 && IndexCard >= MAX_BOARD)
|
||||
goto err;
|
||||
IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
|
||||
|
||||
if (cmd != 6 && !apbs[IndexCard].RamIO)
|
||||
goto err;
|
||||
|
||||
switch (cmd) {
|
||||
|
||||
|
@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|||
kfree(adgl);
|
||||
mutex_unlock(&ac_mutex);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
if (warncount) {
|
||||
pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
|
||||
(int)IndexCard + 1);
|
||||
warncount--;
|
||||
}
|
||||
kfree(adgl);
|
||||
mutex_unlock(&ac_mutex);
|
||||
return -EINVAL;
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -131,8 +131,8 @@ static const char * const gcc_parent_names_6[] = {
|
|||
"core_bi_pll_test_se",
|
||||
};
|
||||
|
||||
static const char * const gcc_parent_names_7[] = {
|
||||
"bi_tcxo",
|
||||
static const char * const gcc_parent_names_7_ao[] = {
|
||||
"bi_tcxo_ao",
|
||||
"gpll0",
|
||||
"gpll0_out_even",
|
||||
"core_bi_pll_test_se",
|
||||
|
@ -144,6 +144,12 @@ static const char * const gcc_parent_names_8[] = {
|
|||
"core_bi_pll_test_se",
|
||||
};
|
||||
|
||||
static const char * const gcc_parent_names_8_ao[] = {
|
||||
"bi_tcxo_ao",
|
||||
"gpll0",
|
||||
"core_bi_pll_test_se",
|
||||
};
|
||||
|
||||
static const struct parent_map gcc_parent_map_10[] = {
|
||||
{ P_BI_TCXO, 0 },
|
||||
{ P_GPLL0_OUT_MAIN, 1 },
|
||||
|
@ -226,7 +232,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
|
|||
.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
|
||||
.clkr.hw.init = &(struct clk_init_data){
|
||||
.name = "gcc_cpuss_ahb_clk_src",
|
||||
.parent_names = gcc_parent_names_7,
|
||||
.parent_names = gcc_parent_names_7_ao,
|
||||
.num_parents = 4,
|
||||
.ops = &clk_rcg2_ops,
|
||||
},
|
||||
|
@ -245,7 +251,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
|
|||
.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
|
||||
.clkr.hw.init = &(struct clk_init_data){
|
||||
.name = "gcc_cpuss_rbcpr_clk_src",
|
||||
.parent_names = gcc_parent_names_8,
|
||||
.parent_names = gcc_parent_names_8_ao,
|
||||
.num_parents = 3,
|
||||
.ops = &clk_rcg2_ops,
|
||||
},
|
||||
|
|
|
@ -367,8 +367,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
|
|||
num_dividers = i;
|
||||
|
||||
tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
|
||||
if (!tmp)
|
||||
if (!tmp) {
|
||||
*table = ERR_PTR(-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
valid_div = 0;
|
||||
*width = 0;
|
||||
|
@ -403,6 +405,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
|
|||
{
|
||||
struct clk_omap_divider *div;
|
||||
struct clk_omap_reg *reg;
|
||||
int ret;
|
||||
|
||||
if (!setup)
|
||||
return NULL;
|
||||
|
@ -422,6 +425,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
|
|||
div->flags |= CLK_DIVIDER_POWER_OF_TWO;
|
||||
|
||||
div->table = _get_div_table_from_setup(setup, &div->width);
|
||||
if (IS_ERR(div->table)) {
|
||||
ret = PTR_ERR(div->table);
|
||||
kfree(div);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
||||
div->shift = setup->bit_shift;
|
||||
div->latch = -EINVAL;
|
||||
|
|
|
@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task)
|
|||
{
|
||||
struct cn_msg *msg;
|
||||
struct proc_event *ev;
|
||||
struct task_struct *parent;
|
||||
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
|
||||
|
||||
if (atomic_read(&proc_event_num_listeners) < 1)
|
||||
|
@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task)
|
|||
ev->what = PROC_EVENT_COREDUMP;
|
||||
ev->event_data.coredump.process_pid = task->pid;
|
||||
ev->event_data.coredump.process_tgid = task->tgid;
|
||||
ev->event_data.coredump.parent_pid = task->real_parent->pid;
|
||||
ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
|
||||
|
||||
rcu_read_lock();
|
||||
if (pid_alive(task)) {
|
||||
parent = rcu_dereference(task->real_parent);
|
||||
ev->event_data.coredump.parent_pid = parent->pid;
|
||||
ev->event_data.coredump.parent_tgid = parent->tgid;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
|
@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
|
|||
{
|
||||
struct cn_msg *msg;
|
||||
struct proc_event *ev;
|
||||
struct task_struct *parent;
|
||||
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
|
||||
|
||||
if (atomic_read(&proc_event_num_listeners) < 1)
|
||||
|
@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task)
|
|||
ev->event_data.exit.process_tgid = task->tgid;
|
||||
ev->event_data.exit.exit_code = task->exit_code;
|
||||
ev->event_data.exit.exit_signal = task->exit_signal;
|
||||
ev->event_data.exit.parent_pid = task->real_parent->pid;
|
||||
ev->event_data.exit.parent_tgid = task->real_parent->tgid;
|
||||
|
||||
rcu_read_lock();
|
||||
if (pid_alive(task)) {
|
||||
parent = rcu_dereference(task->real_parent);
|
||||
ev->event_data.exit.parent_pid = parent->pid;
|
||||
ev->event_data.exit.parent_tgid = parent->tgid;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
|
|
|
@ -358,7 +358,7 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
|
|||
}
|
||||
|
||||
cpufreq_stats_record_transition(policy, freqs->new);
|
||||
cpufreq_times_record_transition(freqs);
|
||||
cpufreq_times_record_transition(policy, freqs->new);
|
||||
policy->cur = freqs->new;
|
||||
}
|
||||
}
|
||||
|
@ -555,13 +555,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
|
|||
* SYSFS INTERFACE *
|
||||
*********************************************************************/
|
||||
static ssize_t show_boost(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
|
||||
}
|
||||
|
||||
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int ret, enable;
|
||||
|
||||
|
@ -1869,9 +1869,15 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
|
|||
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq)
|
||||
{
|
||||
int ret;
|
||||
|
||||
target_freq = clamp_val(target_freq, policy->min, policy->max);
|
||||
|
||||
return cpufreq_driver->fast_switch(policy, target_freq);
|
||||
ret = cpufreq_driver->fast_switch(policy, target_freq);
|
||||
if (ret)
|
||||
cpufreq_times_record_transition(policy, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
|
||||
|
||||
|
|
|
@ -32,11 +32,17 @@ static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
|
|||
static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
|
||||
static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
|
||||
|
||||
struct concurrent_times {
|
||||
atomic64_t active[NR_CPUS];
|
||||
atomic64_t policy[NR_CPUS];
|
||||
};
|
||||
|
||||
struct uid_entry {
|
||||
uid_t uid;
|
||||
unsigned int max_state;
|
||||
struct hlist_node hash;
|
||||
struct rcu_head rcu;
|
||||
struct concurrent_times *concurrent_times;
|
||||
u64 time_in_state[0];
|
||||
};
|
||||
|
||||
|
@ -87,6 +93,7 @@ static struct uid_entry *find_uid_entry_locked(uid_t uid)
|
|||
static struct uid_entry *find_or_register_uid_locked(uid_t uid)
|
||||
{
|
||||
struct uid_entry *uid_entry, *temp;
|
||||
struct concurrent_times *times;
|
||||
unsigned int max_state = READ_ONCE(next_offset);
|
||||
size_t alloc_size = sizeof(*uid_entry) + max_state *
|
||||
sizeof(uid_entry->time_in_state[0]);
|
||||
|
@ -115,9 +122,15 @@ static struct uid_entry *find_or_register_uid_locked(uid_t uid)
|
|||
uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
|
||||
if (!uid_entry)
|
||||
return NULL;
|
||||
times = kzalloc(sizeof(*times), GFP_ATOMIC);
|
||||
if (!times) {
|
||||
kfree(uid_entry);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
uid_entry->uid = uid;
|
||||
uid_entry->max_state = max_state;
|
||||
uid_entry->concurrent_times = times;
|
||||
|
||||
hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
|
||||
|
||||
|
@ -180,10 +193,12 @@ static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
|
|||
|
||||
static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
||||
{
|
||||
(*pos)++;
|
||||
do {
|
||||
(*pos)++;
|
||||
|
||||
if (*pos >= HASH_SIZE(uid_hash_table))
|
||||
return NULL;
|
||||
if (*pos >= HASH_SIZE(uid_hash_table))
|
||||
return NULL;
|
||||
} while (hlist_empty(&uid_hash_table[*pos]));
|
||||
|
||||
return &uid_hash_table[*pos];
|
||||
}
|
||||
|
@ -207,7 +222,8 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
|
|||
if (freqs->freq_table[i] ==
|
||||
CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
seq_printf(m, " %d", freqs->freq_table[i]);
|
||||
seq_put_decimal_ull(m, " ",
|
||||
freqs->freq_table[i]);
|
||||
}
|
||||
}
|
||||
seq_putc(m, '\n');
|
||||
|
@ -216,13 +232,16 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
|
|||
rcu_read_lock();
|
||||
|
||||
hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
|
||||
if (uid_entry->max_state)
|
||||
seq_printf(m, "%d:", uid_entry->uid);
|
||||
if (uid_entry->max_state) {
|
||||
seq_put_decimal_ull(m, "", uid_entry->uid);
|
||||
seq_putc(m, ':');
|
||||
}
|
||||
for (i = 0; i < uid_entry->max_state; ++i) {
|
||||
u64 time;
|
||||
if (freq_index_invalid(i))
|
||||
continue;
|
||||
seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t(
|
||||
uid_entry->time_in_state[i]));
|
||||
time = nsec_to_clock_t(uid_entry->time_in_state[i]);
|
||||
seq_put_decimal_ull(m, " ", time);
|
||||
}
|
||||
if (uid_entry->max_state)
|
||||
seq_putc(m, '\n');
|
||||
|
@ -232,6 +251,86 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int concurrent_time_seq_show(struct seq_file *m, void *v,
|
||||
atomic64_t *(*get_times)(struct concurrent_times *))
|
||||
{
|
||||
struct uid_entry *uid_entry;
|
||||
int i, num_possible_cpus = num_possible_cpus();
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
|
||||
atomic64_t *times = get_times(uid_entry->concurrent_times);
|
||||
|
||||
seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
|
||||
seq_putc(m, ':');
|
||||
|
||||
for (i = 0; i < num_possible_cpus; ++i) {
|
||||
u64 time = nsec_to_clock_t(atomic64_read(&times[i]));
|
||||
|
||||
seq_put_decimal_ull(m, " ", time);
|
||||
}
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline atomic64_t *get_active_times(struct concurrent_times *times)
|
||||
{
|
||||
return times->active;
|
||||
}
|
||||
|
||||
static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
|
||||
{
|
||||
if (v == uid_hash_table) {
|
||||
seq_put_decimal_ull(m, "cpus: ", num_possible_cpus());
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
return concurrent_time_seq_show(m, v, get_active_times);
|
||||
}
|
||||
|
||||
static inline atomic64_t *get_policy_times(struct concurrent_times *times)
|
||||
{
|
||||
return times->policy;
|
||||
}
|
||||
|
||||
static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
|
||||
{
|
||||
int i;
|
||||
struct cpu_freqs *freqs, *last_freqs = NULL;
|
||||
|
||||
if (v == uid_hash_table) {
|
||||
int cnt = 0;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
freqs = all_freqs[i];
|
||||
if (!freqs)
|
||||
continue;
|
||||
if (freqs != last_freqs) {
|
||||
if (last_freqs) {
|
||||
seq_put_decimal_ull(m, ": ", cnt);
|
||||
seq_putc(m, ' ');
|
||||
cnt = 0;
|
||||
}
|
||||
seq_put_decimal_ull(m, "policy", i);
|
||||
|
||||
last_freqs = freqs;
|
||||
}
|
||||
cnt++;
|
||||
}
|
||||
if (last_freqs) {
|
||||
seq_put_decimal_ull(m, ": ", cnt);
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
}
|
||||
|
||||
return concurrent_time_seq_show(m, v, get_policy_times);
|
||||
}
|
||||
|
||||
void cpufreq_task_times_init(struct task_struct *p)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
@ -326,11 +425,16 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
|
|||
{
|
||||
unsigned long flags;
|
||||
unsigned int state;
|
||||
unsigned int active_cpu_cnt = 0;
|
||||
unsigned int policy_cpu_cnt = 0;
|
||||
unsigned int policy_first_cpu;
|
||||
struct uid_entry *uid_entry;
|
||||
struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
|
||||
struct cpufreq_policy *policy;
|
||||
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
|
||||
int cpu = 0;
|
||||
|
||||
if (!freqs || p->flags & PF_EXITING)
|
||||
if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
|
||||
return;
|
||||
|
||||
state = freqs->offset + READ_ONCE(freqs->last_index);
|
||||
|
@ -346,6 +450,42 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
|
|||
if (uid_entry && state < uid_entry->max_state)
|
||||
uid_entry->time_in_state[state] += cputime;
|
||||
spin_unlock_irqrestore(&uid_lock, flags);
|
||||
|
||||
rcu_read_lock();
|
||||
uid_entry = find_uid_entry_rcu(uid);
|
||||
if (!uid_entry) {
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
if (!idle_cpu(cpu))
|
||||
++active_cpu_cnt;
|
||||
|
||||
atomic64_add(cputime,
|
||||
&uid_entry->concurrent_times->active[active_cpu_cnt - 1]);
|
||||
|
||||
policy = cpufreq_cpu_get(task_cpu(p));
|
||||
if (!policy) {
|
||||
/*
|
||||
* This CPU may have just come up and not have a cpufreq policy
|
||||
* yet.
|
||||
*/
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
for_each_cpu(cpu, policy->related_cpus)
|
||||
if (!idle_cpu(cpu))
|
||||
++policy_cpu_cnt;
|
||||
|
||||
policy_first_cpu = cpumask_first(policy->related_cpus);
|
||||
cpufreq_cpu_put(policy);
|
||||
|
||||
atomic64_add(cputime,
|
||||
&uid_entry->concurrent_times->policy[policy_first_cpu +
|
||||
policy_cpu_cnt - 1]);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
|
||||
|
@ -387,6 +527,14 @@ void cpufreq_times_create_policy(struct cpufreq_policy *policy)
|
|||
all_freqs[cpu] = freqs;
|
||||
}
|
||||
|
||||
static void uid_entry_reclaim(struct rcu_head *rcu)
|
||||
{
|
||||
struct uid_entry *uid_entry = container_of(rcu, struct uid_entry, rcu);
|
||||
|
||||
kfree(uid_entry->concurrent_times);
|
||||
kfree(uid_entry);
|
||||
}
|
||||
|
||||
void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
|
||||
{
|
||||
struct uid_entry *uid_entry;
|
||||
|
@ -400,7 +548,7 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
|
|||
hash, uid_start) {
|
||||
if (uid_start == uid_entry->uid) {
|
||||
hash_del_rcu(&uid_entry->hash);
|
||||
kfree_rcu(uid_entry, rcu);
|
||||
call_rcu(&uid_entry->rcu, uid_entry_reclaim);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -408,24 +556,17 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
|
|||
spin_unlock_irqrestore(&uid_lock, flags);
|
||||
}
|
||||
|
||||
void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
|
||||
void cpufreq_times_record_transition(struct cpufreq_policy *policy,
|
||||
unsigned int new_freq)
|
||||
{
|
||||
int index;
|
||||
struct cpu_freqs *freqs = all_freqs[freq->cpu];
|
||||
struct cpufreq_policy *policy;
|
||||
|
||||
struct cpu_freqs *freqs = all_freqs[policy->cpu];
|
||||
if (!freqs)
|
||||
return;
|
||||
|
||||
policy = cpufreq_cpu_get(freq->cpu);
|
||||
if (!policy)
|
||||
return;
|
||||
|
||||
index = cpufreq_frequency_table_get_index(policy, freq->new);
|
||||
index = cpufreq_frequency_table_get_index(policy, new_freq);
|
||||
if (index >= 0)
|
||||
WRITE_ONCE(freqs->last_index, index);
|
||||
|
||||
cpufreq_cpu_put(policy);
|
||||
}
|
||||
|
||||
static const struct seq_operations uid_time_in_state_seq_ops = {
|
||||
|
@ -453,11 +594,55 @@ static const struct file_operations uid_time_in_state_fops = {
|
|||
.release = seq_release,
|
||||
};
|
||||
|
||||
static const struct seq_operations concurrent_active_time_seq_ops = {
|
||||
.start = uid_seq_start,
|
||||
.next = uid_seq_next,
|
||||
.stop = uid_seq_stop,
|
||||
.show = concurrent_active_time_seq_show,
|
||||
};
|
||||
|
||||
static int concurrent_active_time_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &concurrent_active_time_seq_ops);
|
||||
}
|
||||
|
||||
static const struct file_operations concurrent_active_time_fops = {
|
||||
.open = concurrent_active_time_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
static const struct seq_operations concurrent_policy_time_seq_ops = {
|
||||
.start = uid_seq_start,
|
||||
.next = uid_seq_next,
|
||||
.stop = uid_seq_stop,
|
||||
.show = concurrent_policy_time_seq_show,
|
||||
};
|
||||
|
||||
static int concurrent_policy_time_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &concurrent_policy_time_seq_ops);
|
||||
}
|
||||
|
||||
static const struct file_operations concurrent_policy_time_fops = {
|
||||
.open = concurrent_policy_time_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
static int __init cpufreq_times_init(void)
|
||||
{
|
||||
proc_create_data("uid_time_in_state", 0444, NULL,
|
||||
&uid_time_in_state_fops, NULL);
|
||||
|
||||
proc_create_data("uid_concurrent_active_time", 0444, NULL,
|
||||
&concurrent_active_time_fops, NULL);
|
||||
|
||||
proc_create_data("uid_concurrent_policy_time", 0444, NULL,
|
||||
&concurrent_policy_time_fops, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -833,7 +833,7 @@ static void intel_pstate_update_policies(void)
|
|||
/************************** sysfs begin ************************/
|
||||
#define show_one(file_name, object) \
|
||||
static ssize_t show_##file_name \
|
||||
(struct kobject *kobj, struct attribute *attr, char *buf) \
|
||||
(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
|
||||
{ \
|
||||
return sprintf(buf, "%u\n", global.object); \
|
||||
}
|
||||
|
@ -842,7 +842,7 @@ static ssize_t intel_pstate_show_status(char *buf);
|
|||
static int intel_pstate_update_status(const char *buf, size_t size);
|
||||
|
||||
static ssize_t show_status(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
ssize_t ret;
|
||||
|
||||
|
@ -853,7 +853,7 @@ static ssize_t show_status(struct kobject *kobj,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t store_status(struct kobject *a, struct attribute *b,
|
||||
static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
char *p = memchr(buf, '\n', count);
|
||||
|
@ -867,7 +867,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
|
|||
}
|
||||
|
||||
static ssize_t show_turbo_pct(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct cpudata *cpu;
|
||||
int total, no_turbo, turbo_pct;
|
||||
|
@ -893,7 +893,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
|
|||
}
|
||||
|
||||
static ssize_t show_num_pstates(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct cpudata *cpu;
|
||||
int total;
|
||||
|
@ -914,7 +914,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
|
|||
}
|
||||
|
||||
static ssize_t show_no_turbo(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
ssize_t ret;
|
||||
|
||||
|
@ -936,7 +936,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
|
||||
static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
unsigned int input;
|
||||
|
@ -983,7 +983,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
|
|||
return count;
|
||||
}
|
||||
|
||||
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
|
||||
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
unsigned int input;
|
||||
|
@ -1013,7 +1013,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
|
|||
return count;
|
||||
}
|
||||
|
||||
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
|
||||
static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
unsigned int input;
|
||||
|
@ -1045,12 +1045,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
|
|||
}
|
||||
|
||||
static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%u\n", hwp_boost);
|
||||
}
|
||||
|
||||
static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
|
||||
static ssize_t store_hwp_dynamic_boost(struct kobject *a,
|
||||
struct kobj_attribute *b,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
unsigned int input;
|
||||
|
|
|
@ -203,6 +203,7 @@ struct at_xdmac_chan {
|
|||
u32 save_cim;
|
||||
u32 save_cnda;
|
||||
u32 save_cndc;
|
||||
u32 irq_status;
|
||||
unsigned long status;
|
||||
struct tasklet_struct tasklet;
|
||||
struct dma_slave_config sconfig;
|
||||
|
@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
|
|||
struct at_xdmac_desc *desc;
|
||||
u32 error_mask;
|
||||
|
||||
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
|
||||
__func__, atchan->status);
|
||||
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
|
||||
__func__, atchan->irq_status);
|
||||
|
||||
error_mask = AT_XDMAC_CIS_RBEIS
|
||||
| AT_XDMAC_CIS_WBEIS
|
||||
|
@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
|
|||
|
||||
if (at_xdmac_chan_is_cyclic(atchan)) {
|
||||
at_xdmac_handle_cyclic(atchan);
|
||||
} else if ((atchan->status & AT_XDMAC_CIS_LIS)
|
||||
|| (atchan->status & error_mask)) {
|
||||
} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
|
||||
|| (atchan->irq_status & error_mask)) {
|
||||
struct dma_async_tx_descriptor *txd;
|
||||
|
||||
if (atchan->status & AT_XDMAC_CIS_RBEIS)
|
||||
if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
|
||||
dev_err(chan2dev(&atchan->chan), "read bus error!!!");
|
||||
if (atchan->status & AT_XDMAC_CIS_WBEIS)
|
||||
if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
|
||||
dev_err(chan2dev(&atchan->chan), "write bus error!!!");
|
||||
if (atchan->status & AT_XDMAC_CIS_ROIS)
|
||||
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
|
||||
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
|
||||
|
||||
spin_lock_bh(&atchan->lock);
|
||||
|
@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
|
|||
atchan = &atxdmac->chan[i];
|
||||
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
|
||||
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
|
||||
atchan->status = chan_status & chan_imr;
|
||||
atchan->irq_status = chan_status & chan_imr;
|
||||
dev_vdbg(atxdmac->dma.dev,
|
||||
"%s: chan%d: imr=0x%x, status=0x%x\n",
|
||||
__func__, i, chan_imr, chan_status);
|
||||
|
@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
|
|||
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
|
||||
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
|
||||
|
||||
if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
|
||||
if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
|
||||
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
|
||||
|
||||
tasklet_schedule(&atchan->tasklet);
|
||||
|
|
|
@ -642,11 +642,9 @@ static int dmatest_func(void *data)
|
|||
srcs[i] = um->addr[i] + src_off;
|
||||
ret = dma_mapping_error(dev->dev, um->addr[i]);
|
||||
if (ret) {
|
||||
dmaengine_unmap_put(um);
|
||||
result("src mapping error", total_tests,
|
||||
src_off, dst_off, len, ret);
|
||||
failed_tests++;
|
||||
continue;
|
||||
goto error_unmap_continue;
|
||||
}
|
||||
um->to_cnt++;
|
||||
}
|
||||
|
@ -661,11 +659,9 @@ static int dmatest_func(void *data)
|
|||
DMA_BIDIRECTIONAL);
|
||||
ret = dma_mapping_error(dev->dev, dsts[i]);
|
||||
if (ret) {
|
||||
dmaengine_unmap_put(um);
|
||||
result("dst mapping error", total_tests,
|
||||
src_off, dst_off, len, ret);
|
||||
failed_tests++;
|
||||
continue;
|
||||
goto error_unmap_continue;
|
||||
}
|
||||
um->bidi_cnt++;
|
||||
}
|
||||
|
@ -693,12 +689,10 @@ static int dmatest_func(void *data)
|
|||
}
|
||||
|
||||
if (!tx) {
|
||||
dmaengine_unmap_put(um);
|
||||
result("prep error", total_tests, src_off,
|
||||
dst_off, len, ret);
|
||||
msleep(100);
|
||||
failed_tests++;
|
||||
continue;
|
||||
goto error_unmap_continue;
|
||||
}
|
||||
|
||||
done->done = false;
|
||||
|
@ -707,12 +701,10 @@ static int dmatest_func(void *data)
|
|||
cookie = tx->tx_submit(tx);
|
||||
|
||||
if (dma_submit_error(cookie)) {
|
||||
dmaengine_unmap_put(um);
|
||||
result("submit error", total_tests, src_off,
|
||||
dst_off, len, ret);
|
||||
msleep(100);
|
||||
failed_tests++;
|
||||
continue;
|
||||
goto error_unmap_continue;
|
||||
}
|
||||
dma_async_issue_pending(chan);
|
||||
|
||||
|
@ -725,16 +717,14 @@ static int dmatest_func(void *data)
|
|||
dmaengine_unmap_put(um);
|
||||
result("test timed out", total_tests, src_off, dst_off,
|
||||
len, 0);
|
||||
failed_tests++;
|
||||
continue;
|
||||
goto error_unmap_continue;
|
||||
} else if (status != DMA_COMPLETE) {
|
||||
dmaengine_unmap_put(um);
|
||||
result(status == DMA_ERROR ?
|
||||
"completion error status" :
|
||||
"completion busy status", total_tests, src_off,
|
||||
dst_off, len, ret);
|
||||
failed_tests++;
|
||||
continue;
|
||||
goto error_unmap_continue;
|
||||
}
|
||||
|
||||
dmaengine_unmap_put(um);
|
||||
|
@ -779,6 +769,12 @@ static int dmatest_func(void *data)
|
|||
verbose_result("test passed", total_tests, src_off,
|
||||
dst_off, len, 0);
|
||||
}
|
||||
|
||||
continue;
|
||||
|
||||
error_unmap_continue:
|
||||
dmaengine_unmap_put(um);
|
||||
failed_tests++;
|
||||
}
|
||||
ktime = ktime_sub(ktime_get(), ktime);
|
||||
ktime = ktime_sub(ktime, comparetime);
|
||||
|
|
|
@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
|
|||
case ISCSI_BOOT_TGT_NIC_ASSOC:
|
||||
case ISCSI_BOOT_TGT_CHAP_TYPE:
|
||||
rc = S_IRUGO;
|
||||
break;
|
||||
case ISCSI_BOOT_TGT_NAME:
|
||||
if (tgt->tgt_name_len)
|
||||
rc = S_IRUGO;
|
||||
|
|
|
@ -310,30 +310,26 @@ static int sirf_probe(struct serdev_device *serdev)
|
|||
ret = -ENODEV;
|
||||
goto err_put_device;
|
||||
}
|
||||
|
||||
ret = regulator_enable(data->vcc);
|
||||
if (ret)
|
||||
goto err_put_device;
|
||||
|
||||
/* Wait for chip to boot into hibernate mode. */
|
||||
msleep(SIRF_BOOT_DELAY);
|
||||
}
|
||||
|
||||
if (data->wakeup) {
|
||||
ret = gpiod_to_irq(data->wakeup);
|
||||
if (ret < 0)
|
||||
goto err_put_device;
|
||||
|
||||
goto err_disable_vcc;
|
||||
data->irq = ret;
|
||||
|
||||
ret = devm_request_threaded_irq(dev, data->irq, NULL,
|
||||
sirf_wakeup_handler,
|
||||
ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
|
||||
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
|
||||
"wakeup", data);
|
||||
if (ret)
|
||||
goto err_put_device;
|
||||
}
|
||||
|
||||
if (data->on_off) {
|
||||
ret = regulator_enable(data->vcc);
|
||||
if (ret)
|
||||
goto err_put_device;
|
||||
|
||||
/* Wait for chip to boot into hibernate mode */
|
||||
msleep(SIRF_BOOT_DELAY);
|
||||
goto err_disable_vcc;
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_PM)) {
|
||||
|
@ -342,7 +338,7 @@ static int sirf_probe(struct serdev_device *serdev)
|
|||
} else {
|
||||
ret = sirf_runtime_resume(dev);
|
||||
if (ret < 0)
|
||||
goto err_disable_vcc;
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
ret = gnss_register_device(gdev);
|
||||
|
@ -356,6 +352,9 @@ static int sirf_probe(struct serdev_device *serdev)
|
|||
pm_runtime_disable(dev);
|
||||
else
|
||||
sirf_runtime_suspend(dev);
|
||||
err_free_irq:
|
||||
if (data->wakeup)
|
||||
free_irq(data->irq, data);
|
||||
err_disable_vcc:
|
||||
if (data->on_off)
|
||||
regulator_disable(data->vcc);
|
||||
|
@ -376,6 +375,9 @@ static void sirf_remove(struct serdev_device *serdev)
|
|||
else
|
||||
sirf_runtime_suspend(&serdev->dev);
|
||||
|
||||
if (data->wakeup)
|
||||
free_irq(data->irq, data);
|
||||
|
||||
if (data->on_off)
|
||||
regulator_disable(data->vcc);
|
||||
|
||||
|
|
|
@ -259,6 +259,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
|
|||
struct vf610_gpio_port *port;
|
||||
struct resource *iores;
|
||||
struct gpio_chip *gc;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
|
||||
|
@ -298,6 +299,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Mask all GPIO interrupts */
|
||||
for (i = 0; i < gc->ngpio; i++)
|
||||
vf610_gpio_writel(0, port->base + PORT_PCR(i));
|
||||
|
||||
/* Clear the interrupt status register for all GPIO's */
|
||||
vf610_gpio_writel(~0, port->base + PORT_ISFR);
|
||||
|
||||
|
|
|
@ -1443,7 +1443,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
|
|||
effective_mode &= ~S_IWUSR;
|
||||
|
||||
if ((adev->flags & AMD_IS_APU) &&
|
||||
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
|
||||
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
|
||||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
|
||||
return 0;
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#include "amdgpu_display.h"
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/dma-fence-array.h>
|
||||
|
||||
static const struct dma_buf_ops amdgpu_dmabuf_ops;
|
||||
|
||||
|
@ -188,6 +189,48 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
|
|||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static int
|
||||
__reservation_object_make_exclusive(struct reservation_object *obj)
|
||||
{
|
||||
struct dma_fence **fences;
|
||||
unsigned int count;
|
||||
int r;
|
||||
|
||||
if (!reservation_object_get_list(obj)) /* no shared fences to convert */
|
||||
return 0;
|
||||
|
||||
r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (count == 0) {
|
||||
/* Now that was unexpected. */
|
||||
} else if (count == 1) {
|
||||
reservation_object_add_excl_fence(obj, fences[0]);
|
||||
dma_fence_put(fences[0]);
|
||||
kfree(fences);
|
||||
} else {
|
||||
struct dma_fence_array *array;
|
||||
|
||||
array = dma_fence_array_create(count, fences,
|
||||
dma_fence_context_alloc(1), 0,
|
||||
false);
|
||||
if (!array)
|
||||
goto err_fences_put;
|
||||
|
||||
reservation_object_add_excl_fence(obj, &array->base);
|
||||
dma_fence_put(&array->base);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_fences_put:
|
||||
while (count--)
|
||||
dma_fence_put(fences[count]);
|
||||
kfree(fences);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
|
||||
* @dma_buf: shared DMA buffer
|
||||
|
@ -219,16 +262,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
|
|||
|
||||
if (attach->dev->driver != adev->dev->driver) {
|
||||
/*
|
||||
* Wait for all shared fences to complete before we switch to future
|
||||
* use of exclusive fence on this prime shared bo.
|
||||
* We only create shared fences for internal use, but importers
|
||||
* of the dmabuf rely on exclusive fences for implicitly
|
||||
* tracking write hazards. As any of the current fences may
|
||||
* correspond to a write, we need to convert all existing
|
||||
* fences on the reservation object into a single exclusive
|
||||
* fence.
|
||||
*/
|
||||
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
|
||||
true, false,
|
||||
MAX_SCHEDULE_TIMEOUT);
|
||||
if (unlikely(r < 0)) {
|
||||
DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
|
||||
r = __reservation_object_make_exclusive(bo->tbo.resv);
|
||||
if (r)
|
||||
goto error_unreserve;
|
||||
}
|
||||
}
|
||||
|
||||
/* pin buffer into GTT */
|
||||
|
|
|
@ -3011,14 +3011,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
|
|||
struct amdgpu_task_info *task_info)
|
||||
{
|
||||
struct amdgpu_vm *vm;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock(&adev->vm_manager.pasid_lock);
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
|
||||
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
||||
if (vm)
|
||||
*task_info = vm->task_info;
|
||||
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1573,6 +1573,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
|
|||
if (old_plane_state->fb != new_plane_state->fb)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* FIXME: Since prepare_fb and cleanup_fb are always called on
|
||||
* the new_plane_state for async updates we need to block framebuffer
|
||||
* changes. This prevents use of a fb that's been cleaned up and
|
||||
* double cleanups from occurring.
|
||||
*/
|
||||
if (old_plane_state->fb != new_plane_state->fb)
|
||||
return -EINVAL;
|
||||
|
||||
funcs = plane->helper_private;
|
||||
if (!funcs->atomic_async_update)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
|
|||
u16 data_offset, size;
|
||||
u8 frev, crev;
|
||||
struct ci_power_info *pi;
|
||||
enum pci_bus_speed speed_cap;
|
||||
enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
|
||||
struct pci_dev *root = rdev->pdev->bus->self;
|
||||
int ret;
|
||||
|
||||
|
@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
|
|||
return -ENOMEM;
|
||||
rdev->pm.dpm.priv = pi;
|
||||
|
||||
speed_cap = pcie_get_speed_cap(root);
|
||||
if (!pci_is_root_bus(rdev->pdev->bus))
|
||||
speed_cap = pcie_get_speed_cap(root);
|
||||
if (speed_cap == PCI_SPEED_UNKNOWN) {
|
||||
pi->sys_pcie_mask = 0;
|
||||
} else {
|
||||
|
|
|
@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
|
|||
struct ni_power_info *ni_pi;
|
||||
struct si_power_info *si_pi;
|
||||
struct atom_clock_dividers dividers;
|
||||
enum pci_bus_speed speed_cap;
|
||||
enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
|
||||
struct pci_dev *root = rdev->pdev->bus->self;
|
||||
int ret;
|
||||
|
||||
|
@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
|
|||
eg_pi = &ni_pi->eg;
|
||||
pi = &eg_pi->rv7xx;
|
||||
|
||||
speed_cap = pcie_get_speed_cap(root);
|
||||
if (!pci_is_root_bus(rdev->pdev->bus))
|
||||
speed_cap = pcie_get_speed_cap(root);
|
||||
if (speed_cap == PCI_SPEED_UNKNOWN) {
|
||||
si_pi->sys_pcie_mask = 0;
|
||||
} else {
|
||||
|
|
|
@ -672,6 +672,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
|
|||
return PTR_ERR(tcon->sclk0);
|
||||
}
|
||||
}
|
||||
clk_prepare_enable(tcon->sclk0);
|
||||
|
||||
if (tcon->quirks->has_channel_1) {
|
||||
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
|
||||
|
@ -686,6 +687,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
|
|||
|
||||
static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
|
||||
{
|
||||
clk_disable_unprepare(tcon->sclk0);
|
||||
clk_disable_unprepare(tcon->clk);
|
||||
}
|
||||
|
||||
|
|
|
@ -1498,8 +1498,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static int omap_i2c_runtime_suspend(struct device *dev)
|
||||
static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
|
||||
{
|
||||
struct omap_i2c_dev *omap = dev_get_drvdata(dev);
|
||||
|
||||
|
@ -1525,7 +1524,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int omap_i2c_runtime_resume(struct device *dev)
|
||||
static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
|
||||
{
|
||||
struct omap_i2c_dev *omap = dev_get_drvdata(dev);
|
||||
|
||||
|
@ -1540,20 +1539,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
|
|||
}
|
||||
|
||||
static const struct dev_pm_ops omap_i2c_pm_ops = {
|
||||
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
|
||||
pm_runtime_force_resume)
|
||||
SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
|
||||
omap_i2c_runtime_resume, NULL)
|
||||
};
|
||||
#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
|
||||
#else
|
||||
#define OMAP_I2C_PM_OPS NULL
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
static struct platform_driver omap_i2c_driver = {
|
||||
.probe = omap_i2c_probe,
|
||||
.remove = omap_i2c_remove,
|
||||
.driver = {
|
||||
.name = "omap_i2c",
|
||||
.pm = OMAP_I2C_PM_OPS,
|
||||
.pm = &omap_i2c_pm_ops,
|
||||
.of_match_table = of_match_ptr(omap_i2c_of_match),
|
||||
},
|
||||
};
|
||||
|
|
|
@ -980,7 +980,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
|
|||
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
|
||||
wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
|
||||
wc.wc_flags = IB_WC_WITH_IMM;
|
||||
tlen -= sizeof(u32);
|
||||
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
|
||||
wc.ex.imm_data = 0;
|
||||
wc.wc_flags = 0;
|
||||
|
|
|
@ -513,7 +513,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
|
|||
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
|
||||
wc.ex.imm_data = ohdr->u.ud.imm_data;
|
||||
wc.wc_flags = IB_WC_WITH_IMM;
|
||||
tlen -= sizeof(u32);
|
||||
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
|
||||
wc.ex.imm_data = 0;
|
||||
wc.wc_flags = 0;
|
||||
|
@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
    struct list_head list;
    struct net_device *dev;
    struct ipoib_neigh *neigh;
    struct ipoib_path *path;
    struct ipoib_tx_buf *tx_ring;
    unsigned int tx_head;
    unsigned int tx_tail;

@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
    neigh->cm = tx;
    tx->neigh = neigh;
    tx->path = path;
    tx->dev = dev;
    list_add(&tx->list, &priv->cm.start_list);
    set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);

@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                neigh->daddr + QPN_AND_OPTIONS_OFFSET);
            goto free_neigh;
        }
        memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
        memcpy(&pathrec, &path->pathrec, sizeof(pathrec));

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);

@@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
    { "ELAN0000", 0 },
    { "ELAN0100", 0 },
    { "ELAN0600", 0 },
    { "ELAN0601", 0 },
    { "ELAN0602", 0 },
    { "ELAN0605", 0 },
    { "ELAN0608", 0 },

@@ -187,6 +187,7 @@ enum {
    MODEL_DIGITIZER_II = 0x5544, /* UD */
    MODEL_GRAPHIRE = 0x4554, /* ET */
    MODEL_PENPARTNER = 0x4354, /* CT */
    MODEL_ARTPAD_II = 0x4B54, /* KT */
};

static void wacom_handle_model_response(struct wacom *wacom)

@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
        wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
        break;

    case MODEL_ARTPAD_II:
    case MODEL_DIGITIZER_II:
        wacom->dev->name = "Wacom Digitizer II";
        wacom->dev->id.version = MODEL_DIGITIZER_II;
@@ -1929,16 +1929,13 @@ static void do_attach(struct iommu_dev_data *dev_data,

static void do_detach(struct iommu_dev_data *dev_data)
{
    struct protection_domain *domain = dev_data->domain;
    struct amd_iommu *iommu;
    u16 alias;

    iommu = amd_iommu_rlookup_table[dev_data->devid];
    alias = dev_data->alias;

    /* decrease reference counters */
    dev_data->domain->dev_iommu[iommu->index] -= 1;
    dev_data->domain->dev_cnt -= 1;

    /* Update data structures */
    dev_data->domain = NULL;
    list_del(&dev_data->list);

@@ -1948,6 +1945,16 @@ static void do_detach(struct iommu_dev_data *dev_data)

    /* Flush the DTE entry */
    device_flush_dte(dev_data);

    /* Flush IOTLB */
    domain_flush_tlb_pde(domain);

    /* Wait for the flushes to finish */
    domain_flush_complete(domain);

    /* decrease reference counters - needs to happen after the flushes */
    domain->dev_iommu[iommu->index] -= 1;
    domain->dev_cnt -= 1;
}

/*

@@ -2555,13 +2562,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
            bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
            iommu_unmap_page(domain, bus_addr, PAGE_SIZE);

            if (--mapped_pages)
            if (--mapped_pages == 0)
                goto out_free_iova;
        }
    }

out_free_iova:
    free_iova_fast(&dma_dom->iovad, address, npages);
    free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);

out_err:
    return 0;
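The two map_sg() changes above are easy to misread in this flattened view: the unwind loop should stop only once the running count of mapped pages reaches zero, and the IOVA range is freed by page frame number rather than by bus address. A minimal, self-contained sketch of the counting fix follows; it is an illustration only, not code from the patch, and the variable names are hypothetical.

    #include <stdio.h>

    int main(void)
    {
        int mapped_pages = 3;   /* pages that still need to be undone */

        for (int i = 0; i < 8; i++) {
            printf("unmapping page %d\n", i);
            /* The old test `if (--mapped_pages)` bailed out while pages
             * were still mapped; stopping at zero undoes them all. */
            if (--mapped_pages == 0)
                break;
        }
        return 0;
    }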
@@ -1581,6 +1581,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
        nr_irqs /= 2;
    } while (nr_irqs > 0);

    if (!nr_irqs)
        err = -ENOSPC;

    if (err)
        goto out;

@@ -1951,6 +1954,29 @@ static void its_free_pending_table(struct page *pt)
               get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
}

static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
{
    u32 count = 1000000; /* 1s! */
    bool clean;
    u64 val;

    val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
    val &= ~GICR_VPENDBASER_Valid;
    gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);

    do {
        val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
        clean = !(val & GICR_VPENDBASER_Dirty);
        if (!clean) {
            count--;
            cpu_relax();
            udelay(1);
        }
    } while (!clean && count);

    return val;
}

static void its_cpu_init_lpis(void)
{
    void __iomem *rbase = gic_data_rdist_rd_base();

@@ -2024,6 +2050,30 @@ static void its_cpu_init_lpis(void)
    val |= GICR_CTLR_ENABLE_LPIS;
    writel_relaxed(val, rbase + GICR_CTLR);

    if (gic_rdists->has_vlpis) {
        void __iomem *vlpi_base = gic_data_rdist_vlpi_base();

        /*
         * It's possible for CPU to receive VLPIs before it is
         * sheduled as a vPE, especially for the first CPU, and the
         * VLPI with INTID larger than 2^(IDbits+1) will be considered
         * as out of range and dropped by GIC.
         * So we initialize IDbits to known value to avoid VLPI drop.
         */
        val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
        pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
                 smp_processor_id(), val);
        gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

        /*
         * Also clear Valid bit of GICR_VPENDBASER, in case some
         * ancient programming gets left in and has possibility of
         * corrupting memory.
         */
        val = its_clear_vpend_valid(vlpi_base);
        WARN_ON(val & GICR_VPENDBASER_Dirty);
    }

    /* Make sure the GIC has seen the above */
    dsb(sy);
}

@@ -2644,26 +2694,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
static void its_vpe_deschedule(struct its_vpe *vpe)
{
    void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
    u32 count = 1000000; /* 1s! */
    bool clean;
    u64 val;

    /* We're being scheduled out */
    val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
    val &= ~GICR_VPENDBASER_Valid;
    gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
    val = its_clear_vpend_valid(vlpi_base);

    do {
        val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
        clean = !(val & GICR_VPENDBASER_Dirty);
        if (!clean) {
            count--;
            cpu_relax();
            udelay(1);
        }
    } while (!clean && count);

    if (unlikely(!clean && !count)) {
    if (unlikely(val & GICR_VPENDBASER_Dirty)) {
        pr_err_ratelimited("ITS virtual pending table not cleaning\n");
        vpe->idai = false;
        vpe->pending_last = true;
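The ITS changes above factor the "clear Valid, then poll until Dirty drops" sequence into its_clear_vpend_valid() so it can be shared by CPU init and vPE descheduling. A minimal sketch of that bounded-poll pattern, using hypothetical register accessors rather than the GIC helpers, in case the interleaved old/new lines above are hard to follow:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustration only: clear a Valid bit, then spin (with a retry
     * budget) until the hardware drops its Dirty bit. The caller checks
     * the returned value for Dirty to detect a timeout. */
    static uint64_t poll_until_clean(uint64_t (*read_reg)(void),
                                     void (*write_reg)(uint64_t),
                                     uint64_t valid_bit, uint64_t dirty_bit)
    {
        unsigned int count = 1000000;   /* roughly 1s at 1us per retry */
        uint64_t val = read_reg() & ~valid_bit;

        write_reg(val);
        do {
            val = read_reg();
            if (!(val & dirty_bit))
                break;
        } while (--count);

        return val;
    }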
@@ -34,6 +34,9 @@
#define SEL_INT_PENDING (1 << 6)
#define SEL_INT_NUM_MASK 0x3f

#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)

struct icu_chip_data {
    int nr_irqs;
    unsigned int virq_base;

@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
static const struct mmp_intc_conf mmp2_conf = {
    .conf_enable = 0x20,
    .conf_disable = 0x0,
    .conf_mask = 0x7f,
    .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
                 MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};

static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)

@@ -4630,7 +4630,6 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
    atomic_inc(&r10_bio->remaining);
    read_bio->bi_next = NULL;
    generic_make_request(read_bio);
    sector_nr += nr_sectors;
    sectors_done += nr_sectors;
    if (sector_nr <= last)
        goto read_more;

@@ -1065,11 +1065,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
            return -EINVAL;
        }

        /* Make sure the terminal type MSB is not null, otherwise it
         * could be confused with a unit.
        /*
         * Reject invalid terminal types that would cause issues:
         *
         * - The high byte must be non-zero, otherwise it would be
         *   confused with a unit.
         *
         * - Bit 15 must be 0, as we use it internally as a terminal
         *   direction flag.
         *
         * Other unknown types are accepted.
         */
        type = get_unaligned_le16(&buffer[4]);
        if ((type & 0xff00) == 0) {
        if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
            uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
                "interface %d INPUT_TERMINAL %d has invalid "
                "type 0x%04x, skipping\n", udev->devnum,
@@ -1171,29 +1171,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
        }
    }

    /* Link-local multicast packets should be passed to the
     * stack on the link they arrive as well as pass them to the
     * bond-master device. These packets are mostly usable when
     * stack receives it with the link on which they arrive
     * (e.g. LLDP) they also must be available on master. Some of
     * the use cases include (but are not limited to): LLDP agents
     * that must be able to operate both on enslaved interfaces as
     * well as on bonds themselves; linux bridges that must be able
     * to process/pass BPDUs from attached bonds when any kind of
     * STP version is enabled on the network.
    /*
     * For packets determined by bond_should_deliver_exact_match() call to
     * be suppressed we want to make an exception for link-local packets.
     * This is necessary for e.g. LLDP daemons to be able to monitor
     * inactive slave links without being forced to bind to them
     * explicitly.
     *
     * At the same time, packets that are passed to the bonding master
     * (including link-local ones) can have their originating interface
     * determined via PACKET_ORIGDEV socket option.
     */
    if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

        if (nskb) {
            nskb->dev = bond->dev;
            nskb->queue_mapping = 0;
            netif_rx(nskb);
        }
        return RX_HANDLER_PASS;
    }
    if (bond_should_deliver_exact_match(skb, slave, bond))
    if (bond_should_deliver_exact_match(skb, slave, bond)) {
        if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
            return RX_HANDLER_PASS;
        return RX_HANDLER_EXACT;
    }

    skb->dev = bond->dev;
@@ -884,7 +884,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
    default:
        return U64_MAX;
    }
    value = (((u64)high) << 16) | low;
    value = (((u64)high) << 32) | low;
    return value;
}

@@ -3070,7 +3070,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
    .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
    .port_link_state = mv88e6352_port_link_state,
    .port_get_cmode = mv88e6185_port_get_cmode,
    .stats_snapshot = mv88e6320_g1_stats_snapshot,
    .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
    .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
    .stats_get_sset_count = mv88e6095_stats_get_sset_count,
    .stats_get_strings = mv88e6095_stats_get_strings,

@@ -4188,7 +4188,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
    .name = "Marvell 88E6190",
    .num_databases = 4096,
    .num_ports = 11, /* 10 + Z80 */
    .num_internal_phys = 11,
    .num_internal_phys = 9,
    .num_gpio = 16,
    .max_vid = 8191,
    .port_base_addr = 0x0,

@@ -4211,7 +4211,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
    .name = "Marvell 88E6190X",
    .num_databases = 4096,
    .num_ports = 11, /* 10 + Z80 */
    .num_internal_phys = 11,
    .num_internal_phys = 9,
    .num_gpio = 16,
    .max_vid = 8191,
    .port_base_addr = 0x0,

@@ -4234,7 +4234,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
    .name = "Marvell 88E6191",
    .num_databases = 4096,
    .num_ports = 11, /* 10 + Z80 */
    .num_internal_phys = 11,
    .num_internal_phys = 9,
    .max_vid = 8191,
    .port_base_addr = 0x0,
    .phy_base_addr = 0x0,

@@ -4281,7 +4281,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
    .name = "Marvell 88E6290",
    .num_databases = 4096,
    .num_ports = 11, /* 10 + Z80 */
    .num_internal_phys = 11,
    .num_internal_phys = 9,
    .num_gpio = 16,
    .max_vid = 8191,
    .port_base_addr = 0x0,

@@ -4443,7 +4443,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
    .name = "Marvell 88E6390",
    .num_databases = 4096,
    .num_ports = 11, /* 10 + Z80 */
    .num_internal_phys = 11,
    .num_internal_phys = 9,
    .num_gpio = 16,
    .max_vid = 8191,
    .port_base_addr = 0x0,

@@ -4466,7 +4466,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
    .name = "Marvell 88E6390X",
    .num_databases = 4096,
    .num_ports = 11, /* 10 + Z80 */
    .num_internal_phys = 11,
    .num_internal_phys = 9,
    .num_gpio = 16,
    .max_vid = 8191,
    .port_base_addr = 0x0,

@@ -4561,6 +4561,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
    return 0;
}

static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
{
    int i;

    for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
        chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
}

static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
                                                        int port)
{

@@ -4597,6 +4605,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
    if (err)
        goto free;

    mv88e6xxx_ports_cmode_init(chip);

    mutex_lock(&chip->reg_lock);
    err = mv88e6xxx_switch_reset(chip);
    mutex_unlock(&chip->reg_lock);

@@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
        /* normal duplex detection */
        break;
    default:
        return -EINVAL;
        return -EOPNOTSUPP;
    }

    err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);

@@ -374,6 +374,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
        cmode = 0;
    }

    /* cmode doesn't change, nothing to do for us */
    if (cmode == chip->ports[port].cmode)
        return 0;

    lane = mv88e6390x_serdes_get_lane(chip, port);
    if (lane < 0)
        return lane;

@@ -384,7 +388,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
            return err;
    }

    err = mv88e6390_serdes_power(chip, port, false);
    err = mv88e6390x_serdes_power(chip, port, false);
    if (err)
        return err;

@@ -400,7 +404,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
    if (err)
        return err;

    err = mv88e6390_serdes_power(chip, port, true);
    err = mv88e6390x_serdes_power(chip, port, true);
    if (err)
        return err;

@@ -52,6 +52,7 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff

/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
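One of the mv88e6xxx fixes above is a shift-width bug when assembling a 64-bit statistics counter from two 32-bit register reads: the high half was shifted by 16 bits instead of 32. A tiny hedged sketch of the corrected combination, with made-up names and no driver code:

    #include <stdint.h>

    /* Illustration only: the high 32-bit half of a 64-bit hardware
     * counter must be shifted up a full 32 bits before being ORed with
     * the low half; shifting by 16 (as in the old line above) corrupts
     * the value. */
    static uint64_t combine_stat(uint32_t high, uint32_t low)
    {
        return ((uint64_t)high << 32) | low;
    }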
@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
        & 0xffff;

    if (inuse) { /* Tx FIFO is not empty */
        ready = priv->tx_prod - priv->tx_cons - inuse - 1;
        ready = max_t(int,
                      priv->tx_prod - priv->tx_cons - inuse - 1, 0);
    } else {
        /* Check for buffered last packet */
        status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));

@@ -463,6 +463,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
    }

    length >>= 9;
    if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
        dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                             skb->len);
        i = 0;
        goto tx_dma_error;
    }
    flags |= bnxt_lhint_arr[length];
    txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

@@ -643,6 +643,7 @@
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000

@@ -1214,6 +1215,8 @@ struct macb {

    int rx_bd_rd_prefetch;
    int tx_bd_rd_prefetch;

    u32 rx_intr_mask;
};

#ifdef CONFIG_MACB_USE_HWSTAMP
@@ -56,8 +56,7 @@
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
                           | MACB_BIT(ISR_ROVR))
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
                           | MACB_BIT(ISR_RLE) \
                           | MACB_BIT(TXERR))

@@ -1271,7 +1270,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
            queue_writel(queue, ISR, MACB_BIT(RCOMP));
            napi_reschedule(napi);
        } else {
            queue_writel(queue, IER, MACB_RX_INT_FLAGS);
            queue_writel(queue, IER, bp->rx_intr_mask);
        }
    }

@@ -1289,7 +1288,7 @@ static void macb_hresp_error_task(unsigned long data)
    u32 ctrl;

    for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
        queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
        queue_writel(queue, IDR, bp->rx_intr_mask |
                     MACB_TX_INT_FLAGS |
                     MACB_BIT(HRESP));
    }

@@ -1319,7 +1318,7 @@ static void macb_hresp_error_task(unsigned long data)

        /* Enable interrupts */
        queue_writel(queue, IER,
                     MACB_RX_INT_FLAGS |
                     bp->rx_intr_mask |
                     MACB_TX_INT_FLAGS |
                     MACB_BIT(HRESP));
    }

@@ -1373,14 +1372,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                (unsigned int)(queue - bp->queues),
                (unsigned long)status);

        if (status & MACB_RX_INT_FLAGS) {
        if (status & bp->rx_intr_mask) {
            /* There's no point taking any more interrupts
             * until we have processed the buffers. The
             * scheduling call may fail if the poll routine
             * is already scheduled, so disable interrupts
             * now.
             */
            queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
            queue_writel(queue, IDR, bp->rx_intr_mask);
            if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                queue_writel(queue, ISR, MACB_BIT(RCOMP));

@@ -1413,8 +1412,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
        /* There is a hardware issue under heavy load where DMA can
         * stop, this causes endless "used buffer descriptor read"
         * interrupts but it can be cleared by re-enabling RX. See
         * the at91 manual, section 41.3.1 or the Zynq manual
         * section 16.7.4 for details.
         * the at91rm9200 manual, section 41.3.1 or the Zynq manual
         * section 16.7.4 for details. RXUBR is only enabled for
         * these two versions.
         */
        if (status & MACB_BIT(RXUBR)) {
            ctrl = macb_readl(bp, NCR);

@@ -2264,7 +2264,7 @@ static void macb_init_hw(struct macb *bp)

        /* Enable interrupts */
        queue_writel(queue, IER,
                     MACB_RX_INT_FLAGS |
                     bp->rx_intr_mask |
                     MACB_TX_INT_FLAGS |
                     MACB_BIT(HRESP));
    }

@@ -3912,6 +3912,7 @@ static const struct macb_config sama5d4_config = {
};

static const struct macb_config emac_config = {
    .caps = MACB_CAPS_NEEDS_RSTONUBR,
    .clk_init = at91ether_clk_init,
    .init = at91ether_init,
};

@@ -3933,7 +3934,8 @@ static const struct macb_config zynqmp_config = {
};

static const struct macb_config zynq_config = {
    .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
    .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
            MACB_CAPS_NEEDS_RSTONUBR,
    .dma_burst_length = 16,
    .clk_init = macb_clk_init,
    .init = macb_init,

@@ -4088,6 +4090,10 @@ static int macb_probe(struct platform_device *pdev)
            macb_dma_desc_get_size(bp);
    }

    bp->rx_intr_mask = MACB_RX_INT_FLAGS;
    if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
        bp->rx_intr_mask |= MACB_BIT(RXUBR);

    mac = of_get_mac_address(np);
    if (mac) {
        ether_addr_copy(bp->dev->dev_addr, mac);
@@ -2419,6 +2419,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
out_notify_fail:
    (void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
    /* safe for ACPI FW */
    of_node_put(to_of_node(priv->fwnode));
    free_netdev(ndev);
    return ret;
}

@@ -2448,6 +2450,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
    set_bit(NIC_STATE_REMOVING, &priv->state);
    (void)cancel_work_sync(&priv->service_task);

    /* safe for ACPI FW */
    of_node_put(to_of_node(priv->fwnode));

    free_netdev(ndev);
    return 0;
}

@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
 */
static int hns_nic_nway_reset(struct net_device *netdev)
{
    int ret = 0;
    struct phy_device *phy = netdev->phydev;

    if (netif_running(netdev)) {
        /* if autoneg is disabled, don't restart auto-negotiation */
        if (phy && phy->autoneg == AUTONEG_ENABLE)
            ret = genphy_restart_aneg(phy);
    }
    if (!netif_running(netdev))
        return 0;

    return ret;
    if (!phy)
        return -EOPNOTSUPP;

    if (phy->autoneg != AUTONEG_ENABLE)
        return -EINVAL;

    return genphy_restart_aneg(phy);
}

static u32

@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
    }

    hns_mdio_cmd_write(mdio_dev, is_c45,
                       MDIO_C45_WRITE_ADDR, phy_id, devad);
                       MDIO_C45_READ, phy_id, devad);
    }

    /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
@@ -424,9 +424,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                                         struct rtnl_link_stats64 *stats)
{
    struct i40e_netdev_priv *np = netdev_priv(netdev);
    struct i40e_ring *tx_ring, *rx_ring;
    struct i40e_vsi *vsi = np->vsi;
    struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
    struct i40e_ring *ring;
    int i;

    if (test_bit(__I40E_VSI_DOWN, vsi->state))

@@ -440,24 +440,26 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
        u64 bytes, packets;
        unsigned int start;

        tx_ring = READ_ONCE(vsi->tx_rings[i]);
        if (!tx_ring)
        ring = READ_ONCE(vsi->tx_rings[i]);
        if (!ring)
            continue;
        i40e_get_netdev_stats_struct_tx(tx_ring, stats);
        i40e_get_netdev_stats_struct_tx(ring, stats);

        rx_ring = &tx_ring[1];
        if (i40e_enabled_xdp_vsi(vsi)) {
            ring++;
            i40e_get_netdev_stats_struct_tx(ring, stats);
        }

        ring++;
        do {
            start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
            packets = rx_ring->stats.packets;
            bytes = rx_ring->stats.bytes;
        } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
            start = u64_stats_fetch_begin_irq(&ring->syncp);
            packets = ring->stats.packets;
            bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

        stats->rx_packets += packets;
        stats->rx_bytes += bytes;

        if (i40e_enabled_xdp_vsi(vsi))
            i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
    }
    rcu_read_unlock();

@@ -46,6 +46,7 @@
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/dmi.h>

#include <asm/irq.h>

@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

static int disable_msi = 0;
static int disable_msi = -1;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

@@ -4931,6 +4932,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
    return buf;
}

static const struct dmi_system_id msi_blacklist[] = {
    {
        .ident = "Dell Inspiron 1545",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
            DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
        },
    },
    {
        .ident = "Gateway P-79",
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
            DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
        },
    },
    {}
};

static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct net_device *dev, *dev1;

@@ -5042,6 +5061,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        goto err_out_free_pci;
    }

    if (disable_msi == -1)
        disable_msi = !!dmi_check_system(msi_blacklist);

    if (!disable_msi && pci_enable_msi(pdev) == 0) {
        err = sky2_test_msi(hw);
        if (err) {
@@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
    if (!priv->cmd.context)
        return -ENOMEM;

    if (mlx4_is_mfunc(dev))
        mutex_lock(&priv->cmd.slave_cmd_mutex);
    down_write(&priv->cmd.switch_sem);
    for (i = 0; i < priv->cmd.max_cmds; ++i) {
        priv->cmd.context[i].token = i;

@@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
    down(&priv->cmd.poll_sem);
    priv->cmd.use_events = 1;
    up_write(&priv->cmd.switch_sem);
    if (mlx4_is_mfunc(dev))
        mutex_unlock(&priv->cmd.slave_cmd_mutex);

    return err;
}

@@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
    struct mlx4_priv *priv = mlx4_priv(dev);
    int i;

    if (mlx4_is_mfunc(dev))
        mutex_lock(&priv->cmd.slave_cmd_mutex);
    down_write(&priv->cmd.switch_sem);
    priv->cmd.use_events = 0;

@@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
        down(&priv->cmd.event_sem);

    kfree(priv->cmd.context);
    priv->cmd.context = NULL;

    up(&priv->cmd.poll_sem);
    up_write(&priv->cmd.switch_sem);
    if (mlx4_is_mfunc(dev))
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)

@@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
    int total_pages;
    int total_mem;
    int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
    int tot;

    sq_size = 1 << (log_sq_size + log_sq_sride + 4);
    rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
    total_mem = sq_size + rq_size;
    total_pages =
        roundup_pow_of_two((total_mem + (page_offset << 6)) >>
                           page_shift);
    tot = (total_mem + (page_offset << 6)) >> page_shift;
    total_pages = !tot ? 1 : roundup_pow_of_two(tot);

    return total_pages;
}
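The qp_get_mtt_size() change above guards against rounding a zero page count up to a power of two, which is undefined for that operation; an empty region is treated as one page instead. A short hedged sketch of the same guard in plain C, with an open-coded rounding helper and invented names:

    #include <stdint.h>

    /* Illustration only: round x up to the next power of two (x > 0). */
    static uint64_t round_up_pow2(uint64_t x)
    {
        uint64_t p = 1;

        while (p < x)
            p <<= 1;
        return p;
    }

    /* Never pass 0 to the rounding step: an empty region still costs one page. */
    static uint64_t mtt_pages(uint64_t total_mem, unsigned int page_shift)
    {
        uint64_t tot = total_mem >> page_shift;

        return tot ? round_up_pow2(tot) : 1;
    }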
@@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)

    if (adapter->csr.flags &
        LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
        flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
                LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
        flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
                LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
                LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
                LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;

@@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
        /* map TX interrupt to vector */
        int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
        lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
        if (flags &
            LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
            int_vec_en_auto_clr |= INT_VEC_EN_(vector);
            lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
                              int_vec_en_auto_clr);
        }

        /* Remove TX interrupt from shared mask */
        intr->vector_list[0].int_mask &= ~int_bit;

@@ -1403,7 +1396,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
}

static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
                                     unsigned int frame_length)
                                     unsigned int frame_length,
                                     int nr_frags)
{
    /* called only from within lan743x_tx_xmit_frame.
     * assuming tx->ring_lock has already been acquired.

@@ -1413,6 +1407,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,

    /* wrap up previous descriptor */
    tx->frame_data0 |= TX_DESC_DATA0_EXT_;
    if (nr_frags <= 0) {
        tx->frame_data0 |= TX_DESC_DATA0_LS_;
        tx->frame_data0 |= TX_DESC_DATA0_IOC_;
    }
    tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
    tx_descriptor->data0 = tx->frame_data0;

@@ -1517,8 +1515,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
    u32 tx_tail_flags = 0;

    /* wrap up previous descriptor */
    tx->frame_data0 |= TX_DESC_DATA0_LS_;
    tx->frame_data0 |= TX_DESC_DATA0_IOC_;
    if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
        TX_DESC_DATA0_DTYPE_DATA_) {
        tx->frame_data0 |= TX_DESC_DATA0_LS_;
        tx->frame_data0 |= TX_DESC_DATA0_IOC_;
    }

    tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
    buffer_info = &tx->buffer_info[tx->frame_tail];

@@ -1603,7 +1604,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
    }

    if (gso)
        lan743x_tx_frame_add_lso(tx, frame_length);
        lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);

    if (nr_frags <= 0)
        goto finish;

@@ -1897,7 +1898,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
    return ((++index) % rx->ring_size);
}

static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
{
    int length = 0;

    length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
    return __netdev_alloc_skb(rx->adapter->netdev,
                              length, GFP_ATOMIC | GFP_DMA);
}

static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
                                        struct sk_buff *skb)
{
    struct lan743x_rx_buffer_info *buffer_info;
    struct lan743x_rx_descriptor *descriptor;

@@ -1906,9 +1917,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
    length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
    descriptor = &rx->ring_cpu_ptr[index];
    buffer_info = &rx->buffer_info[index];
    buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
                                          length,
                                          GFP_ATOMIC | GFP_DMA);
    buffer_info->skb = skb;
    if (!(buffer_info->skb))
        return -ENOMEM;
    buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,

@@ -2055,8 +2064,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
        /* packet is available */
        if (first_index == last_index) {
            /* single buffer packet */
            struct sk_buff *new_skb = NULL;
            int packet_length;

            new_skb = lan743x_rx_allocate_skb(rx);
            if (!new_skb) {
                /* failed to allocate next skb.
                 * Memory is very low.
                 * Drop this packet and reuse buffer.
                 */
                lan743x_rx_reuse_ring_element(rx, first_index);
                goto process_extension;
            }

            buffer_info = &rx->buffer_info[first_index];
            skb = buffer_info->skb;
            descriptor = &rx->ring_cpu_ptr[first_index];

@@ -2076,7 +2096,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
            skb_put(skb, packet_length - 4);
            skb->protocol = eth_type_trans(skb,
                                           rx->adapter->netdev);
            lan743x_rx_allocate_ring_element(rx, first_index);
            lan743x_rx_init_ring_element(rx, first_index, new_skb);
        } else {
            int index = first_index;

@@ -2089,26 +2109,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
            if (first_index <= last_index) {
                while ((index >= first_index) &&
                       (index <= last_index)) {
                    lan743x_rx_release_ring_element(rx,
                                                    index);
                    lan743x_rx_allocate_ring_element(rx,
                                                     index);
                    lan743x_rx_reuse_ring_element(rx,
                                                  index);
                    index = lan743x_rx_next_index(rx,
                                                  index);
                }
            } else {
                while ((index >= first_index) ||
                       (index <= last_index)) {
                    lan743x_rx_release_ring_element(rx,
                                                    index);
                    lan743x_rx_allocate_ring_element(rx,
                                                     index);
                    lan743x_rx_reuse_ring_element(rx,
                                                  index);
                    index = lan743x_rx_next_index(rx,
                                                  index);
                }
            }
        }

process_extension:
        if (extension_index >= 0) {
            descriptor = &rx->ring_cpu_ptr[extension_index];
            buffer_info = &rx->buffer_info[extension_index];

@@ -2285,7 +2302,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)

    rx->last_head = 0;
    for (index = 0; index < rx->ring_size; index++) {
        ret = lan743x_rx_allocate_ring_element(rx, index);
        struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);

        ret = lan743x_rx_init_ring_element(rx, index, new_skb);
        if (ret)
            goto cleanup;
    }
@@ -473,19 +473,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,

/* get pq index according to PQ_FLAGS */
static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
                                           u32 pq_flags)
                                           unsigned long pq_flags)
{
    struct qed_qm_info *qm_info = &p_hwfn->qm_info;

    /* Can't have multiple flags set here */
    if (bitmap_weight((unsigned long *)&pq_flags,
    if (bitmap_weight(&pq_flags,
                      sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
        DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
        DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
        goto err;
    }

    if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
        DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
        DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
        goto err;
    }

@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
              (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
               !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

    SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
              (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
               !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

    SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
              !!(accept_filter & QED_ACCEPT_BCAST));

@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
        return rc;
    }

    if (p_params->update_ctl_frame_check) {
        p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
        p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
    }

    /* Update mcast bins for VFs, PF doesn't use this functionality */
    qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

@@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
        u16 num_queues = 0;

        /* Since the feature controls only queue-zones,
         * make sure we have the contexts [rx, tx, xdp] to
         * make sure we have the contexts [rx, xdp, tcs] to
         * match.
         */
        for_each_hwfn(cdev, i) {

@@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
            u16 cids;

            cids = hwfn->pf_params.eth_pf_params.num_cons;
            num_queues += min_t(u16, l2_queues, cids / 3);
            cids /= (2 + info->num_tc);
            num_queues += min_t(u16, l2_queues, cids);
        }

        /* queues might theoretically be >256, but interrupts'

@@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
    if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
        accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
                                         QED_ACCEPT_MCAST_UNMATCHED;
        accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
        accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
                                         QED_ACCEPT_MCAST_UNMATCHED;
    } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
        accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
        accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;