x86: add notrace annotations to vsyscall.
Add the notrace annotations to the vsyscall functions: there we are not in kernel context yet (the code runs in user mode), so the tracer function cannot (and must not) be called.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ffdc1a09ae
commit 23adec554a

4 changed files with 14 additions and 10 deletions
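For reference, and not part of this commit: in the kernel, notrace is the per-function opt-out from mcount instrumentation. With ftrace enabled the kernel is built with -pg, which makes gcc emit a call to mcount at the entry of every function; notrace expands (the exact #ifdef guards vary between kernel versions) to the attribute that suppresses that call. A minimal sketch:

/*
 * Minimal sketch of the kernel's notrace annotation (guards and
 * placement vary by kernel version).  The attribute tells gcc not to
 * instrument the function, so no mcount/profiling call is emitted at
 * its entry.
 */
#define notrace	__attribute__((no_instrument_function))

The vsyscall page and the vDSO are mapped into every process and execute at user privilege, so a compiler-generated call into the kernel's tracer from there would be both meaningless and unsafe; marking these functions notrace keeps the instrumentation out.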
arch/x86/kernel/vsyscall_64.c
@@ -42,7 +42,8 @@
 #include <asm/topology.h>
 #include <asm/vgtod.h>
 
-#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define __vsyscall(nr) \
+		__attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
 #define __syscall_clobber "r11","cx","memory"
 
 /*
arch/x86/vdso/vclock_gettime.c
@@ -23,7 +23,7 @@
 
 #define gtod vdso_vsyscall_gtod_data
 
-static long vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
 	asm("syscall" : "=a" (ret) :
@@ -31,7 +31,7 @@ static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	return ret;
 }
 
-static inline long vgetns(void)
+notrace static inline long vgetns(void)
 {
 	long v;
 	cycles_t (*vread)(void);
@@ -40,7 +40,7 @@ static inline long vgetns(void)
 	return (v * gtod->clock.mult) >> gtod->clock.shift;
 }
 
-static noinline int do_realtime(struct timespec *ts)
+notrace static noinline int do_realtime(struct timespec *ts)
 {
 	unsigned long seq, ns;
 	do {
@@ -54,7 +54,8 @@ static noinline int do_realtime(struct timespec *ts)
 }
 
 /* Copy of the version in kernel/time.c which we cannot directly access */
-static void vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
+notrace static void
+vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
 {
 	while (nsec >= NSEC_PER_SEC) {
 		nsec -= NSEC_PER_SEC;
@@ -68,7 +69,7 @@ static void vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
 	ts->tv_nsec = nsec;
 }
 
-static noinline int do_monotonic(struct timespec *ts)
+notrace static noinline int do_monotonic(struct timespec *ts)
 {
 	unsigned long seq, ns, secs;
 	do {
@@ -82,7 +83,7 @@ static noinline int do_monotonic(struct timespec *ts)
 	return 0;
 }
 
-int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
 	if (likely(gtod->sysctl_enabled && gtod->clock.vread))
 		switch (clock) {
@@ -96,7 +97,7 @@ int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 int clock_gettime(clockid_t, struct timespec *)
 	__attribute__((weak, alias("__vdso_clock_gettime")));
 
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
 	if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
arch/x86/vdso/vgetcpu.c
@@ -13,7 +13,8 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+notrace long
+__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
 	unsigned int p;
 
include/asm-x86/vsyscall.h
@@ -24,7 +24,8 @@ enum vsyscall_num {
 	((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
 #define __section_vsyscall_clock __attribute__ \
 	((unused, __section__ (".vsyscall_clock"),aligned(16)))
-#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
+#define __vsyscall_fn \
+	__attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
 
 #define VGETCPU_RDTSCP	1
 #define VGETCPU_LSL	2
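As a standalone illustration of the effect (a hedged sketch, not kernel code; the file name, function names, and build flags below are made up for the example), the same attribute can be observed with plain gcc:

/* notrace_demo.c: compile with "gcc -pg -S notrace_demo.c" and inspect
 * notrace_demo.s.  traced_fn() begins with the profiling call that -pg
 * inserts (mcount, or __fentry__ with -mfentry), while untraced_fn(),
 * carrying the attribute the kernel's notrace expands to, does not.
 */
#define notrace	__attribute__((no_instrument_function))

int traced_fn(int x)
{
	return x * 2;		/* entry is instrumented under -pg */
}

notrace int untraced_fn(int x)
{
	return x * 2;		/* attribute suppresses the profiling call */
}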