/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H
#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>
#include <asm/ptrace.h>
2026-01-29 22:25:33 +08:00
#ifdef CONFIG_CONTEXT_TRACKING_USER
extern void ct_cpu_track_user(int cpu);
2026-01-21 18:59:54 +08:00
/* Called with interrupts disabled. */
2026-01-29 22:25:33 +08:00
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);
2026-01-21 18:59:54 +08:00
2026-01-29 22:25:33 +08:00
extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);
extern void user_enter_callable(void);
extern void user_exit_callable(void);
2026-01-21 18:59:54 +08:00
static inline void user_enter(void)
{
if (context_tracking_enabled())
2026-01-29 22:25:33 +08:00
ct_user_enter(CONTEXT_USER);
2026-01-21 18:59:54 +08:00
}
static inline void user_exit(void)
{
if (context_tracking_enabled())
2026-01-29 22:25:33 +08:00
ct_user_exit(CONTEXT_USER);
2026-01-21 18:59:54 +08:00
}
/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
2026-01-29 22:25:33 +08:00
__ct_user_enter(CONTEXT_USER);
2026-01-21 18:59:54 +08:00
}
static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
2026-01-29 22:25:33 +08:00
__ct_user_exit(CONTEXT_USER);
2026-01-21 18:59:54 +08:00
}
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
2026-01-29 22:25:33 +08:00
if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
!context_tracking_enabled())
2026-01-21 18:59:54 +08:00
return 0;
2026-01-29 22:25:33 +08:00
prev_ctx = __ct_state();
2026-01-21 18:59:54 +08:00
if (prev_ctx != CONTEXT_KERNEL)
2026-01-29 22:25:33 +08:00
ct_user_exit(prev_ctx);
2026-01-21 18:59:54 +08:00
return prev_ctx;
}
static inline void exception_exit(enum ctx_state prev_ctx)
{
2026-01-29 22:25:33 +08:00
if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
context_tracking_enabled()) {
2026-01-21 18:59:54 +08:00
if (prev_ctx != CONTEXT_KERNEL)
2026-01-29 22:25:33 +08:00
ct_user_enter(prev_ctx);
2026-01-21 18:59:54 +08:00
}
}
2026-01-29 22:25:33 +08:00
static __always_inline bool context_tracking_guest_enter(void)
{
if (context_tracking_enabled())
__ct_user_enter(CONTEXT_GUEST);
2026-01-21 18:59:54 +08:00
2026-01-29 22:25:33 +08:00
return context_tracking_enabled_this_cpu();
}
static __always_inline void context_tracking_guest_exit(void)
2026-01-21 18:59:54 +08:00
{
2026-01-29 22:25:33 +08:00
if (context_tracking_enabled())
__ct_user_exit(CONTEXT_GUEST);
2026-01-21 18:59:54 +08:00
}
2026-01-29 22:25:33 +08:00
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
2026-01-21 18:59:54 +08:00
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
2026-01-29 22:25:33 +08:00
static inline int exception_enter(void) { return 0; }
2026-01-21 18:59:54 +08:00
static inline void exception_exit(enum ctx_state prev_ctx) { }
2026-01-29 22:25:33 +08:00
static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static __always_inline void context_tracking_guest_exit(void) { }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */
2026-01-21 18:59:54 +08:00
2026-01-29 22:25:33 +08:00
#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
2026-01-21 18:59:54 +08:00
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
2026-01-29 22:25:33 +08:00
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
2026-01-21 18:59:54 +08:00
2026-01-29 22:25:33 +08:00
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);
2026-01-21 18:59:54 +08:00
2026-01-29 22:25:33 +08:00
/*
* Is the current CPU in an extended quiescent state?
*
* No ordering, as we are sampling CPU-local information.
*/
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
2026-01-21 18:59:54 +08:00
{
2026-01-29 22:25:33 +08:00
return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
2026-01-21 18:59:54 +08:00
}
2026-01-29 22:25:33 +08:00
/*
* Increment the current CPU's context_tracking structure's ->state field
* with ordering. Return the new value.
*/
static __always_inline unsigned long ct_state_inc(int incby)
2026-01-21 18:59:54 +08:00
{
2026-01-29 22:25:33 +08:00
return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
2026-01-21 18:59:54 +08:00
}
2026-01-29 22:25:33 +08:00
static __always_inline bool warn_rcu_enter(void)
2026-01-21 18:59:54 +08:00
{
2026-01-29 22:25:33 +08:00
bool ret = false;
2026-01-21 18:59:54 +08:00
/*
2026-01-29 22:25:33 +08:00
* Horrible hack to shut up recursive RCU isn't watching fail since
* lots of the actual reporting also relies on RCU.
2026-01-21 18:59:54 +08:00
*/
2026-01-29 22:25:33 +08:00
preempt_disable_notrace();
if (rcu_dynticks_curr_cpu_in_eqs()) {
ret = true;
ct_state_inc(RCU_DYNTICKS_IDX);
}
2026-01-21 18:59:54 +08:00
2026-01-29 22:25:33 +08:00
return ret;
2026-01-21 18:59:54 +08:00
}
2026-01-29 22:25:33 +08:00
static __always_inline void warn_rcu_exit(bool rcu)
2026-01-21 18:59:54 +08:00
{
2026-01-29 22:25:33 +08:00
if (rcu)
ct_state_inc(RCU_DYNTICKS_IDX);
preempt_enable_notrace();
2026-01-21 18:59:54 +08:00
}
2026-01-29 22:25:33 +08:00
#else
static inline void ct_idle_enter(void) { }
static inline void ct_idle_exit(void) { }
2026-01-21 18:59:54 +08:00
2026-01-29 22:25:33 +08:00
static __always_inline bool warn_rcu_enter(void) { return false; }
static __always_inline void warn_rcu_exit(bool rcu) { }
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
2026-01-21 18:59:54 +08:00
#endif