lockdep: Prepare for NMI IRQ state tracking
There is no reason not to always, accurately, track IRQ state.

This change also makes IRQ state tracking ignore lockdep_off().

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Ingo Molnar <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Peter Zijlstra committed Jul 10, 2020
1 parent 248591f commit 859d069
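
The "ignore lockdep_off()" part of the message relies on lockdep_recursion being split into a low-bits recursion guard, selected by LOCKDEP_RECURSION_MASK, and a lockdep_off() count kept above that mask (that split is implied by the patch but its definitions are not shown in this diff). Below is a hedged sketch of the idea, not part of this commit: the EXAMPLE_* constants and example_* helpers are illustrative stand-ins, only current->lockdep_recursion and LOCKDEP_RECURSION_MASK appear in the patch itself.

#include <linux/sched.h>        /* current, task_struct::lockdep_recursion */

#define EXAMPLE_RECURSION_BITS  16
#define EXAMPLE_LOCKDEP_OFF     (1U << EXAMPLE_RECURSION_BITS)  /* lockdep_off() counts here */
#define EXAMPLE_RECURSION_MASK  (EXAMPLE_LOCKDEP_OFF - 1)       /* per-call recursion guard */

/* lockdep_off() is assumed to park its count in the high bits... */
static inline void example_lockdep_off(void)
{
        current->lockdep_recursion += EXAMPLE_LOCKDEP_OFF;
}

/*
 * ...so a check that masks only the low bits, as the patch below does,
 * still reads "not recursing" inside a lockdep_off() section and keeps
 * tracking IRQ state there.
 */
static inline bool example_lockdep_recursing(void)
{
        return current->lockdep_recursion & EXAMPLE_RECURSION_MASK;
}
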
Showing 1 changed file with 42 additions and 4 deletions.
kernel/locking/lockdep.c: 46 changes (42 additions & 4 deletions)
@@ -395,7 +395,7 @@ void lockdep_init_task(struct task_struct *task)
 
 static __always_inline void lockdep_recursion_finish(void)
 {
-        if (WARN_ON_ONCE(--current->lockdep_recursion))
+        if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
                 current->lockdep_recursion = 0;
 }
 
@@ -3646,7 +3646,16 @@ static void __trace_hardirqs_on_caller(void)
  */
 void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
-        if (unlikely(!debug_locks || current->lockdep_recursion))
+        if (unlikely(!debug_locks))
+                return;
+
+        /*
+         * NMIs do not (and cannot) track lock dependencies, nothing to do.
+         */
+        if (unlikely(in_nmi()))
+                return;
+
+        if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
                 return;
 
         if (unlikely(current->hardirqs_enabled)) {
@@ -3692,7 +3701,27 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        if (unlikely(!debug_locks || curr->lockdep_recursion))
+        if (unlikely(!debug_locks))
+                return;
+
+        /*
+         * NMIs can happen in the middle of local_irq_{en,dis}able() where the
+         * tracking state and hardware state are out of sync.
+         *
+         * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
+         * and not rely on hardware state like normal interrupts.
+         */
+        if (unlikely(in_nmi())) {
+                /*
+                 * Skip:
+                 *  - recursion check, because NMI can hit lockdep;
+                 *  - hardware state check, because above;
+                 *  - chain_key check, see lockdep_hardirqs_on_prepare().
+                 */
+                goto skip_checks;
+        }
+
+        if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
                 return;
 
         if (curr->hardirqs_enabled) {
@@ -3720,6 +3749,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
         DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
                             current->curr_chain_key);
 
+skip_checks:
         /* we'll do an OFF -> ON transition: */
         curr->hardirqs_enabled = 1;
         curr->hardirq_enable_ip = ip;
@@ -3735,7 +3765,15 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        if (unlikely(!debug_locks || curr->lockdep_recursion))
+        if (unlikely(!debug_locks))
+                return;
+
+        /*
+         * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
+         * they will restore the software state. This ensures the software
+         * state is consistent inside NMIs as well.
+         */
+        if (unlikely(!in_nmi() && (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)))
                 return;
 
         /*
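
The comment added to lockdep_hardirqs_on() above states that NMIs must save lockdep_hardirqs_enabled() and restore the software IRQ state from that, rather than trusting the hardware flags. A hedged sketch of that save/restore pattern, not part of this commit: the example_nmi_* wrappers are hypothetical, while lockdep_hardirqs_{on,off}() and current->hardirqs_enabled are the interfaces the diff itself touches.

#include <linux/irqflags.h>     /* lockdep_hardirqs_{on,off}() */
#include <linux/kernel.h>       /* _RET_IP_ */
#include <linux/sched.h>        /* current */
#include <linux/types.h>        /* bool */

static inline bool example_nmi_enter(void)
{
        /* Save lockdep's view of IRQ state, not the hardware flags. */
        bool irqs_were_enabled = current->hardirqs_enabled;

        /* An NMI runs with IRQs disabled as far as lockdep is concerned. */
        lockdep_hardirqs_off(_RET_IP_);
        return irqs_were_enabled;
}

static inline void example_nmi_exit(bool irqs_were_enabled)
{
        /* On the way out, restore the saved software state. */
        if (irqs_were_enabled)
                lockdep_hardirqs_on(_RET_IP_);
}

Restoring from the saved software state keeps lockdep's bookkeeping consistent even when the NMI lands between a hardware IRQ-flags change and the matching tracking update, which is the out-of-sync window the new in_nmi() handling above guards against.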