Skip to content

Commit

Permalink
Merge pull request torvalds#652 from ojeda/ppc-apply-fix
Browse files Browse the repository at this point in the history
ppc: apply fix for hard lockup
  • Loading branch information
ojeda committed Jan 27, 2022
2 parents 3fe71f8 + 8e5bf28 commit ed35954
Show file tree
Hide file tree
Showing 4 changed files with 70 additions and 29 deletions.
57 changes: 47 additions & 10 deletions arch/powerpc/include/asm/hw_irq.h
Original file line number Diff line number Diff line change
Expand Up @@ -345,17 +345,54 @@ static inline bool lazy_irq_pending_nocheck(void)
bool power_pmu_wants_prompt_pmi(void);

/*
* This is called by asynchronous interrupts to conditionally
* re-enable hard interrupts after having cleared the source
* of the interrupt. They are kept disabled if there is a different
* soft-masked interrupt pending that requires hard masking.
* This is called by asynchronous interrupts to check whether to
* conditionally re-enable hard interrupts after having cleared
* the source of the interrupt. They are kept disabled if there
* is a different soft-masked interrupt pending that requires hard
* masking.
*/
static inline void may_hard_irq_enable(void)
/*
 * 64-bit variant: decide whether an asynchronous interrupt handler may
 * hard-enable MSR[EE] after clearing its interrupt source.  Pure predicate
 * as far as the new logic goes — the actual enabling is done separately by
 * do_hard_irq_enable().
 */
static inline bool should_hard_irq_enable(void)
{
/*
 * NOTE(review): the following four lines look like the pre-patch body of
 * the old may_hard_irq_enable() captured by the diff view (they both test
 * PACA_IRQ_MUST_HARD_MASK and enable, duplicating the checks below) —
 * confirm against the upstream tree before relying on this text.
 */
if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
}
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/* Sanity: caller must still be soft-masked and have MSR[EE] clear. */
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
WARN_ON(mfmsr() & MSR_EE);
#endif
#ifdef CONFIG_PERF_EVENTS
/*
 * If the PMU is not running, there is not much reason to enable
 * MSR[EE] in irq handlers because any interrupts would just be
 * soft-masked.
 *
 * TODO: Add test for 64e
 */
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
return false;

/* A pending soft-masked interrupt still requires hard masking. */
if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
return false;

return true;
#else
/* No perf: nothing wants prompt PMIs, so never hard-enable here. */
return false;
#endif
}

/*
 * Do the hard enabling; only call this if should_hard_irq_enable() returned
 * true.  Clears the lazy-disable bookkeeping bit (PACA_IRQ_HARD_DIS) before
 * hard-enabling so the software state stays consistent — keep that order.
 */
static inline void do_hard_irq_enable(void)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/*
 * Caller contract: soft-masked, no pending interrupt that requires hard
 * masking, and MSR[EE] still clear — exactly what the predicate checked.
 */
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
WARN_ON(mfmsr() & MSR_EE);
#endif
/*
 * This allows PMI interrupts (and watchdog soft-NMIs) through.
 * There is no other reason to enable this way.
 */
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
}

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
Expand Down Expand Up @@ -436,7 +473,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}

static inline bool may_hard_irq_enable(void)
/*
 * Always false: this configuration never hard-enables from asynchronous
 * interrupt handlers, so callers skip do_hard_irq_enable() entirely.
 * NOTE(review): presumably the non-lazy-masking (#else) counterpart of the
 * 64-bit version above — the enclosing #ifdef is outside this view.
 */
static inline bool should_hard_irq_enable(void)
{
return false;
}
Expand Down
3 changes: 2 additions & 1 deletion arch/powerpc/kernel/dbell.c
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)

ppc_msgsync();

may_hard_irq_enable();
if (should_hard_irq_enable())
do_hard_irq_enable();

kvmppc_clear_host_ipi(smp_processor_id());
__this_cpu_inc(irq_stat.doorbell_irqs);
Expand Down
3 changes: 2 additions & 1 deletion arch/powerpc/kernel/irq.c
Original file line number Diff line number Diff line change
Expand Up @@ -745,7 +745,8 @@ void __do_irq(struct pt_regs *regs)
irq = ppc_md.get_irq();

/* We can hard enable interrupts now to allow perf interrupts */
may_hard_irq_enable();
if (should_hard_irq_enable())
do_hard_irq_enable();

/* And finally process it */
if (unlikely(!irq))
Expand Down
36 changes: 19 additions & 17 deletions arch/powerpc/kernel/time.c
Original file line number Diff line number Diff line change
Expand Up @@ -609,22 +609,23 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
return;
}

/* Ensure a positive value is written to the decrementer, or else
* some CPUs will continue to take decrementer exceptions. When the
* PPC_WATCHDOG (decrementer based) is configured, keep this at most
* 31 bits, which is about 4 seconds on most systems, which gives
* the watchdog a chance of catching timer interrupt hard lockups.
*/
if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
set_dec(0x7fffffff);
else
set_dec(decrementer_max);

/* Conditionally hard-enable interrupts now that the DEC has been
* bumped to its maximum value
*/
may_hard_irq_enable();
/* Conditionally hard-enable interrupts. */
if (should_hard_irq_enable()) {
/*
* Ensure a positive value is written to the decrementer, or
* else some CPUs will continue to take decrementer exceptions.
* When the PPC_WATCHDOG (decrementer based) is configured,
* keep this at most 31 bits, which is about 4 seconds on most
* systems, which gives the watchdog a chance of catching timer
* interrupt hard lockups.
*/
if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
set_dec(0x7fffffff);
else
set_dec(decrementer_max);

do_hard_irq_enable();
}

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
Expand All @@ -648,8 +649,9 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
__this_cpu_inc(irq_stat.timer_irqs_event);
} else {
now = *next_tb - now;
if (now <= decrementer_max)
set_dec_or_work(now);
if (now > decrementer_max)
now = decrementer_max;
set_dec_or_work(now);
__this_cpu_inc(irq_stat.timer_irqs_others);
}

Expand Down

0 comments on commit ed35954

Please sign in to comment.