/*
 * Per-CPU idle loop; never returns.  Schedules page-scrub work, idles
 * via default_idle(), then drains softirqs.  do_softirq() is now called
 * unconditionally: it checks softirq_pending() itself and returns
 * immediately when nothing is pending, so no local pending test (and
 * hence no cached 'cpu' id) is needed here.
 */
void idle_loop(void)
{
    for ( ; ; )
    {
        page_scrub_schedule_work();
        default_idle();
        do_softirq();
    }
}
cpu_set(cpu, smp_commenced_mask);
while (!cpu_isset(cpu, cpu_online_map)) {
mb();
- if (softirq_pending(0))
- do_softirq();
+ process_pending_timers();
}
return 0;
}
{
void *p;
unsigned long pfn;
- int cpu = smp_processor_id();
printk("Scrubbing Free RAM: ");
if ( (pfn % ((100*1024*1024)/PAGE_SIZE)) == 0 )
printk(".");
- if ( unlikely(softirq_pending(cpu)) )
- do_softirq();
+ process_pending_timers();
/* Quick lock-free check. */
if ( allocated_in_map(pfn) )
asmlinkage void do_softirq(void)
{
- unsigned int i, cpu = smp_processor_id();
+ unsigned int i, cpu;
unsigned long pending;
- pending = softirq_pending(cpu);
- ASSERT(pending != 0);
+ for ( ; ; )
+ {
+ /*
+ * Initialise @cpu on every iteration: SCHEDULE_SOFTIRQ may move
+ * us to another processor.
+ */
+ cpu = smp_processor_id();
+ if ( (pending = softirq_pending(cpu)) == 0 )
+ break;
- do {
i = find_first_set_bit(pending);
clear_bit(i, &softirq_pending(cpu));
(*softirq_handlers[i])();
- } while ( (pending = softirq_pending(cpu)) != 0 );
+ }
}
void open_softirq(int nr, softirq_handler handler)
}
+void process_pending_timers(void)
+{
+ unsigned int cpu = smp_processor_id();
+ ASSERT(!in_irq() && local_irq_is_enabled());
+ if ( test_and_clear_bit(TIMER_SOFTIRQ, &softirq_pending(cpu)) )
+ timer_softirq_action();
+}
+
+
static void dump_timerq(unsigned char key)
{
struct timer *t;
printk("%d... ", 3-i);
for ( j = 0; j < 100; j++ )
{
- if ( softirq_pending(smp_processor_id()) )
- do_softirq();
+ process_pending_timers();
mdelay(10);
}
}
*/
extern void kill_timer(struct timer *timer);
+/*
+ * Process pending timers on this CPU. This should be called periodically
+ * when performing work that prevents softirqs from running in a timely manner.
+ */
+extern void process_pending_timers(void);
+
/*
* Bootstrap initialisation. Must be called before any other timer function.
*/