* multiple correctable errors between two polls. In that case,
 * raise the polling frequency above its normal rate.
*/
-static void mce_amd_work_fn(void *data)
+static void cf_check mce_amd_work_fn(void *data)
{
on_each_cpu(mce_amd_checkregs, data, 1);
}
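
(For context: cf_check marks functions that are only ever reached through a
function pointer, so the compiler gives them an ENDBR64 landing pad under CET
Indirect Branch Tracking. A minimal sketch of what the annotation might expand
to — the header location and Kconfig option name here are assumptions, not
lines quoted from this patch:)

#ifdef CONFIG_XEN_IBT
# define cf_check __attribute__((cf_check)) /* GCC: emit ENDBR64 at entry */
#else
# define cf_check                           /* no-op without IBT */
#endif
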
}
-static void mce_work_fn(void *data)
+static void cf_check mce_work_fn(void *data)
{
on_each_cpu(mce_checkregs, NULL, 1);
/* This function should be called soon after each time the MSB of the
* pmtimer register rolls over, to make sure we update the status
* registers and SCI at least once per rollover */
-static void pmt_timer_callback(void *opaque)
+static void cf_check pmt_timer_callback(void *opaque)
{
PMTState *s = opaque;
uint32_t pmt_cycles_until_flip;
s->use_timer = 0;
}
-static void rtc_update_timer(void *opaque)
+static void cf_check rtc_update_timer(void *opaque)
{
RTCState *s = opaque;
spin_unlock(&s->lock);
}
-static void rtc_update_timer2(void *opaque)
+static void cf_check rtc_update_timer2(void *opaque)
{
RTCState *s = opaque;
}
}
-static void rtc_alarm_cb(void *opaque)
+static void cf_check rtc_alarm_cb(void *opaque)
{
RTCState *s = opaque;
vs->started = false;
}
-static void stimer_expire(void *data)
+static void cf_check stimer_expire(void *data)
{
struct viridian_stimer *vs = data;
struct vcpu *v = vs->v;
pt_vcpu_unlock(v);
}
-static void pt_timer_fn(void *data)
+static void cf_check pt_timer_fn(void *data)
{
struct periodic_time *pt = data;
spin_unlock(&lock);
}
-static void irq_ratelimit_timer_fn(void *data)
+static void cf_check irq_ratelimit_timer_fn(void *data)
{
struct irq_desc *desc, *tmp;
unsigned long flags;
static void set_eoi_ready(void *data);
-static void irq_guest_eoi_timer_fn(void *data)
+static void cf_check irq_guest_eoi_timer_fn(void *data)
{
struct irq_desc *desc = data;
unsigned int i, irq = desc - irq_desc;
return;
}
-static void nmi_timer_fn(void *unused)
+static void cf_check nmi_timer_fn(void *unused)
{
this_cpu(nmi_timer_ticks)++;
set_timer(&this_cpu(nmi_timer), NOW() + MILLISECS(1000));
return (stime_platform_stamp + scale_delta(diff, &plt_scale));
}
-static void plt_overflow(void *unused)
+static void cf_check plt_overflow(void *unused)
{
int i;
u64 count;
static void (*time_calibration_rendezvous_fn)(void *) =
time_calibration_std_rendezvous;
-static void time_calibration(void *unused)
+static void cf_check time_calibration(void *unused)
{
struct calibration_rendezvous r = {
.semaphore = ATOMIC_INIT(0)
stop_timer(&rdp->idle_timer);
}
-static void rcu_idle_timer_handler(void* data)
+static void cf_check rcu_idle_timer_handler(void *data)
{
perfc_incr(rcu_idle_timer);
static DEFINE_SPINLOCK(sched_free_cpu_lock);
/* Various timer handlers. */
-static void s_timer_fn(void *unused);
-static void vcpu_periodic_timer_fn(void *data);
-static void vcpu_singleshot_timer_fn(void *data);
-static void poll_timer_fn(void *data);
+static void cf_check s_timer_fn(void *unused);
+static void cf_check vcpu_periodic_timer_fn(void *data);
+static void cf_check vcpu_singleshot_timer_fn(void *data);
+static void cf_check poll_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
return 0;
}
-static void domain_watchdog_timeout(void *data)
+static void cf_check domain_watchdog_timeout(void *data)
{
struct domain *d = data;
}
/* The scheduler timer: force a run through the scheduler */
-static void s_timer_fn(void *unused)
+static void cf_check s_timer_fn(void *unused)
{
raise_softirq(SCHEDULE_SOFTIRQ);
SCHED_STAT_CRANK(sched_irq);
}
/* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
-static void vcpu_periodic_timer_fn(void *data)
+static void cf_check vcpu_periodic_timer_fn(void *data)
{
struct vcpu *v = data;
vcpu_periodic_timer_work(v);
}
/* Per-VCPU single-shot timer function: sends a virtual timer interrupt. */
-static void vcpu_singleshot_timer_fn(void *data)
+static void cf_check vcpu_singleshot_timer_fn(void *data)
{
struct vcpu *v = data;
send_timer_event(v);
}
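
(The handlers above are invoked only through the function pointer stored by the
timer subsystem, which is why each needs the annotation. A hedged sketch of the
registration path — the signature as declared in xen/include/xen/timer.h and an
illustrative call site, both assumptions rather than lines from this patch:)

void init_timer(struct timer *timer,
                void (*function)(void *data),
                void *data, unsigned int cpu);

/* e.g. during vcpu setup; the timer core later performs function(data)
 * as an indirect call, which CET-IBT checks for an ENDBR64 target: */
init_timer(&v->periodic_timer, vcpu_periodic_timer_fn, v, v->processor);
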
/* SCHEDOP_poll timeout callback. */
-static void poll_timer_fn(void *data)
+static void cf_check poll_timer_fn(void *data)
{
struct vcpu *v = data;
struct timer master_ticker;
};
-static void csched_tick(void *_cpu);
-static void csched_acct(void *dummy);
+static void cf_check csched_tick(void *_cpu);
+static void cf_check csched_acct(void *dummy);
static inline int
__unit_on_runq(const struct csched_unit *svc)
pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
}
-static void
-csched_acct(void* dummy)
+static void cf_check csched_acct(void *dummy)
{
struct csched_private *prv = dummy;
unsigned long flags;
set_timer( &prv->master_ticker, NOW() + prv->tslice);
}
-static void
-csched_tick(void *_cpu)
+static void cf_check csched_tick(void *_cpu)
{
unsigned int cpu = (unsigned long)_cpu;
const struct sched_resource *sr = get_sched_res(cpu);
sdom->budget += sdom->tot_budget;
}
-static void replenish_domain_budget(void* data)
+static void cf_check replenish_domain_budget(void *data)
{
struct csched2_dom *sdom = data;
unsigned long flags;
#define TRC_RTDS_SCHED_TASKLET TRC_SCHED_CLASS_EVT(RTDS, 5)
#define TRC_RTDS_SCHEDULE TRC_SCHED_CLASS_EVT(RTDS, 6)
-static void repl_timer_handler(void *data);
+static void cf_check repl_timer_handler(void *data);
/*
* System-wide private data, include global RunQueue/DepletedQ
* The replenishment timer handler picks units
* from the replq and does the actual replenishment.
*/
-static void repl_timer_handler(void *data){
+static void cf_check repl_timer_handler(void *data)
+{
s_time_t now;
const struct scheduler *ops = data;
struct rt_private *prv = rt_priv(ops);
set_timer(&dbgp->timer, NOW() + timeout);
}
-static void ehci_dbgp_poll(void *data)
+static void cf_check ehci_dbgp_poll(void *data)
{
poll_port = data;
#ifdef run_in_exception_handler
static void enable_exar_enhanced_bits(const struct ns16550 *uart);
#endif
-static void ns16550_delayed_resume(void *data);
+static void cf_check ns16550_delayed_resume(void *data);
static u8 ns_read_reg(const struct ns16550 *uart, unsigned int reg)
{
set_timer(&uart->timer, NOW() + MILLISECS(uart->timeout_ms));
}
-static void ns16550_poll(void *data)
+static void cf_check ns16550_poll(void *data)
{
this_cpu(poll_port) = data;
#ifdef run_in_exception_handler
}
static int delayed_resume_tries;
-static void ns16550_delayed_resume(void *data)
+static void cf_check ns16550_delayed_resume(void *data)
{
struct serial_port *port = data;
struct ns16550 *uart = port->uart;
}
}
-static void do_dbs_timer(void *dbs)
+static void cf_check do_dbs_timer(void *dbs)
{
struct cpu_dbs_info_s *dbs_info = (struct cpu_dbs_info_s *)dbs;
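
(Finally, a self-contained userspace illustration of the mechanism these hunks
rely on — not Xen code; the GCC flags and attribute spelling are assumptions to
verify against your toolchain:)

/* Build: gcc -fcf-protection=branch -mmanual-endbr demo.c */
#include <stdio.h>

#define cf_check __attribute__((cf_check))

static void cf_check tick(void *data)   /* gets an ENDBR64 landing pad */
{
    printf("tick: %s\n", (const char *)data);
}

int main(void)
{
    void (*fn)(void *) = tick; /* indirect call site: the CPU checks that
                                * the target begins with ENDBR64 */
    fn("hello");
    return 0;
}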