CFLAGS += -g
endif
+CFLAGS += -std=gnu99
+
CFLAGS += -Wall -Wstrict-prototypes
# -Wunused-value makes GCC 4.x too aggressive for my taste: ignoring the
# result of any cast expression causes a warning.
AFLAGS-y += -D__ASSEMBLY__
ALL_OBJS := $(ALL_OBJS-y)
+
CFLAGS := $(strip $(CFLAGS) $(CFLAGS-y))
+
+# Most CFLAGS are safe for assembly files:
+#  -std=gnu{89,99} gets confused by #-prefixed end-of-line comments
+AFLAGS += $(patsubst -std=gnu%,,$(CFLAGS))
+
AFLAGS := $(strip $(AFLAGS) $(AFLAGS-y))
include Makefile
$(CC) $(CFLAGS) -c $< -o $@
%.o: %.S $(HDRS) Makefile
- $(CC) $(CFLAGS) $(AFLAGS) -c $< -o $@
+ $(CC) $(AFLAGS) -c $< -o $@
%.i: %.c $(HDRS) Makefile
$(CPP) $(CFLAGS) $< -o $@
+# -std=gnu{89,99} gets confused by # as an end-of-line comment marker
%.s: %.S $(HDRS) Makefile
- $(CPP) $(CFLAGS) $(AFLAGS) $< -o $@
+ $(CPP) $(AFLAGS) $< -o $@
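
Since AFLAGS now inherits every CFLAGS option except -std=gnu*, the .S rules above drop $(CFLAGS) entirely. A minimal sketch of what the patsubst filter does, with illustrative flag values:

    # Illustrative values only: demonstrate the -std=gnu% filtering.
    CFLAGS := -std=gnu99 -Wall -nostdinc
    AFLAGS := $(patsubst -std=gnu%,,$(CFLAGS))
    # AFLAGS is now " -Wall -nostdinc"; the empty replacement leaves a
    # stray space behind, which the $(strip ...) in the first hunk removes.
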
/* acm global binary policy (points to 'local' primary and secondary policies) */
struct acm_binary_policy acm_bin_pol;
/* acm binary policy lock */
-rwlock_t acm_bin_pol_rwlock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(acm_bin_pol_rwlock);
/* until we have endian support in Xen, we discover it at runtime */
u8 little_endian = 1;
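
DEFINE_RWLOCK()/DEFINE_SPINLOCK() attach the initialiser to the definition itself, where a plain brace list is a valid static initialiser even under -std=gnu99 (a compound literal is not a constant expression there). A plausible shape for the macros, assuming Linux-style definitions; the exact Xen versions may differ:

    /* Hypothetical sketch of the definition macros: declaration plus
     * the (now cast-free) brace initialiser. */
    #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
    #define DEFINE_RWLOCK(x)   rwlock_t   x = RW_LOCK_UNLOCKED
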
# Used only by linux/Makefile.
AFLAGS_KERNEL += -mconstant-gp -nostdinc $(CPPFLAGS)
-# Note: .S -> .o rule uses AFLAGS and CFLAGS.
-
CFLAGS += -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
CFLAGS += -mconstant-gp
#CFLAGS += -O3 # -O3 over-inlines making debugging tough!
for_each_cpu_mask(i, cpu_core_map[cpu])
cpu_clear(cpu, cpu_core_map[i]);
- cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+ cpus_clear(cpu_sibling_map[cpu]);
+ cpus_clear(cpu_core_map[cpu]);
}
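
With the casts commented out of CPU_MASK_NONE and CPU_MASK_ALL (see the cpumask.h hunk below), whole-struct assignment of those macros no longer compiles, hence the accessor calls here. A short sketch of the accessor style, assuming the usual cpumask API of this era:

    cpumask_t m;
    cpus_clear(m);            /* replaces m = CPU_MASK_NONE */
    cpus_setall(m);           /* replaces m = CPU_MASK_ALL  */
    cpu_set(3, m);            /* mark CPU 3 in the mask     */
    if ( cpu_isset(3, m) )
        cpu_clear(3, m);      /* and take it out again      */
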
static void
$(CC) $(CFLAGS) -S -o $@ $<
xen.lds: xen.lds.S $(HDRS)
- $(CC) $(CFLAGS) -P -E $(AFLAGS) -o $@ $<
+ $(CC) -P -E $(AFLAGS) -o $@ $<
dom0.bin: $(DOM0_IMAGE)
cp $< $@
$(CC) $(CFLAGS) -S -o $@ $<
xen.lds: $(TARGET_SUBARCH)/xen.lds.S $(HDRS)
- $(CC) $(CFLAGS) -P -E -Ui386 $(AFLAGS) -o $@ $<
+ $(CC) -P -E -Ui386 $(AFLAGS) -o $@ $<
boot/mkelf32: boot/mkelf32.c
$(HOSTCC) $(HOSTCFLAGS) -o $@ $<
int pin;
struct irq_pin_list *entry = irq_2_pin + irq;
unsigned int apicid_value;
- cpumask_t tmp;
-
- cpus_and(tmp, cpumask, cpu_online_map);
- if (cpus_empty(tmp))
- tmp = TARGET_CPUS;
- cpus_and(cpumask, tmp, CPU_MASK_ALL);
+ cpus_and(cpumask, cpumask, cpu_online_map);
+ if (cpus_empty(cpumask))
+ cpumask = TARGET_CPUS;
apicid_value = cpu_mask_to_apicid(cpumask);
/* Prepare to do the io_apic_write */
action->in_flight = 0;
action->shareable = will_share;
action->ack_type = pirq_acktype(irq);
- action->cpu_eoi_map = CPU_MASK_NONE;
+ cpus_clear(action->cpu_eoi_map);
desc->depth = 0;
desc->status |= IRQ_GUEST;
#define MAX_OPROF_SHARED_PAGES 32
/* Lock protecting the following global state */
-static spinlock_t xenoprof_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(xenoprof_lock);
struct domain *active_domains[MAX_OPROF_DOMAINS];
int active_ready[MAX_OPROF_DOMAINS];
#include <public/vcpu.h>
/* Both these structures are protected by the domlist_lock. */
-rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(domlist_lock);
struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;
{
/* Freeing anonymous domain-heap pages. */
for ( i = 0; i < (1 << order); i++ )
- pg[i].u.free.cpumask = CPU_MASK_NONE;
+ cpus_clear(pg[i].u.free.cpumask);
free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
drop_dom_ref = 0;
}
if ( is_idle_domain(d) || ((d->domain_id == 0) && opt_dom0_vcpus_pin) )
v->cpu_affinity = cpumask_of_cpu(processor);
else
- v->cpu_affinity = CPU_MASK_ALL;
+ cpus_setall(v->cpu_affinity);
/* Initialise the per-domain timers. */
init_timer(&v->timer, vcpu_timer_fn, v, v->processor);
void console_force_unlock(void)
{
- console_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&console_lock);
serial_force_unlock(sercon_handle);
console_start_sync();
}
if ( handle == -1 )
return;
- port->rx_lock = SPIN_LOCK_UNLOCKED;
- port->tx_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&port->rx_lock);
+ spin_lock_init(&port->tx_lock);
serial_start_sync(handle);
}
#endif
} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
#define spin_lock_init(x) ((x)->lock = 0)
#ifdef ASM_SUPPORTED
unsigned int break_lock;
#endif
} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0, 0 }
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
#define read_can_lock(rw) (*(volatile int *)(rw) >= 0)
#define write_can_lock(rw) (*(volatile int *)(rw) == 0)
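
The pattern here and in the hunks below is uniform: the compound-literal cast is commented out of the *_UNLOCKED macro so it remains a valid static initialiser under -std=gnu99, and the cast is restored at assignment sites, where an expression is required. A minimal illustration:

    rwlock_t r1 = RW_LOCK_UNLOCKED;         /* initialiser: ok            */
    static rwlock_t r2 = RW_LOCK_UNLOCKED;  /* static initialiser: ok too */

    void reset(rwlock_t *r)
    {
        /* Assignment needs an expression, so the cast is added back
         * to form a compound literal. */
        *r = (rwlock_t) RW_LOCK_UNLOCKED;
    }
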
#define __UNLOCKED (0U)
#define __LOCKED (~__UNLOCKED)
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { __UNLOCKED }
+#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { __UNLOCKED }
static inline void spin_lock_init(spinlock_t *lock)
{
- *lock = SPIN_LOCK_UNLOCKED;
+ *lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
}
static inline int spin_is_locked(spinlock_t *lock)
static inline void _raw_spin_unlock(spinlock_t *lock)
{
sync_before_release();
- *lock = SPIN_LOCK_UNLOCKED;
+ *lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
}
static inline int _raw_spin_trylock(spinlock_t *lock)
volatile unsigned int lock;
} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { __UNLOCKED }
+#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { __UNLOCKED }
static inline void rwlock_init(rwlock_t *lock)
{
- *lock = RW_LOCK_UNLOCKED;
+ *lock = (rwlock_t) RW_LOCK_UNLOCKED;
}
static inline void _raw_read_lock(rwlock_t *lock)
static inline void _raw_write_unlock(rwlock_t *lock)
{
sync_before_release();
- *lock = RW_LOCK_UNLOCKED;
+ *lock = (rwlock_t) RW_LOCK_UNLOCKED;
}
static inline void _raw_read_unlock(rwlock_t *lock)
u8 recurse_cnt;
} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1, -1, 0 }
+#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+#define spin_lock_init(x) do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
static inline void _raw_spin_lock(spinlock_t *lock)
volatile unsigned int lock;
} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS }
+#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
/*
* On x86, we implement read-write locks as a 32-bit counter
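
For reference, the counter scheme the truncated comment above refers to, hedged as the conventional x86 arrangement: the counter starts at RW_LOCK_BIAS, each reader subtracts 1, and a writer subtracts the entire bias.

    #define RW_LOCK_BIAS 0x01000000  /* conventional x86 value            */
    /*
     * counter == RW_LOCK_BIAS      lock is free
     * 0 < counter < RW_LOCK_BIAS   read-held by (BIAS - counter) readers
     * counter <= 0                 write-held
     */
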
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
-(cpumask_t) { { \
+/*(cpumask_t)*/ { { \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }
#else
#define CPU_MASK_ALL \
-(cpumask_t) { { \
+/*(cpumask_t)*/ { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }
#endif
#define CPU_MASK_NONE \
-(cpumask_t) { { \
+/*(cpumask_t)*/ { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
} }
#define CPU_MASK_CPU0 \
-(cpumask_t) { { \
+/*(cpumask_t)*/ { { \
[0] = 1UL \
} }
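
With the casts gone, the CPU_MASK_* macros work only in initialiser contexts; note the [0 ... N] range designators, a GNU C extension, so these macros were never portable expressions anyway. Usage sketch:

    static cpumask_t none = CPU_MASK_NONE;  /* static initialiser: fine */
    cpumask_t boot = CPU_MASK_CPU0;         /* initialiser: fine        */
    /* But assignment such as boot = CPU_MASK_NONE no longer compiles;
     * use cpus_clear(boot) instead. */
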
#if (__GNUC__ > 2)
typedef struct { } spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
#endif
#define spin_lock_init(lock) do { } while(0)
#if (__GNUC__ > 2)
typedef struct { } rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { }
+#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { }
#else
typedef struct { int gcc_is_buggy; } rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0 }
#endif
#define rwlock_init(lock) do { } while(0)
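
On uniprocessor builds the lock bodies are empty and every operation compiles away; the gcc_is_buggy dummy member exists only because gcc 2.x mishandled empty structs. An illustration with hypothetical names, relying on the GNU C zero-size-struct extension:

    typedef struct { } up_lock_t;       /* empty struct: GNU C extension */
    static up_lock_t l = { };           /* still initialisable           */
    int lock_takes_no_space = (sizeof(l) == 0);  /* true with gcc > 2    */
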