obj-y += notifier.o
obj-y += page_alloc.o
obj-y += preempt.o
+obj-y += random.o
obj-y += rangeset.o
obj-y += sched_credit.o
obj-y += sched_credit2.o
--- /dev/null
+#include <xen/percpu.h>
+#include <xen/random.h>
+#include <xen/time.h>
+#include <asm/random.h>
+
+/*
+ * Per-CPU state for the software fallback generator.  Zero means
+ * "not yet seeded" and triggers (re-)seeding in get_random().
+ */
+static DEFINE_PER_CPU(unsigned int, seed);
+
+/*
+ * Return a 32-bit (pseudo-)random number.
+ *
+ * Prefers the architecture's hardware generator (arch_get_random());
+ * when that yields zero - meaning "unavailable" (or, indistinguishably,
+ * a genuine zero result) - falls back to a per-CPU linear congruential
+ * generator, seeded from the hardware value or, failing that, NOW().
+ */
+unsigned int get_random(void)
+{
+ unsigned int next = this_cpu(seed), val = arch_get_random();
+
+ if ( unlikely(!next) )
+ next = val ?: NOW();
+
+ if ( !val )
+ {
+ unsigned int i;
+
+ /*
+ * Glibc-style LCG; only bits 16..26 of the state are used, so
+ * gather 11 bits per step until all 32 bits of val are filled
+ * (the final << 22 deliberately wraps - unsigned, well defined).
+ */
+ for ( i = 0; i < sizeof(val) * 8; i += 11 )
+ {
+ next = next * 1103515245 + 12345;
+ val |= ((next >> 16) & 0x7ff) << i;
+ }
+ }
+
+ this_cpu(seed) = next;
+
+ return val;
+}
#define __ARM_PERCPU_H__
#ifndef __ASSEMBLY__
+
+#include <xen/types.h>
+#include <asm/cpregs.h>
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/processor.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/processor.h>
+#else
+# error "unknown ARM variant"
+#endif
+
extern char __per_cpu_start[], __per_cpu_data_end[];
extern unsigned long __per_cpu_offset[NR_CPUS];
void percpu_init_areas(void);
--- /dev/null
+#ifndef __ASM_RANDOM_H__
+#define __ASM_RANDOM_H__
+
+/*
+ * ARM: no architectural random-number source is wired up here.
+ * Returning 0 makes the common code (get_random()) fall back to its
+ * software pseudo-random generator.
+ */
+static inline unsigned int arch_get_random(void)
+{
+ return 0;
+}
+
+#endif /* __ASM_RANDOM_H__ */
--- /dev/null
+#ifndef __ASM_RANDOM_H__
+#define __ASM_RANDOM_H__
+
+#include <asm/processor.h>
+
+/*
+ * x86: fetch a 32-bit random number via the RDRAND instruction when
+ * the CPU advertises it.  Returns 0 when RDRAND is unavailable (and,
+ * indistinguishably, when the instruction happens to yield 0); callers
+ * treat 0 as "no hardware entropy" and fall back to software.
+ *
+ * NOTE(review): RDRAND reports success via CF, which is not checked
+ * here; val stays 0 on failure since RDRAND clears the destination
+ * in that case - confirm this matches the intended contract.
+ */
+static inline unsigned int arch_get_random(void)
+{
+ unsigned int val = 0;
+
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_RDRAND) )
+ /* rdrand %eax, emitted as raw bytes for pre-RDRAND assemblers. */
+ asm ( ".byte 0x0f,0xc7,0xf0" : "+a" (val) );
+
+ return val;
+}
+
+#endif /* __ASM_RANDOM_H__ */
#include <xen/bitmap.h>
#include <xen/kernel.h>
+#include <xen/random.h>
typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
return nxt;
}
-#define cpumask_any(srcp) cpumask_first(srcp)
+/*
+ * Pick a random CPU from @srcp rather than always the first set bit,
+ * avoiding a systematic bias towards low-numbered CPUs.  With zero or
+ * one bit set this degenerates to cpumask_first() (which is >=
+ * nr_cpu_ids for an empty mask).
+ */
+static inline unsigned int cpumask_any(const cpumask_t *srcp)
+{
+ unsigned int cpu = cpumask_first(srcp);
+ unsigned int w = cpumask_weight(srcp);
+
+ /* Advance a random number (0 .. w-1) of set bits past the first. */
+ if ( w > 1 && cpu < nr_cpu_ids )
+ for ( w = get_random() % w; w--; )
+ {
+ unsigned int next = cpumask_next(cpu, srcp);
+
+ if ( next >= nr_cpu_ids )
+ break;
+ cpu = next;
+ }
+
+ return cpu;
+}
/*
* Special-case data structure for "single bit set only" constant CPU masks.
--- /dev/null
+#ifndef __XEN_RANDOM_H__
+#define __XEN_RANDOM_H__
+
+/* Return a 32-bit (pseudo-)random number; see common/random.c. */
+unsigned int get_random(void);
+
+#endif /* __XEN_RANDOM_H__ */