u32 max_entries;
u32 map_flags;
u32 pages;
- bool unpriv_array;
+
struct user_struct *user;
const struct bpf_map_ops *ops;
+#ifdef __GENKSYMS__
struct work_struct work;
+#else
+	union {
+		struct work_struct work;
+		struct {
+			bool unpriv_array;
+			u32 index_mask;
+		};
+	};
+#endif
atomic_t usercnt;
};
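
The #ifdef __GENKSYMS__ split above is a common kABI-preservation idiom in distro kernels: genksyms still sees the old layout (a bare work_struct), so symbol CRCs do not change, while the real build overlays the two new fields onto the storage of 'work' through an anonymous union, leaving sizeof(struct bpf_map) and every member offset intact. That is also why the standalone 'bool unpriv_array;' member is removed above and re-added inside the union. The overlay is safe because the kernel only touches 'work' during deferred map teardown, after the last lookup has run. A minimal standalone sketch of the pattern (fake_work/fake_map are illustrative names, not kernel types; build without OLD_ABI defined):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct work_struct; only its size matters here. */
    struct fake_work { void *entries[4]; };

    struct fake_map {
        uint32_t max_entries;
    #ifdef OLD_ABI                      /* plays the role of __GENKSYMS__ */
        struct fake_work work;
    #else
        union {
            struct fake_work work;      /* old member keeps its offset */
            struct {                    /* new fields reuse its storage */
                bool unpriv_array;
                uint32_t index_mask;
            };
        };
    #endif
    };

    /* The new fields must fit inside the storage they shadow, or the
     * struct would grow and break the kABI anyway. */
    _Static_assert(sizeof(struct { bool b; uint32_t m; }) <=
                   sizeof(struct fake_work), "overlay too large");

    int main(void)
    {
        struct fake_map m = { .max_entries = 8, .index_mask = 7 };

        /* Valid until teardown, when initializing 'work' clobbers them. */
        printf("unpriv=%d mask=%u\n", m.unpriv_array, m.index_mask);
        return 0;
    }
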
struct bpf_array {
struct bpf_map map;
u32 elem_size;
- u32 index_mask;
/* 'ownership' of prog_array is claimed by the first program that
* is going to use this map or by the first program which FD is stored
* in the map to make sure that all callers and callees have the same
array = bpf_map_area_alloc(array_size);
if (!array)
return ERR_PTR(-ENOMEM);
- array->index_mask = index_mask;
+ array->map.index_mask = index_mask;
array->map.unpriv_array = unpriv;
/* copy mandatory map attributes */
if (unlikely(index >= array->map.max_entries))
return NULL;
- return array->value + array->elem_size * (index & array->index_mask);
+ return array->value + array->elem_size * (index & array->map.index_mask);
}
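
This hunk and every indexed access below (the percpu lookup, copy, and update paths) follow the same two-step pattern: the max_entries comparison rejects a bad index architecturally, and the AND with index_mask clamps it arithmetically, so even a mispredicted, speculatively executed path cannot compute an out-of-bounds address (the Spectre-v1 class gadget this patch closes). Upstream computes the mask at allocation time as roundup_pow_of_two(max_entries) - 1 and, for unprivileged maps, also rounds the backing store up to that power of two, so the masked index always lands on real storage. A userspace sketch of the arithmetic (pow2_roundup and array_lookup are illustrative helpers):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the kernel's roundup_pow_of_two(). */
    static uint32_t pow2_roundup(uint32_t n)
    {
        uint32_t p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    #define MAX_ENTRIES 6u

    /* Backing store rounded up to a power of two, as done for
     * unprivileged arrays, so a masked index always hits real storage. */
    static int values[8];

    static int *array_lookup(uint32_t index, uint32_t index_mask)
    {
        if (index >= MAX_ENTRIES)
            return NULL;    /* architectural check; speculation may pass it */
        /* Arithmetic clamp: in bounds even on a mispredicted path. */
        return &values[index & index_mask];
    }

    int main(void)
    {
        uint32_t index_mask = pow2_roundup(MAX_ENTRIES) - 1;    /* 0x7 */

        printf("mask=0x%x in-range=%p out-of-range=%p\n", index_mask,
               (void *)array_lookup(5, index_mask),
               (void *)array_lookup(9, index_mask));
        return 0;
    }
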
/* Called from eBPF program */
if (unlikely(index >= array->map.max_entries))
return NULL;
- return this_cpu_ptr(array->pptrs[index & array->index_mask]);
+ return this_cpu_ptr(array->pptrs[index & array->map.index_mask]);
}
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
*/
size = round_up(map->value_size, 8);
rcu_read_lock();
- pptr = array->pptrs[index & array->index_mask];
+ pptr = array->pptrs[index & array->map.index_mask];
for_each_possible_cpu(cpu) {
bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
off += size;
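
For per-cpu arrays one logical element is really one copy per possible CPU, and the (truncated) copy loop above flattens those copies into the caller's buffer: each copy occupies round_up(value_size, 8) bytes, since the per-cpu element itself is allocated at that rounded size, laid out back to back for every possible CPU. A small sketch of the layout arithmetic, with a fixed CPU count standing in for for_each_possible_cpu() (all names illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS     4                       /* possible-CPU count */
    #define VALUE_SIZE  5u                      /* map->value_size */
    #define ROUND_UP8(x) (((x) + 7u) & ~7u)     /* round_up(x, 8) */

    int main(void)
    {
        /* Per-CPU copies are allocated at the rounded-up element size. */
        uint8_t percpu[NR_CPUS][ROUND_UP8(VALUE_SIZE)];
        /* The user buffer holds one rounded chunk per possible CPU. */
        uint8_t out[NR_CPUS * ROUND_UP8(VALUE_SIZE)];
        uint32_t size = ROUND_UP8(VALUE_SIZE);  /* 8 */
        uint32_t off = 0;
        int cpu;

        memset(percpu, 0xab, sizeof(percpu));

        /* Mirrors the copy loop above: one aligned chunk per CPU. */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            memcpy(out + off, percpu[cpu], size);
            off += size;
        }
        printf("copied %u bytes total, %u per cpu\n", off, size);
        return 0;
    }
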
return -EEXIST;
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
- memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
+ memcpy(this_cpu_ptr(array->pptrs[index & array->map.index_mask]),
value, map->value_size);
else
memcpy(array->value +
- array->elem_size * (index & array->index_mask),
+ array->elem_size * (index & array->map.index_mask),
value, map->value_size);
return 0;
}
*/
size = round_up(map->value_size, 8);
rcu_read_lock();
- pptr = array->pptrs[index & array->index_mask];
+ pptr = array->pptrs[index & array->map.index_mask];
for_each_possible_cpu(cpu) {
bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
off += size;
insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
map_ptr->max_entries, 2);
insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
-				container_of(map_ptr,
-					     struct bpf_array,
-					     map)->index_mask);
+				map_ptr->index_mask);
insn_buf[2] = *insn;
cnt = 3;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
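
This last hunk is the verifier's tail-call fixup (fixup_bpf_calls() upstream, which only emits it when unpriv_array is set): rather than teaching every JIT about the mask, the verifier rewrites each bpf_tail_call() site into a three-instruction sequence that bounds-checks and then masks the index register, BPF_REG_3; the literal 2 in BPF_JMP_IMM is the jump offset that skips both the AND and the original call when the index is out of range. With index_mask now in struct bpf_map itself, the container_of() detour through struct bpf_array is gone. Rendered as C, the emitted sequence behaves roughly like this (an illustrative rendering, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* insn_buf[0]: if (r3 >= max_entries) goto out;  (BPF_JGE, skip 2)
     * insn_buf[1]: r3 &= index_mask;                  (BPF_ALU32 AND)
     * insn_buf[2]: the original tail-call instruction
     */
    static int patched_tail_call(uint32_t r3, uint32_t max_entries,
                                 uint32_t index_mask)
    {
        if (r3 >= max_entries)
            return -1;          /* out-of-range tail call is a no-op */
        r3 &= index_mask;       /* bounded even under misprediction */
        return (int)r3;         /* index the call would actually use */
    }

    int main(void)
    {
        printf("in-range=%d out-of-range=%d\n",
               patched_tail_call(3, 6, 7),     /* 3: masked, call taken */
               patched_tail_call(9, 6, 7));    /* -1: falls through */
        return 0;
    }
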