Skip to content

Commit eb71ab2

Browse files
committed
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov: - Fix alignment of arm64 JIT buffer to prevent atomic tearing (Fuad Tabba) - Fix invariant violation for single value tnums in the verifier (Harishankar Vishwanathan, Paul Chaignon) - Fix a bunch of issues found by ASAN in selftests/bpf (Ihor Solodrai) - Fix race in devmap and cpumap on PREEMPT_RT (Jiayuan Chen) - Fix show_fdinfo of kprobe_multi when cookies are not present (Jiri Olsa) - Fix race in freeing special fields in BPF maps to prevent memory leaks (Kumar Kartikeya Dwivedi) - Fix OOB read in dmabuf_collector (T.J. Mercier) * tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (36 commits) selftests/bpf: Avoid simplification of crafted bounds test selftests/bpf: Test refinement of single-value tnum bpf: Improve bounds when tnum has a single possible value bpf: Introduce tnum_step to step through tnum's members bpf: Fix race in devmap on PREEMPT_RT bpf: Fix race in cpumap on PREEMPT_RT selftests/bpf: Add tests for special fields races bpf: Retire rcu_trace_implies_rcu_gp() from local storage bpf: Delay freeing fields in local storage bpf: Lose const-ness of map in map_check_btf() bpf: Register dtor for freeing special fields selftests/bpf: Fix OOB read in dmabuf_collector selftests/bpf: Fix a memory leak in xdp_flowtable test bpf: Fix stack-out-of-bounds write in devmap bpf: Fix kprobe_multi cookies access in show_fdinfo callback bpf, arm64: Force 8-byte alignment for JIT buffer to prevent atomic tearing selftests/bpf: Don't override SIGSEGV handler with ASAN selftests/bpf: Check BPFTOOL env var in detect_bpftool_path() selftests/bpf: Fix out-of-bounds array access bugs reported by ASAN selftests/bpf: Fix array bounds warning in jit_disasm_helpers ...
2 parents 63a43fa + b9c0a5c commit eb71ab2

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

73 files changed

+1180
-236
lines changed

arch/arm64/net/bpf_jit_comp.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2119,7 +2119,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
21192119
extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
21202120
image_size = extable_offset + extable_size;
21212121
ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
2122-
sizeof(u32), &header, &image_ptr,
2122+
sizeof(u64), &header, &image_ptr,
21232123
jit_fill_hole);
21242124
if (!ro_header) {
21252125
prog = orig_prog;

include/linux/bpf.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ struct bpf_map_ops {
124124
u32 (*map_fd_sys_lookup_elem)(void *ptr);
125125
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
126126
struct seq_file *m);
127-
int (*map_check_btf)(const struct bpf_map *map,
127+
int (*map_check_btf)(struct bpf_map *map,
128128
const struct btf *btf,
129129
const struct btf_type *key_type,
130130
const struct btf_type *value_type);
@@ -656,7 +656,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
656656
map->ops->map_seq_show_elem;
657657
}
658658

659-
int map_check_no_btf(const struct bpf_map *map,
659+
int map_check_no_btf(struct bpf_map *map,
660660
const struct btf *btf,
661661
const struct btf_type *key_type,
662662
const struct btf_type *value_type);

include/linux/bpf_local_storage.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ u32 bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
176176
void bpf_local_storage_map_free(struct bpf_map *map,
177177
struct bpf_local_storage_cache *cache);
178178

179-
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
179+
int bpf_local_storage_map_check_btf(struct bpf_map *map,
180180
const struct btf *btf,
181181
const struct btf_type *key_type,
182182
const struct btf_type *value_type);

include/linux/bpf_mem_alloc.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@ struct bpf_mem_alloc {
1414
struct obj_cgroup *objcg;
1515
bool percpu;
1616
struct work_struct work;
17+
void (*dtor_ctx_free)(void *ctx);
18+
void *dtor_ctx;
1719
};
1820

1921
/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
@@ -32,6 +34,10 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg
3234
/* The percpu allocation with a specific unit size. */
3335
int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
3436
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
37+
void bpf_mem_alloc_set_dtor(struct bpf_mem_alloc *ma,
38+
void (*dtor)(void *obj, void *ctx),
39+
void (*dtor_ctx_free)(void *ctx),
40+
void *ctx);
3541

3642
/* Check the allocation size for kmalloc equivalent allocator */
3743
int bpf_mem_alloc_check_size(bool percpu, size_t size);

include/linux/tnum.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -131,4 +131,7 @@ static inline bool tnum_subreg_is_const(struct tnum a)
131131
return !(tnum_subreg(a)).mask;
132132
}
133133

134+
/* Returns the smallest member of t larger than z */
135+
u64 tnum_step(struct tnum t, u64 z);
136+
134137
#endif /* _LINUX_TNUM_H */

kernel/bpf/arena.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -303,7 +303,7 @@ static long arena_map_update_elem(struct bpf_map *map, void *key,
303303
return -EOPNOTSUPP;
304304
}
305305

306-
static int arena_map_check_btf(const struct bpf_map *map, const struct btf *btf,
306+
static int arena_map_check_btf(struct bpf_map *map, const struct btf *btf,
307307
const struct btf_type *key_type, const struct btf_type *value_type)
308308
{
309309
return 0;

kernel/bpf/arraymap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -548,7 +548,7 @@ static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
548548
rcu_read_unlock();
549549
}
550550

551-
static int array_map_check_btf(const struct bpf_map *map,
551+
static int array_map_check_btf(struct bpf_map *map,
552552
const struct btf *btf,
553553
const struct btf_type *key_type,
554554
const struct btf_type *value_type)

kernel/bpf/bloom_filter.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,7 @@ static long bloom_map_update_elem(struct bpf_map *map, void *key,
180180
return -EINVAL;
181181
}
182182

183-
static int bloom_map_check_btf(const struct bpf_map *map,
183+
static int bloom_map_check_btf(struct bpf_map *map,
184184
const struct btf *btf,
185185
const struct btf_type *key_type,
186186
const struct btf_type *value_type)

kernel/bpf/bpf_insn_array.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ static long insn_array_delete_elem(struct bpf_map *map, void *key)
9898
return -EINVAL;
9999
}
100100

101-
static int insn_array_check_btf(const struct bpf_map *map,
101+
static int insn_array_check_btf(struct bpf_map *map,
102102
const struct btf *btf,
103103
const struct btf_type *key_type,
104104
const struct btf_type *value_type)

kernel/bpf/bpf_local_storage.c

Lines changed: 39 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -107,14 +107,12 @@ static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
107107
{
108108
struct bpf_local_storage *local_storage;
109109

110-
/* If RCU Tasks Trace grace period implies RCU grace period, do
111-
* kfree(), else do kfree_rcu().
110+
/*
111+
* RCU Tasks Trace grace period implies RCU grace period, do
112+
* kfree() directly.
112113
*/
113114
local_storage = container_of(rcu, struct bpf_local_storage, rcu);
114-
if (rcu_trace_implies_rcu_gp())
115-
kfree(local_storage);
116-
else
117-
kfree_rcu(local_storage, rcu);
115+
kfree(local_storage);
118116
}
119117

120118
/* Handle use_kmalloc_nolock == false */
@@ -138,10 +136,11 @@ static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
138136

139137
static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
140138
{
141-
if (rcu_trace_implies_rcu_gp())
142-
bpf_local_storage_free_rcu(rcu);
143-
else
144-
call_rcu(rcu, bpf_local_storage_free_rcu);
139+
/*
140+
* RCU Tasks Trace grace period implies RCU grace period, do
141+
* kfree() directly.
142+
*/
143+
bpf_local_storage_free_rcu(rcu);
145144
}
146145

147146
static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
@@ -164,24 +163,37 @@ static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
164163
bpf_local_storage_free_trace_rcu);
165164
}
166165

167-
/* rcu tasks trace callback for use_kmalloc_nolock == false */
168-
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
166+
/* rcu callback for use_kmalloc_nolock == false */
167+
static void __bpf_selem_free_rcu(struct rcu_head *rcu)
169168
{
170169
struct bpf_local_storage_elem *selem;
170+
struct bpf_local_storage_map *smap;
171171

172172
selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
173-
if (rcu_trace_implies_rcu_gp())
174-
kfree(selem);
175-
else
176-
kfree_rcu(selem, rcu);
173+
/* bpf_selem_unlink_nofail may have already cleared smap and freed fields. */
174+
smap = rcu_dereference_check(SDATA(selem)->smap, 1);
175+
176+
if (smap)
177+
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
178+
kfree(selem);
179+
}
180+
181+
/* rcu tasks trace callback for use_kmalloc_nolock == false */
182+
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
183+
{
184+
/*
185+
* RCU Tasks Trace grace period implies RCU grace period, do
186+
* kfree() directly.
187+
*/
188+
__bpf_selem_free_rcu(rcu);
177189
}
178190

179191
/* Handle use_kmalloc_nolock == false */
180192
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
181193
bool vanilla_rcu)
182194
{
183195
if (vanilla_rcu)
184-
kfree_rcu(selem, rcu);
196+
call_rcu(&selem->rcu, __bpf_selem_free_rcu);
185197
else
186198
call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
187199
}
@@ -195,37 +207,29 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
195207
/* The bpf_local_storage_map_free will wait for rcu_barrier */
196208
smap = rcu_dereference_check(SDATA(selem)->smap, 1);
197209

198-
if (smap) {
199-
migrate_disable();
210+
if (smap)
200211
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
201-
migrate_enable();
202-
}
203212
kfree_nolock(selem);
204213
}
205214

206215
static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
207216
{
208-
if (rcu_trace_implies_rcu_gp())
209-
bpf_selem_free_rcu(rcu);
210-
else
211-
call_rcu(rcu, bpf_selem_free_rcu);
217+
/*
218+
* RCU Tasks Trace grace period implies RCU grace period, do
219+
* kfree() directly.
220+
*/
221+
bpf_selem_free_rcu(rcu);
212222
}
213223

214224
void bpf_selem_free(struct bpf_local_storage_elem *selem,
215225
bool reuse_now)
216226
{
217-
struct bpf_local_storage_map *smap;
218-
219-
smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
220-
221227
if (!selem->use_kmalloc_nolock) {
222228
/*
223229
* No uptr will be unpin even when reuse_now == false since uptr
224230
* is only supported in task local storage, where
225231
* smap->use_kmalloc_nolock == true.
226232
*/
227-
if (smap)
228-
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
229233
__bpf_selem_free(selem, reuse_now);
230234
return;
231235
}
@@ -797,7 +801,7 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
797801
return 0;
798802
}
799803

800-
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
804+
int bpf_local_storage_map_check_btf(struct bpf_map *map,
801805
const struct btf *btf,
802806
const struct btf_type *key_type,
803807
const struct btf_type *value_type)
@@ -958,10 +962,9 @@ void bpf_local_storage_map_free(struct bpf_map *map,
958962
*/
959963
synchronize_rcu();
960964

961-
if (smap->use_kmalloc_nolock) {
962-
rcu_barrier_tasks_trace();
963-
rcu_barrier();
964-
}
965+
/* smap remains in use regardless of kmalloc_nolock, so wait unconditionally. */
966+
rcu_barrier_tasks_trace();
967+
rcu_barrier();
965968
kvfree(smap->buckets);
966969
bpf_map_area_free(smap);
967970
}

0 commit comments

Comments
 (0)