Merge branch 'bpf-use-gfp_kernel-in-bpf_event_entry_gen'
Hou Tao says:

====================
This simple patch set aims to replace GFP_ATOMIC by GFP_KERNEL in
bpf_event_entry_gen(). The two patches in the set were preparatory
patches in the "Fix the release of inner map" patchset [1] and are not
needed for its v2, so they are re-posted to the bpf-next tree.

Patch #1 reduces the scope of rcu_read_lock when updating an fd map,
and patch #2 replaces GFP_ATOMIC by GFP_KERNEL. Please see the
individual patches for more details.

Change Log:

v3:
  * patch #1: fall back to patch #1 from v1; update the comments in
    bpf_fd_htab_map_update_elem() to explain the reason for
    rcu_read_lock() (Alexei)

v2: https://lore.kernel.org/bpf/20231211073843.1888058-1-houtao@huaweicloud.com/
  * patch #1: add rcu_read_lock/unlock() for bpf_fd_array_map_update_elem
    as well, to make it consistent with bpf_fd_htab_map_update_elem, and
    update the commit message accordingly (Alexei)
  * patch #1/#2: collect ack tags from Yonghong

v1: https://lore.kernel.org/bpf/20231208103357.2637299-1-houtao@huaweicloud.com/

[1]: https://lore.kernel.org/bpf/20231107140702.1891778-1-houtao@huaweicloud.com/
====================

Link: https://lore.kernel.org/r/20231214043010.3458072-1-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
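For context on why the GFP change is safe: code inside an RCU read-side
critical section must not sleep, so any allocation made there needs
GFP_ATOMIC. Once patch #1 narrows rcu_read_lock() to just the inner
map-update call, the allocation in bpf_event_entry_gen() runs in plain
sleepable process context and GFP_KERNEL becomes usable. A minimal
sketch of the before/after shape (the struct and helper names here are
hypothetical, not the actual call chain):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct entry { int val; };	/* hypothetical payload */

/* Before: the caller held rcu_read_lock() across the whole update,
 * so the allocation could not sleep and had to use GFP_ATOMIC.
 */
static struct entry *entry_alloc_old(void)
{
	struct entry *e;

	rcu_read_lock();
	e = kzalloc(sizeof(*e), GFP_ATOMIC);	/* must not sleep here */
	/* ... perform the map update under RCU ... */
	rcu_read_unlock();
	return e;
}

/* After: allocate first in sleepable process context, then take
 * rcu_read_lock() only around the part that actually needs it.
 */
static struct entry *entry_alloc_new(void)
{
	struct entry *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);	/* may sleep and reclaim */
	if (!e)
		return NULL;

	rcu_read_lock();
	/* ... perform the map update under RCU ... */
	rcu_read_unlock();
	return e;
}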
kernel/bpf/arraymap.c
@@ -1195,7 +1195,7 @@ static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
 {
 	struct bpf_event_entry *ee;
 
-	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
+	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
 	if (ee) {
 		ee->event = perf_file->private_data;
 		ee->perf_file = perf_file;
kernel/bpf/hashtab.c
@@ -2523,7 +2523,13 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
 
+	/* The htab bucket lock is always held during update operations in fd
+	 * htab map, and the following rcu_read_lock() is only used to avoid
+	 * the WARN_ON_ONCE in htab_map_update_elem().
+	 */
+	rcu_read_lock();
 	ret = htab_map_update_elem(map, key, &ptr, map_flags);
+	rcu_read_unlock();
 	if (ret)
 		map->ops->map_fd_put_ptr(map, ptr, false);
 
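The WARN_ON_ONCE referenced by the new comment is
htab_map_update_elem()'s sanity check that the caller holds some RCU
read-side lock. In kernels around this change the check looks roughly
like the following (quoted from memory; the exact set of
rcu_read_lock_*_held() variants may differ), which is why the
otherwise-redundant rcu_read_lock() above is kept even though the htab
bucket lock already serializes the update:

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());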
kernel/bpf/syscall.c
@@ -184,15 +184,11 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
 		err = bpf_percpu_cgroup_storage_update(map, key, value,
 						       flags);
 	} else if (IS_FD_ARRAY(map)) {
-		rcu_read_lock();
 		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
 						   flags);
-		rcu_read_unlock();
 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
-		rcu_read_lock();
 		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
 						  flags);
-		rcu_read_unlock();
 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
 		/* rcu_read_lock() is not needed */
 		err = bpf_fd_reuseport_array_update_elem(map, key, value,