mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
bpf: Convert hashtab.c to rqspinlock
Convert hashtab.c from raw_spinlock to rqspinlock, and drop the hashed
per-cpu counter crud from the code base which is no longer necessary.

Closes: https://lore.kernel.org/bpf/675302fd.050a0220.2477f.0004.GAE@google.com
Closes: https://lore.kernel.org/bpf/000000000000b3e63e061eed3f6b@google.com
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-20-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
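To make the shape of the change concrete, here is a minimal userspace sketch of the new scheme (not kernel code: pthread_mutex_trylock stands in for raw_res_spin_lock_irqsave(), which in the kernel spins with deadlock/timeout detection and returns 0 on success or a negative error once rqspinlock gives up; all names are illustrative):

/* Userspace model of the new locking scheme; not kernel code. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct bucket {
	pthread_mutex_t lock;	/* models rqspinlock_t raw_lock */
};

/* The lock primitive itself reports failure; callers just propagate
 * the error instead of tracking re-entrancy by hand. */
static int htab_lock_bucket(struct bucket *b)
{
	int ret = pthread_mutex_trylock(&b->lock);	/* 0 or EBUSY */

	return ret ? -ret : 0;
}

static void htab_unlock_bucket(struct bucket *b)
{
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct bucket b = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int ret = htab_lock_bucket(&b);

	if (ret) {
		fprintf(stderr, "lock failed: %d\n", ret);
		return 1;
	}
	/* ... update the hash bucket ... */
	htab_unlock_bucket(&b);
	return 0;
}

Every htab_lock_bucket() call site in the diff below follows this pattern: check the return value and bail out, or propagate it, on failure.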
commit 4fa8d68aa5
parent a6884f6f1d
committed by Alexei Starovoitov
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -16,6 +16,7 @@
 #include "bpf_lru_list.h"
 #include "map_in_map.h"
 #include <linux/bpf_mem_alloc.h>
+#include <asm/rqspinlock.h>
 
 #define HTAB_CREATE_FLAG_MASK \
 	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
@@ -78,7 +79,7 @@
  */
 struct bucket {
 	struct hlist_nulls_head head;
-	raw_spinlock_t raw_lock;
+	rqspinlock_t raw_lock;
 };
 
 #define HASHTAB_MAP_LOCK_COUNT 8
@@ -104,8 +105,6 @@ struct bpf_htab {
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
 	u32 hashrnd;
-	struct lock_class_key lockdep_key;
-	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -140,45 +139,26 @@ static void htab_init_buckets(struct bpf_htab *htab)
 
 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
-		raw_spin_lock_init(&htab->buckets[i].raw_lock);
-		lockdep_set_class(&htab->buckets[i].raw_lock,
-				  &htab->lockdep_key);
+		raw_res_spin_lock_init(&htab->buckets[i].raw_lock);
 		cond_resched();
 	}
 }
 
-static inline int htab_lock_bucket(const struct bpf_htab *htab,
-				   struct bucket *b, u32 hash,
-				   unsigned long *pflags)
+static inline int htab_lock_bucket(struct bucket *b, unsigned long *pflags)
 {
 	unsigned long flags;
+	int ret;
 
-	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
-
-	preempt_disable();
-	local_irq_save(flags);
-	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
-		__this_cpu_dec(*(htab->map_locked[hash]));
-		local_irq_restore(flags);
-		preempt_enable();
-		return -EBUSY;
-	}
-
-	raw_spin_lock(&b->raw_lock);
+	ret = raw_res_spin_lock_irqsave(&b->raw_lock, flags);
+	if (ret)
+		return ret;
 	*pflags = flags;
-
 	return 0;
 }
 
-static inline void htab_unlock_bucket(const struct bpf_htab *htab,
-				      struct bucket *b, u32 hash,
-				      unsigned long flags)
+static inline void htab_unlock_bucket(struct bucket *b, unsigned long flags)
 {
-	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
-	raw_spin_unlock(&b->raw_lock);
-	__this_cpu_dec(*(htab->map_locked[hash]));
-	local_irq_restore(flags);
-	preempt_enable();
+	raw_res_spin_unlock_irqrestore(&b->raw_lock, flags);
 }
 
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
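For contrast, a minimal userspace model of the scheme this hunk removes (again not kernel code: a thread-local counter stands in for the per-CPU map_locked[] counters, and names are illustrative). The old code had to detect same-context re-entrancy by hand, because a plain raw spinlock would deadlock if, say, a tracing program re-entered the bucket lock on the same CPU:

/* Userspace model of the removed map_locked scheme; not kernel code. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_nesting;	/* models __this_cpu_inc_return() */

static int old_htab_lock_bucket(void)
{
	if (++lock_nesting != 1) {	/* already held in this context? */
		--lock_nesting;
		return -EBUSY;		/* back off instead of deadlocking */
	}
	pthread_mutex_lock(&bucket_lock);
	return 0;
}

static void old_htab_unlock_bucket(void)
{
	pthread_mutex_unlock(&bucket_lock);
	--lock_nesting;
}

int main(void)
{
	int ret = old_htab_lock_bucket();

	printf("first:  %d\n", ret);			/* 0 */
	printf("nested: %d\n", old_htab_lock_bucket());	/* -EBUSY */
	if (!ret)
		old_htab_unlock_bucket();
	return 0;
}

With rqspinlock the deadlock/timeout detection moves into the lock primitive itself, which is why the counters, the lockdep key, and the per-map percpu allocations can all go, as the remaining hunks show.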
@@ -483,14 +463,12 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	struct bpf_htab *htab;
-	int err, i;
+	int err;
 
 	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
 
-	lockdep_register_key(&htab->lockdep_key);
-
 	bpf_map_init_from_attr(&htab->map, attr);
 
 	if (percpu_lru) {
@@ -536,15 +514,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_elem_count;
 
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
-		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
-							   sizeof(int),
-							   sizeof(int),
-							   GFP_USER);
-		if (!htab->map_locked[i])
-			goto free_map_locked;
-	}
-
 	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 		htab->hashrnd = 0;
 	else
@@ -607,15 +576,12 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 free_map_locked:
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 	bpf_mem_alloc_destroy(&htab->ma);
 free_elem_count:
 	bpf_map_free_elem_count(&htab->map);
 free_htab:
-	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
 	return ERR_PTR(err);
 }
@@ -820,7 +786,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 	b = __select_bucket(htab, tgt_l->hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return false;
 
@@ -831,7 +797,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 			break;
 		}
 
-	htab_unlock_bucket(htab, b, tgt_l->hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	if (l == tgt_l)
 		check_and_free_fields(htab, l);
@@ -1150,7 +1116,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 */
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1201,7 +1167,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 			check_and_free_fields(htab, l_old);
 		}
 	}
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l_old) {
 		if (old_map_ptr)
 			map->ops->map_fd_put_ptr(map, old_map_ptr, true);
@@ -1210,7 +1176,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	}
 	return 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
@@ -1257,7 +1223,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	copy_map_value(&htab->map,
 		       l_new->key + round_up(map->key_size, 8), value);
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		goto err_lock_bucket;
 
@@ -1278,7 +1244,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	ret = 0;
 
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 
 err_lock_bucket:
 	if (ret)
@@ -1315,7 +1281,7 @@ static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1340,7 +1306,7 @@ static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
@@ -1381,7 +1347,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		return -ENOMEM;
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		goto err_lock_bucket;
 
@@ -1405,7 +1371,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 err_lock_bucket:
 	if (l_new) {
 		bpf_map_dec_elem_count(&htab->map);
@@ -1447,7 +1413,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1457,7 +1423,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
 	else
 		ret = -ENOENT;
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	if (l)
 		free_htab_elem(htab, l);
@@ -1483,7 +1449,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1494,7 +1460,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	else
 		ret = -ENOENT;
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l)
 		htab_lru_push_free(htab, l);
 	return ret;
@@ -1561,7 +1527,6 @@ static void htab_map_free_timers_and_wq(struct bpf_map *map)
 static void htab_map_free(struct bpf_map *map)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	int i;
 
 	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
 	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
@@ -1586,9 +1551,6 @@ static void htab_map_free(struct bpf_map *map)
 	bpf_mem_alloc_destroy(&htab->ma);
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
-	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
 }
@@ -1631,7 +1593,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &bflags);
+	ret = htab_lock_bucket(b, &bflags);
 	if (ret)
 		return ret;
 
@@ -1668,7 +1630,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 		hlist_nulls_del_rcu(&l->hash_node);
 
 out_unlock:
-	htab_unlock_bucket(htab, b, hash, bflags);
+	htab_unlock_bucket(b, bflags);
 
 	if (l) {
 		if (is_lru_map)
@@ -1790,7 +1752,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 	head = &b->head;
 	/* do not grab the lock unless need it (bucket_cnt > 0). */
 	if (locked) {
-		ret = htab_lock_bucket(htab, b, batch, &flags);
+		ret = htab_lock_bucket(b, &flags);
 		if (ret) {
 			rcu_read_unlock();
 			bpf_enable_instrumentation();
@@ -1813,7 +1775,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		goto after_loop;
@@ -1824,7 +1786,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		kvfree(keys);
@@ -1887,7 +1849,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		dst_val += value_size;
 	}
 
-	htab_unlock_bucket(htab, b, batch, flags);
+	htab_unlock_bucket(b, flags);
 	locked = false;
 
 	while (node_to_free) {