mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-30 15:24:31 -04:00
fs: make insert_inode_locked() wait for inode destruction
This is the only routine which skipped instead of waiting. The current behavior is arguably a bug as it results in a corner case where the inode hash can have *two* matching inodes, one of which is on its way out. Ironing out this difference is an incremental step towards sanitizing the API. Signed-off-by: Mateusz Guzik <mjguzik@gmail.com> Link: https://patch.msgid.link/20260114094717.236202-1-mjguzik@gmail.com Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
committed by
Christian Brauner
parent
aaf7683961
commit
88ec797c46
41
fs/inode.c
41
fs/inode.c
@@ -1028,19 +1028,20 @@ long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
|
||||
return freed;
|
||||
}
|
||||
|
||||
static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked);
|
||||
static void __wait_on_freeing_inode(struct inode *inode, bool hash_locked, bool rcu_locked);
|
||||
|
||||
/*
|
||||
* Called with the inode lock held.
|
||||
*/
|
||||
static struct inode *find_inode(struct super_block *sb,
|
||||
struct hlist_head *head,
|
||||
int (*test)(struct inode *, void *),
|
||||
void *data, bool is_inode_hash_locked,
|
||||
void *data, bool hash_locked,
|
||||
bool *isnew)
|
||||
{
|
||||
struct inode *inode = NULL;
|
||||
|
||||
if (is_inode_hash_locked)
|
||||
if (hash_locked)
|
||||
lockdep_assert_held(&inode_hash_lock);
|
||||
else
|
||||
lockdep_assert_not_held(&inode_hash_lock);
|
||||
@@ -1054,7 +1055,7 @@ static struct inode *find_inode(struct super_block *sb,
|
||||
continue;
|
||||
spin_lock(&inode->i_lock);
|
||||
if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
|
||||
__wait_on_freeing_inode(inode, is_inode_hash_locked);
|
||||
__wait_on_freeing_inode(inode, hash_locked, true);
|
||||
goto repeat;
|
||||
}
|
||||
if (unlikely(inode_state_read(inode) & I_CREATING)) {
|
||||
@@ -1078,11 +1079,11 @@ static struct inode *find_inode(struct super_block *sb,
|
||||
*/
|
||||
static struct inode *find_inode_fast(struct super_block *sb,
|
||||
struct hlist_head *head, unsigned long ino,
|
||||
bool is_inode_hash_locked, bool *isnew)
|
||||
bool hash_locked, bool *isnew)
|
||||
{
|
||||
struct inode *inode = NULL;
|
||||
|
||||
if (is_inode_hash_locked)
|
||||
if (hash_locked)
|
||||
lockdep_assert_held(&inode_hash_lock);
|
||||
else
|
||||
lockdep_assert_not_held(&inode_hash_lock);
|
||||
@@ -1096,7 +1097,7 @@ static struct inode *find_inode_fast(struct super_block *sb,
|
||||
continue;
|
||||
spin_lock(&inode->i_lock);
|
||||
if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
|
||||
__wait_on_freeing_inode(inode, is_inode_hash_locked);
|
||||
__wait_on_freeing_inode(inode, hash_locked, true);
|
||||
goto repeat;
|
||||
}
|
||||
if (unlikely(inode_state_read(inode) & I_CREATING)) {
|
||||
@@ -1832,16 +1833,13 @@ int insert_inode_locked(struct inode *inode)
|
||||
while (1) {
|
||||
struct inode *old = NULL;
|
||||
spin_lock(&inode_hash_lock);
|
||||
repeat:
|
||||
hlist_for_each_entry(old, head, i_hash) {
|
||||
if (old->i_ino != ino)
|
||||
continue;
|
||||
if (old->i_sb != sb)
|
||||
continue;
|
||||
spin_lock(&old->i_lock);
|
||||
if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
|
||||
spin_unlock(&old->i_lock);
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (likely(!old)) {
|
||||
@@ -1852,6 +1850,11 @@ int insert_inode_locked(struct inode *inode)
|
||||
spin_unlock(&inode_hash_lock);
|
||||
return 0;
|
||||
}
|
||||
if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
|
||||
__wait_on_freeing_inode(old, true, false);
|
||||
old = NULL;
|
||||
goto repeat;
|
||||
}
|
||||
if (unlikely(inode_state_read(old) & I_CREATING)) {
|
||||
spin_unlock(&old->i_lock);
|
||||
spin_unlock(&inode_hash_lock);
|
||||
@@ -2504,16 +2507,18 @@ EXPORT_SYMBOL(inode_needs_sync);
|
||||
* wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
|
||||
* will DTRT.
|
||||
*/
|
||||
static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
|
||||
static void __wait_on_freeing_inode(struct inode *inode, bool hash_locked, bool rcu_locked)
|
||||
{
|
||||
struct wait_bit_queue_entry wqe;
|
||||
struct wait_queue_head *wq_head;
|
||||
|
||||
VFS_BUG_ON(!hash_locked && !rcu_locked);
|
||||
|
||||
/*
|
||||
* Handle racing against evict(), see that routine for more details.
|
||||
*/
|
||||
if (unlikely(inode_unhashed(inode))) {
|
||||
WARN_ON(is_inode_hash_locked);
|
||||
WARN_ON(hash_locked);
|
||||
spin_unlock(&inode->i_lock);
|
||||
return;
|
||||
}
|
||||
@@ -2521,14 +2526,16 @@ static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_lock
|
||||
wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
|
||||
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
|
||||
spin_unlock(&inode->i_lock);
|
||||
rcu_read_unlock();
|
||||
if (is_inode_hash_locked)
|
||||
if (rcu_locked)
|
||||
rcu_read_unlock();
|
||||
if (hash_locked)
|
||||
spin_unlock(&inode_hash_lock);
|
||||
schedule();
|
||||
finish_wait(wq_head, &wqe.wq_entry);
|
||||
if (is_inode_hash_locked)
|
||||
if (hash_locked)
|
||||
spin_lock(&inode_hash_lock);
|
||||
rcu_read_lock();
|
||||
if (rcu_locked)
|
||||
rcu_read_lock();
|
||||
}
|
||||
|
||||
static __initdata unsigned long ihash_entries;
|
||||
|
||||
Reference in New Issue
Block a user