Merge tag 'fsnotify_for_v7.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull fsnotify fixes from Jan Kara:
 "Three fixes for fsnotify / fanotify"

* tag 'fsnotify_for_v7.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  fsnotify: fix inode reference leak in fsnotify_recalc_mask()
  fanotify: Fix spelling mistake "enforecement" -> "enforcement"
  fanotify: fix false positive on permission events
This commit is contained in:
Linus Torvalds
2026-04-27 16:40:24 -07:00
4 changed files with 50 additions and 12 deletions

View File

@@ -457,7 +457,7 @@ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
/*
* Unlike file_handle, type and len of struct fanotify_fh are u8.
* Traditionally, filesystem return handle_type < 0xff, but there
* is no enforecement for that in vfs.
* is no enforcement for that in vfs.
*/
BUILD_BUG_ON(MAX_HANDLE_SZ > 0xff || FILEID_INVALID > 0xff);
if (type <= 0 || type >= FILEID_INVALID || fh_len != dwords << 2)

View File

@@ -388,7 +388,7 @@ static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector
return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
}
static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
{
struct hlist_node *node = NULL;

View File

@@ -238,7 +238,12 @@ static struct inode *fsnotify_update_iref(struct fsnotify_mark_connector *conn,
return inode;
}
static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
/*
* Calculate mask of events for a list of marks.
*
* Return true if any of the attached marks want to hold an inode reference.
*/
static bool __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
u32 new_mask = 0;
bool want_iref = false;
@@ -262,6 +267,34 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
*/
WRITE_ONCE(*fsnotify_conn_mask_p(conn), new_mask);
return want_iref;
}
/*
 * Calculate mask of events for a list of marks after attach/modify of a mark
 * and get an inode reference for the connector if needed.
 *
 * A concurrent add of an evictable mark and detach of a non-evictable mark
 * can lead to __fsnotify_recalc_mask() returning a spuriously false
 * want_iref. In that case we keep any reference already held and defer
 * clearing iref to fsnotify_recalc_mask_clear_iref(), called from
 * fsnotify_put_mark().
 */
static void fsnotify_recalc_mask_set_iref(struct fsnotify_mark_connector *conn)
{
	/* Does the connector already hold an inode reference? */
	bool has_iref = conn->flags & FSNOTIFY_CONN_FLAG_HAS_IREF;
	/*
	 * Never drop an already-held reference on this path, even if the
	 * recalculated mask would allow it — see spurious-false note above.
	 */
	bool want_iref = __fsnotify_recalc_mask(conn) || has_iref;
	/* Returned object pointer (if any) is intentionally discarded. */
	(void) fsnotify_update_iref(conn, want_iref);
}
/*
 * Recompute the mask of events for a list of marks after a mark has been
 * detached, and return the object pointer from fsnotify_update_iref()
 * when its reference is no longer needed (for the caller to release).
 */
static void *fsnotify_recalc_mask_clear_iref(struct fsnotify_mark_connector *conn)
{
	return fsnotify_update_iref(conn, __fsnotify_recalc_mask(conn));
}
@@ -298,7 +331,7 @@ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
spin_lock(&conn->lock);
update_children = !fsnotify_conn_watches_children(conn);
__fsnotify_recalc_mask(conn);
fsnotify_recalc_mask_set_iref(conn);
update_children &= fsnotify_conn_watches_children(conn);
spin_unlock(&conn->lock);
/*
@@ -419,7 +452,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
/* Update watched objects after detaching mark */
if (sb)
fsnotify_update_sb_watchers(sb, conn);
objp = __fsnotify_recalc_mask(conn);
objp = fsnotify_recalc_mask_clear_iref(conn);
type = conn->type;
}
WRITE_ONCE(mark->connector, NULL);
@@ -457,9 +490,6 @@ EXPORT_SYMBOL_GPL(fsnotify_put_mark);
*/
static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
{
if (!mark)
return true;
if (refcount_inc_not_zero(&mark->refcnt)) {
spin_lock(&mark->lock);
if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
@@ -500,15 +530,22 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
int type;
fsnotify_foreach_iter_type(type) {
struct fsnotify_mark *mark = iter_info->marks[type];
/* This can fail if mark is being removed */
if (!fsnotify_get_mark_safe(iter_info->marks[type])) {
__release(&fsnotify_mark_srcu);
goto fail;
while (mark && !fsnotify_get_mark_safe(mark)) {
if (mark->group == iter_info->current_group) {
__release(&fsnotify_mark_srcu);
goto fail;
}
/* This is a mark in an unrelated group, skip */
mark = fsnotify_next_mark(mark);
iter_info->marks[type] = mark;
}
}
/*
* Now that both marks are pinned by refcount in the inode / vfsmount
* Now that all marks are pinned by refcount in the inode / vfsmount / etc
* lists, we can drop SRCU lock, and safely resume the list iteration
* once userspace returns.
*/

View File

@@ -915,6 +915,7 @@ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
unsigned int obj_type);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);