Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2026-05-15 23:41:35 -04:00
Merge tag 'mm-hotfixes-stable-2026-04-06-15-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "Eight hotfixes. All are cc:stable and seven are for MM. All are
  singletons - please see the changelogs for details"

* tag 'mm-hotfixes-stable-2026-04-06-15-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  ocfs2: fix out-of-bounds write in ocfs2_write_end_inline
  mm/damon/stat: deallocate damon_call() failure leaking damon_ctx
  mm/vma: fix memory leak in __mmap_region()
  mm/memory_hotplug: maintain N_NORMAL_MEMORY during hotplug
  mm/damon/sysfs: dealloc repeat_call_control if damon_call() fails
  mm: reinstate unconditional writeback start in balance_dirty_pages()
  liveupdate: propagate file deserialization failures
  mm: filemap: fix nr_pages calculation overflow in filemap_map_pages()
fs/ocfs2/inode.c
@@ -1505,6 +1505,16 @@ int ocfs2_validate_inode_block(struct super_block *sb,
 		goto bail;
 	}
 
+	if (le16_to_cpu(data->id_count) >
+			ocfs2_max_inline_data_with_xattr(sb, di)) {
+		rc = ocfs2_error(sb,
+				 "Invalid dinode #%llu: inline data id_count %u exceeds max %d\n",
+				 (unsigned long long)bh->b_blocknr,
+				 le16_to_cpu(data->id_count),
+				 ocfs2_max_inline_data_with_xattr(sb, di));
+		goto bail;
+	}
+
 	if (le64_to_cpu(di->i_size) > le16_to_cpu(data->id_count)) {
 		rc = ocfs2_error(sb,
 				 "Invalid dinode #%llu: inline data i_size %llu exceeds id_count %u\n",
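
The validation added above closes the hole that ocfs2_write_end_inline relied on: an id_count read straight from a crafted disk image could claim more inline-data space than the dinode actually provides. A userspace sketch of that bug class, with illustrative names (fake_dinode and INLINE_MAX are toys, not the on-disk format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INLINE_MAX 128	/* space actually available past the header (toy) */

struct fake_dinode {
	uint16_t id_count;		/* attacker-controlled on a bad image */
	uint8_t  id_data[INLINE_MAX];
};

static int write_inline(struct fake_dinode *di, const void *buf, size_t len)
{
	/* Without this check a crafted id_count lets len exceed id_data[]
	 * and the memcpy writes out of bounds - the bug class fixed above. */
	if (di->id_count > INLINE_MAX || len > di->id_count)
		return -1;	/* reject, as ocfs2_error() does in-kernel */
	memcpy(di->id_data, buf, len);
	return 0;
}

int main(void)
{
	struct fake_dinode di = { .id_count = 4096 };	/* bogus on-disk value */
	char payload[256] = { 0 };

	printf("%d\n", write_inline(&di, payload, sizeof(payload)));	/* -1 */
	return 0;
}
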
kernel/liveupdate/luo_session.c
@@ -558,8 +558,13 @@ int luo_session_deserialize(void)
 		}
 
 		scoped_guard(mutex, &session->mutex) {
-			luo_file_deserialize(&session->file_set,
-					     &sh->ser[i].file_set_ser);
+			err = luo_file_deserialize(&session->file_set,
+						   &sh->ser[i].file_set_ser);
 		}
+		if (err) {
+			pr_warn("Failed to deserialize files for session [%s] %pe\n",
+				session->name, ERR_PTR(err));
+			return err;
+		}
 	}
 
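
The liveupdate change is a pure error-propagation fix: luo_file_deserialize() already reported failure, but its return value was dropped inside the scoped_guard() block, so a corrupt entry went unnoticed. A minimal userspace sketch of the same ignored-return-value pattern (deserialize_one/deserialize_all are stand-ins, not the LUO API):

#include <stdio.h>

static int deserialize_one(int i)
{
	return (i == 2) ? -22 : 0;	/* pretend entry 2 is corrupt (-EINVAL) */
}

static int deserialize_all(void)
{
	int i, err;

	for (i = 0; i < 4; i++) {
		/* Before the fix the result was dropped on the floor, so a
		 * corrupt entry went unnoticed and the caller saw success. */
		err = deserialize_one(i);
		if (err) {
			fprintf(stderr, "entry %d failed: %d\n", i, err);
			return err;	/* propagate, as the fix now does */
		}
	}
	return 0;
}

int main(void)
{
	return deserialize_all() ? 1 : 0;
}
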
mm/damon/stat.c
@@ -245,6 +245,12 @@ static int damon_stat_start(void)
 {
 	int err;
 
+	if (damon_stat_context) {
+		if (damon_is_running(damon_stat_context))
+			return -EAGAIN;
+		damon_destroy_ctx(damon_stat_context);
+	}
+
 	damon_stat_context = damon_stat_build_ctx();
 	if (!damon_stat_context)
 		return -ENOMEM;
@@ -261,6 +267,7 @@ static void damon_stat_stop(void)
 {
 	damon_stop(&damon_stat_context, 1);
 	damon_destroy_ctx(damon_stat_context);
+	damon_stat_context = NULL;
 }
 
 static int damon_stat_enabled_store(
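
Taken together, the two DAMON/stat hunks make start idempotent: a context left behind by a failed damon_call() is reclaimed on the next start instead of leaking, and stop NULLs the pointer so the staleness check never inspects freed memory. A toy model of that lifecycle, assuming illustrative names:

#include <stdbool.h>
#include <stdlib.h>

struct ctx { bool running; };
static struct ctx *stat_ctx;

static int stat_start(bool fail_call)
{
	if (stat_ctx) {				/* leftover from an earlier start */
		if (stat_ctx->running)
			return -11;		/* -EAGAIN: still in use */
		free(stat_ctx);			/* reclaim instead of leaking */
	}
	stat_ctx = calloc(1, sizeof(*stat_ctx));
	if (!stat_ctx)
		return -12;			/* -ENOMEM */
	if (fail_call)				/* models damon_call() failing */
		return -1;			/* ctx left behind for the retry */
	stat_ctx->running = true;
	return 0;
}

static void stat_stop(void)
{
	stat_ctx->running = false;
	free(stat_ctx);
	stat_ctx = NULL;	/* without this, the next start() would probe
				 * a dangling pointer */
}

int main(void)
{
	stat_start(true);		/* first attempt fails mid-way */
	int err = stat_start(false);	/* retry reclaims the leftover ctx */
	if (!err)
		stat_stop();
	return err ? 1 : 0;
}
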
mm/damon/sysfs.c
@@ -1670,7 +1670,8 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
 	repeat_call_control->data = kdamond;
 	repeat_call_control->repeat = true;
 	repeat_call_control->dealloc_on_cancel = true;
-	damon_call(ctx, repeat_call_control);
+	if (damon_call(ctx, repeat_call_control))
+		kfree(repeat_call_control);
 	return err;
 }
 
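
The sysfs fix applies a common ownership rule: dealloc_on_cancel only helps once damon_call() has accepted the control block; on registration failure the caller still owns it and must kfree() it. A small userspace sketch of that rule (register_call and struct control are hypothetical):

#include <stdlib.h>

struct control { int repeat; int dealloc_on_cancel; };

static int register_call(struct control *c)
{
	(void)c;
	return -22;	/* pretend registration fails (-EINVAL) */
}

int main(void)
{
	struct control *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->repeat = 1;
	c->dealloc_on_cancel = 1;
	/* A failed register_call() never took ownership, so the callee will
	 * not free c on cancel - the caller must, exactly as the added
	 * kfree() above does. */
	if (register_call(c))
		free(c);
	return 0;
}
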
mm/filemap.c
@@ -3883,14 +3883,19 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned int nr_pages = 0, folio_type;
 	unsigned short mmap_miss = 0, mmap_miss_saved;
 
+	/*
+	 * Recalculate end_pgoff based on file_end before calling
+	 * next_uptodate_folio() to avoid races with concurrent
+	 * truncation.
+	 */
+	file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
+	end_pgoff = min(end_pgoff, file_end);
+
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
 	if (!folio)
 		goto out;
 
-	file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
-	end_pgoff = min(end_pgoff, file_end);
-
 	/*
 	 * Do not allow to map with PMD across i_size to preserve
 	 * SIGBUS semantics.
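
The filemap hunk reorders the clamp so end_pgoff is bounded by file_end before it is used. If the file is truncated concurrently and the clamp happens late, end_pgoff can drop below start_pgoff and an unsigned page count wraps to a huge value. A sketch of that arithmetic (the kernel's actual nr_pages computation differs in detail):

#include <stdio.h>

int main(void)
{
	unsigned long start_pgoff = 100, end_pgoff = 200;
	unsigned long file_end = 40;	/* file truncated concurrently */

	/* Old order: the folio lookup already ran against the stale
	 * end_pgoff; clamping afterwards can leave end < start ... */
	unsigned long end = end_pgoff < file_end ? end_pgoff : file_end;
	unsigned long nr_pages = end - start_pgoff + 1;	/* wraps: huge */
	printf("wrapped nr_pages = %lu\n", nr_pages);

	/* Clamping before any use, as the fix does, lets the fault path
	 * notice the empty range and bail out instead. */
	if (end < start_pgoff)
		puts("range empty after truncation - nothing to map");
	return 0;
}
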
mm/memory_hotplug.c
@@ -1209,6 +1209,13 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
 
 	if (node_arg.nid >= 0)
 		node_set_state(nid, N_MEMORY);
+	/*
+	 * Check whether we are adding normal memory to the node for the first
+	 * time.
+	 */
+	if (!node_state(nid, N_NORMAL_MEMORY) && zone_idx(zone) <= ZONE_NORMAL)
+		node_set_state(nid, N_NORMAL_MEMORY);
+
 	if (need_zonelists_rebuild)
 		build_all_zonelists(NULL);
 
@@ -1908,6 +1915,8 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
 	unsigned long flags;
 	char *reason;
 	int ret;
+	unsigned long normal_pages = 0;
+	enum zone_type zt;
 
 	/*
 	 * {on,off}lining is constrained to full memory sections (or more
@@ -2055,6 +2064,17 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
 	/* reinitialise watermarks and update pcp limits */
 	init_per_zone_wmark_min();
 
+	/*
+	 * Check whether this operation removes the last normal memory from
+	 * the node. We do this before clearing N_MEMORY to avoid the possible
+	 * transient "!N_MEMORY && N_NORMAL_MEMORY" state.
+	 */
+	if (zone_idx(zone) <= ZONE_NORMAL) {
+		for (zt = 0; zt <= ZONE_NORMAL; zt++)
+			normal_pages += pgdat->node_zones[zt].present_pages;
+		if (!normal_pages)
+			node_clear_state(node, N_NORMAL_MEMORY);
+	}
 	/*
 	 * Make sure to mark the node as memory-less before rebuilding the zone
 	 * list. Otherwise this node would still appear in the fallback lists.
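
The hotplug hunks keep N_NORMAL_MEMORY in step with the zones actually present: set it when the first non-movable memory comes online, clear it when the last leaves, and clear it before N_MEMORY so the transient "!N_MEMORY && N_NORMAL_MEMORY" state can never be observed. A toy model of that bookkeeping (zone list and node states reduced to globals):

#include <stdbool.h>
#include <stdio.h>

#define ZONE_NORMAL 2
#define NR_ZONES    4		/* DMA, DMA32, NORMAL, MOVABLE (toy) */

static unsigned long present[NR_ZONES];
static bool n_memory, n_normal_memory;

static void toy_online(int zone, unsigned long pages)
{
	present[zone] += pages;
	n_memory = true;
	if (!n_normal_memory && zone <= ZONE_NORMAL)
		n_normal_memory = true;	/* first normal memory on the node */
}

static void toy_offline(int zone, unsigned long pages)
{
	unsigned long normal_pages = 0, total_pages = 0;
	int zt;

	present[zone] -= pages;
	if (zone <= ZONE_NORMAL) {
		for (zt = 0; zt <= ZONE_NORMAL; zt++)
			normal_pages += present[zt];
		if (!normal_pages)		 /* last normal memory left */
			n_normal_memory = false; /* cleared before N_MEMORY */
	}
	for (zt = 0; zt < NR_ZONES; zt++)
		total_pages += present[zt];
	if (!total_pages)
		n_memory = false;
}

int main(void)
{
	toy_online(ZONE_NORMAL, 512);
	toy_offline(ZONE_NORMAL, 512);
	printf("N_MEMORY=%d N_NORMAL_MEMORY=%d\n", n_memory, n_normal_memory);
	return 0;
}
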
mm/page-writeback.c
@@ -1858,6 +1858,27 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 			break;
 		}
 
+		/*
+		 * Unconditionally start background writeback if it's not
+		 * already in progress. We need to do this because the global
+		 * dirty threshold check above (nr_dirty > gdtc->bg_thresh)
+		 * doesn't account for these cases:
+		 *
+		 * a) strictlimit BDIs: throttling is calculated using per-wb
+		 *    thresholds. The per-wb threshold can be exceeded even when
+		 *    nr_dirty < gdtc->bg_thresh
+		 *
+		 * b) memcg-based throttling: memcg uses its own dirty count and
+		 *    thresholds and can trigger throttling even when global
+		 *    nr_dirty < gdtc->bg_thresh
+		 *
+		 * Writeback needs to be started else the writer stalls in the
+		 * throttle loop waiting for dirty pages to be written back
+		 * while no writeback is running.
+		 */
+		if (unlikely(!writeback_in_progress(wb)))
+			wb_start_background_writeback(wb);
+
 		mem_cgroup_flush_foreign(wb);
 
 		/*
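
The comment in the hunk carries the whole argument: per-wb strictlimit and memcg thresholds can throttle a writer while the global background threshold says writeback need not run, so the throttled writer must kick the flusher itself or wait forever. A single-threaded toy of the stall and the kick (not the kernel's actual ratelimiting loop):

#include <stdbool.h>
#include <stdio.h>

static long dirty = 1000;		/* pages this writer is throttled on */
static bool writeback_running;

static void flusher_tick(void)		/* models the flusher making progress */
{
	if (writeback_running && dirty > 0)
		dirty -= 250;
}

int main(void)
{
	int loops = 0;

	while (dirty > 0 && loops++ < 100) {
		/* Per-wb strictlimit or memcg limits can throttle us even
		 * while the global background threshold is not exceeded, so
		 * nobody else may have started writeback: kick it ourselves
		 * (the wb_start_background_writeback() of the hunk above). */
		if (!writeback_running)
			writeback_running = true;
		flusher_tick();
	}
	printf("drained after %d iterations, dirty=%ld\n", loops, dirty);
	return 0;
}
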
mm/vma.c
@@ -2781,6 +2781,13 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr,
 	if (map.charged)
 		vm_unacct_memory(map.charged);
 abort_munmap:
+	/*
+	 * This indicates that .mmap_prepare has set a new file, differing from
+	 * desc->vm_file. But since we're aborting the operation, only the
+	 * original file will be cleaned up. Ensure we clean up both.
+	 */
+	if (map.file_doesnt_need_get)
+		fput(map.file);
 	vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
 	return error;
 }
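
The vma fix handles a file swapped in by .mmap_prepare: the abort path cleaned up only the original file, leaking the reference on the replacement. A userspace sketch of the two-reference cleanup (struct file and put() here just model refcounting):

#include <stdio.h>

struct file { int refs; const char *name; };

static void put(struct file *f)
{
	f->refs--;	/* models fput() dropping one reference */
}

int main(void)
{
	struct file orig    = { .refs = 1, .name = "orig" };
	struct file swapped = { .refs = 1, .name = "swapped" };

	struct file *vm_file  = &orig;		/* what the caller cleans up */
	struct file *map_file = &swapped;	/* installed by the prepare hook */
	int swapped_in = (map_file != vm_file);

	/* Abort path: the generic cleanup drops only the original ... */
	put(vm_file);
	/* ... so the abort code must also drop the swapped-in file,
	 * mirroring the added fput(map.file) above. */
	if (swapped_in)
		put(map_file);

	printf("orig refs=%d, swapped refs=%d\n", orig.refs, swapped.refs);
	return 0;
}
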