diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a0e26d1b717f..0e315f770755 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -939,7 +939,7 @@ void wb_memcg_offline(struct mem_cgroup *memcg)
 	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
 	spin_unlock_irq(&cgwb_lock);
 
-	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
+	queue_work(system_dfl_wq, &cleanup_offline_cgwbs_work);
 }
 
 /**
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4f79ec720752..1b779cee6ca2 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -900,7 +900,7 @@ static void toggle_allocation_gate(struct work_struct *work)
 	/* Disable static key and reset timer. */
 	static_branch_disable(&kfence_allocation_key);
 #endif
-	queue_delayed_work(system_unbound_wq, &kfence_timer,
+	queue_delayed_work(system_dfl_wq, &kfence_timer,
 			   msecs_to_jiffies(kfence_sample_interval));
 }
 
@@ -950,7 +950,7 @@ static void kfence_init_enable(void)
 #endif
 
 	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
 
 	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n",
 		KFENCE_POOL_SIZE, CONFIG_KFENCE_NUM_OBJECTS,
 		(void *)__kfence_pool,
@@ -1046,7 +1046,7 @@ static int kfence_enable_late(void)
 		return kfence_init_late();
 
 	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
 	pr_info("re-enabled\n");
 	return 0;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7d6cf47e6d4c..21d17975c4ac 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -644,7 +644,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
 	 * in latency-sensitive paths is as cheap as possible.
 	 */
 	__mem_cgroup_flush_stats(root_mem_cgroup, true);
-	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
+	queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME);
 }
 
 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
@@ -3841,7 +3841,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		goto offline_kmem;
 
 	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
-		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
+		queue_delayed_work(system_dfl_wq, &stats_flush_dwork,
 				   FLUSH_TIME);
 
 	lru_gen_online_memcg(memcg);