mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-02 04:41:10 -04:00
mm: replace use of system_unbound_wq with system_dfl_wq
Patch series "Replace wq users and add WQ_PERCPU to alloc_workqueue() users", v2. This series continues the effort to refactor the Workqueue API. No behavior changes are introduced by this series. === Recent changes to the WQ API === The following address the recent changes in the Workqueue API: - commit 128ea9f6cc ("workqueue: Add system_percpu_wq and system_dfl_wq") - commit 930c2ea566 ("workqueue: Add new WQ_PERCPU flag") The old workqueues will be removed in a future release cycle and unbound will become the implicit default. === Introduced Changes by this series === 1) [P 1-2] Replace use of system_wq and system_unbound_wq Workqueue users converted to the better named new workqueues: system_wq -> system_percpu_wq system_unbound_wq -> system_dfl_wq This way the old obsolete workqueues (system_wq, system_unbound_wq) can be removed in the future. 2) [P 3] add WQ_PERCPU to remaining alloc_workqueue() users With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND), any alloc_workqueue() caller that doesn't explicitly specify WQ_UNBOUND must now use WQ_PERCPU. WQ_UNBOUND will be removed in the future. For more information: https://lore.kernel.org/all/20250221112003.1dSuoGyc@linutronix.de/ This patch (of 3): This patch continues the effort to refactor workqueue APIs, which began with the changes introducing new workqueues and a new alloc_workqueue flag: commit 128ea9f6cc ("workqueue: Add system_percpu_wq and system_dfl_wq") commit 930c2ea566 ("workqueue: Add new WQ_PERCPU flag") The point of the refactoring is to eventually alter the default behavior of workqueues to become unbound by default so that their workload placement is optimized by the scheduler. Before that can happen, workqueue users must be converted to the better named new workqueues with no intended behavior changes: system_wq -> system_percpu_wq system_unbound_wq -> system_dfl_wq This way the old obsolete workqueues (system_wq, system_unbound_wq) can be removed in the future. 
Link: https://lkml.kernel.org/r/20260113114630.152942-1-marco.crivellari@suse.com Link: https://lore.kernel.org/all/20250221112003.1dSuoGyc@linutronix.de/ Link: https://lkml.kernel.org/r/20260113114630.152942-2-marco.crivellari@suse.com Signed-off-by: Marco Crivellari <marco.crivellari@suse.com> Suggested-by: Tejun Heo <tj@kernel.org> Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Reviewed-by: Frederic Weisbecker <frederic@kernel.org> Cc: Alexander Potapenko <glider@google.com> Cc: David Hildenbrand <david@kernel.org> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Lai jiangshan <jiangshanlai@gmail.com> Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Marco Elver <elver@google.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Muchun Song <muchun.song@linux.dev> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Shakeel Butt <shakeel.butt@linux.dev> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
824b8c96c4
commit
0bcbd7cf65
@@ -939,7 +939,7 @@ void wb_memcg_offline(struct mem_cgroup *memcg)
|
||||
memcg_cgwb_list->next = NULL; /* prevent new wb's */
|
||||
spin_unlock_irq(&cgwb_lock);
|
||||
|
||||
queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
|
||||
queue_work(system_dfl_wq, &cleanup_offline_cgwbs_work);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -900,7 +900,7 @@ static void toggle_allocation_gate(struct work_struct *work)
|
||||
/* Disable static key and reset timer. */
|
||||
static_branch_disable(&kfence_allocation_key);
|
||||
#endif
|
||||
queue_delayed_work(system_unbound_wq, &kfence_timer,
|
||||
queue_delayed_work(system_dfl_wq, &kfence_timer,
|
||||
msecs_to_jiffies(kfence_sample_interval));
|
||||
}
|
||||
|
||||
@@ -950,7 +950,7 @@ static void kfence_init_enable(void)
|
||||
#endif
|
||||
|
||||
WRITE_ONCE(kfence_enabled, true);
|
||||
queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
|
||||
queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
|
||||
|
||||
pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
|
||||
CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
|
||||
@@ -1046,7 +1046,7 @@ static int kfence_enable_late(void)
|
||||
return kfence_init_late();
|
||||
|
||||
WRITE_ONCE(kfence_enabled, true);
|
||||
queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
|
||||
queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
|
||||
pr_info("re-enabled\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -644,7 +644,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
|
||||
* in latency-sensitive paths is as cheap as possible.
|
||||
*/
|
||||
__mem_cgroup_flush_stats(root_mem_cgroup, true);
|
||||
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
|
||||
queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME);
|
||||
}
|
||||
|
||||
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
|
||||
@@ -3841,7 +3841,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
|
||||
goto offline_kmem;
|
||||
|
||||
if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
|
||||
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
|
||||
queue_delayed_work(system_dfl_wq, &stats_flush_dwork,
|
||||
FLUSH_TIME);
|
||||
lru_gen_online_memcg(memcg);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user