Merge tag 'cgroup-for-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup updates from Tejun Heo:

 - Add deprecation info messages to cgroup1-only features

 - rstat updates including a bug fix and breaking up a critical section
   to reduce interrupt latency impact

 - Other misc and doc updates

* tag 'cgroup-for-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: rstat: Cleanup flushing functions and locking
  cgroup/rstat: avoid disabling irqs for O(num_cpu)
  mm: Fix a build breakage in memcontrol-v1.c
  blk-cgroup: Simplify policy files registration
  cgroup: Update file naming comment
  cgroup: Add deprecation message to legacy freezer controller
  mm: Add transformation message for per-memcg swappiness
  RFC cgroup/cpuset-v1: Add deprecation messages to sched_relax_domain_level
  cgroup/cpuset-v1: Add deprecation messages to memory_migrate
  cgroup/cpuset-v1: Add deprecation messages to mem_exclusive and mem_hardwall
  cgroup: Print message when /proc/cgroups is read on v2-only system
  cgroup/blkio: Add deprecation messages to reset_stats
  cgroup/cpuset-v1: Add deprecation messages to memory_spread_page and memory_spread_slab
  cgroup/cpuset-v1: Add deprecation messages to sched_load_balance and memory_pressure_enabled
  cgroup, docs: Be explicit about independence of RT_GROUP_SCHED and non-cpu controllers
  cgroup/rstat: Fix forceidle time in cpu.stat
  cgroup/misc: Remove unused misc_cg_res_total_usage
  cgroup/cpuset: Move procfs cpuset attribute under cgroup-v1.c
  cgroup: update comment about dropping cgroup kn refs
Linus Torvalds
2025-03-24 16:49:40 -07:00
18 changed files with 142 additions and 188 deletions

View File

@@ -125,3 +125,7 @@ to unfreeze all tasks in the container::
This is the basic mechanism which should do the right thing for user space task
in a simple scenario.
+This freezer implementation is affected by shortcomings (see commit
+76f969e8948d8 ("cgroup: cgroup v2 freezer")) and cgroup v2 freezer is
+recommended.
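
For reference, the recommended v2 interface is a single per-cgroup file, cgroup.freeze. A minimal user-space sketch, assuming a v2 hierarchy mounted at /sys/fs/cgroup and an already-created cgroup named "demo" (both are assumptions for illustration, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0) {
		perror(path);
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	/* Assumed path: a pre-created cgroup "demo" on a mounted v2 hierarchy. */
	const char *freeze = "/sys/fs/cgroup/demo/cgroup.freeze";

	write_str(freeze, "1");		/* freeze every task in the cgroup */
	sleep(1);
	write_str(freeze, "0");		/* thaw them again */
	return 0;
}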

View File

@@ -90,6 +90,7 @@ Brief summary of control files.
used.
memory.swappiness set/show swappiness parameter of vmscan
(See sysctl's vm.swappiness)
+Per memcg knob does not exist in cgroup v2.
memory.move_charge_at_immigrate This knob is deprecated.
memory.oom_control set/show oom controls.
This knob is deprecated and shouldn't be

View File

@@ -1076,15 +1076,20 @@ cpufreq governor about the minimum desired frequency which should always be
provided by a CPU, as well as the maximum desired frequency, which should not
be exceeded by a CPU.
-WARNING: cgroup2 doesn't yet support control of realtime processes. For
-a kernel built with the CONFIG_RT_GROUP_SCHED option enabled for group
-scheduling of realtime processes, the cpu controller can only be enabled
-when all RT processes are in the root cgroup. This limitation does
-not apply if CONFIG_RT_GROUP_SCHED is disabled. Be aware that system
-management software may already have placed RT processes into nonroot
-cgroups during the system boot process, and these processes may need
-to be moved to the root cgroup before the cpu controller can be enabled
-with a CONFIG_RT_GROUP_SCHED enabled kernel.
+WARNING: cgroup2 cpu controller doesn't yet fully support the control of
+realtime processes. For a kernel built with the CONFIG_RT_GROUP_SCHED option
+enabled for group scheduling of realtime processes, the cpu controller can only
+be enabled when all RT processes are in the root cgroup. Be aware that system
+management software may already have placed RT processes into non-root cgroups
+during the system boot process, and these processes may need to be moved to the
+root cgroup before the cpu controller can be enabled with a
+CONFIG_RT_GROUP_SCHED enabled kernel.
+With CONFIG_RT_GROUP_SCHED disabled, this limitation does not apply and some of
+the interface files either affect realtime processes or account for them. See
+the following section for details. Only the cpu controller is affected by
+CONFIG_RT_GROUP_SCHED. Other controllers can be used for the resource control of
+realtime processes irrespective of CONFIG_RT_GROUP_SCHED.
CPU Interface Files
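
In practice the restriction described in the warning above shows up at a single point: the write that enables the cpu controller in the root cgroup's cgroup.subtree_control. A minimal user-space sketch, assuming the v2 hierarchy is mounted at /sys/fs/cgroup:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed mount point of the v2 hierarchy. */
	const char *path = "/sys/fs/cgroup/cgroup.subtree_control";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	/* On a CONFIG_RT_GROUP_SCHED kernel this write fails until every
	 * realtime process has been moved to the root cgroup. */
	if (write(fd, "+cpu", strlen("+cpu")) < 0)
		perror("enabling cpu controller");
	close(fd);
	return 0;
}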

View File

@@ -659,6 +659,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct blkcg_gq *blkg;
int i;
pr_info_once("blkio.%s is deprecated\n", cftype->name);
mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
@@ -1770,12 +1771,15 @@ int blkcg_policy_register(struct blkcg_policy *pol)
mutex_unlock(&blkcg_pol_mutex);
/* everything is in place, add intf files for the new policy */
-if (pol->dfl_cftypes)
+if (pol->dfl_cftypes == pol->legacy_cftypes) {
+WARN_ON(cgroup_add_cftypes(&io_cgrp_subsys,
+pol->dfl_cftypes));
+} else {
WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
pol->dfl_cftypes));
-if (pol->legacy_cftypes)
WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
pol->legacy_cftypes));
+}
mutex_unlock(&blkcg_pol_register_mutex);
return 0;

View File

@@ -113,27 +113,18 @@ static void ioprio_free_cpd(struct blkcg_policy_data *cpd)
kfree(blkcg);
}
-#define IOPRIO_ATTRS \
-{ \
-.name = "prio.class", \
-.seq_show = ioprio_show_prio_policy, \
-.write = ioprio_set_prio_policy, \
-}, \
-{ } /* sentinel */
-/* cgroup v2 attributes */
static struct cftype ioprio_files[] = {
-IOPRIO_ATTRS
-};
-/* cgroup v1 attributes */
-static struct cftype ioprio_legacy_files[] = {
-IOPRIO_ATTRS
+{
+.name = "prio.class",
+.seq_show = ioprio_show_prio_policy,
+.write = ioprio_set_prio_policy,
+},
+{ } /* sentinel */
};
static struct blkcg_policy ioprio_policy = {
.dfl_cftypes = ioprio_files,
-.legacy_cftypes = ioprio_legacy_files,
+.legacy_cftypes = ioprio_files,
.cpd_alloc_fn = ioprio_alloc_cpd,
.cpd_free_fn = ioprio_free_cpd,

View File

@@ -619,9 +619,8 @@ struct cgroup_root {
*/
struct cftype {
/*
-* By convention, the name should begin with the name of the
-* subsystem, followed by a period. Zero length string indicates
-* end of cftype array.
+* Name of the subsystem is prepended in cgroup_file_name().
+* Zero length string indicates end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
unsigned long private;
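
For readers not familiar with the convention the updated comment refers to: the visible file name is built by prefixing the subsystem name, so a cftype named "swappiness" in the memory controller appears as "memory.swappiness". A simplified, illustration-only rendering of that convention (the real cgroup_file_name() also honours flags such as CFTYPE_NO_PREFIX, which are ignored here):

#include <stdio.h>

/* Illustration only: the "<subsys>.<name>" naming convention that
 * cgroup_file_name() applies in the kernel; flag handling is omitted. */
static void show_file_name(const char *subsys, const char *cft_name)
{
	char buf[64];

	if (subsys)
		snprintf(buf, sizeof(buf), "%s.%s", subsys, cft_name);
	else
		snprintf(buf, sizeof(buf), "%s", cft_name);
	printf("%s\n", buf);
}

int main(void)
{
	show_file_name("memory", "swappiness");	/* -> memory.swappiness */
	show_file_name(NULL, "cgroup.procs");	/* core files carry no prefix */
	return 0;
}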

View File

@@ -113,6 +113,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);
@@ -689,8 +690,6 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
*/
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_hold(struct cgroup *cgrp);
-void cgroup_rstat_flush_release(struct cgroup *cgrp);
/*
* Basic resource stats.

View File

@@ -60,7 +60,6 @@ struct misc_cg {
struct misc_res res[MISC_CG_RES_TYPES];
};
-u64 misc_cg_res_total_usage(enum misc_res_type type);
int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
@@ -104,11 +103,6 @@ static inline void put_misc_cg(struct misc_cg *cg)
#else /* !CONFIG_CGROUP_MISC */
-static inline u64 misc_cg_res_total_usage(enum misc_res_type type)
-{
-return 0;
-}
static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
return 0;

View File

@@ -1198,7 +1198,8 @@ config CPUSETS_V1
help
Legacy cgroup v1 cpusets controller which has been deprecated by
cgroup v2 implementation. The v1 is there for legacy applications
-which haven't migrated to the new cgroup v2 interface yet. If you
+which haven't migrated to the new cgroup v2 interface yet. Legacy
+interface includes cpuset filesystem and /proc/<pid>/cpuset. If you
do not have any such application then you are completely fine leaving
this option disabled.
@@ -1206,7 +1207,7 @@ config CPUSETS_V1
config PROC_PID_CPUSET
bool "Include legacy /proc/<pid>/cpuset file"
-depends on CPUSETS
+depends on CPUSETS_V1
default y
config CGROUP_DEVICE
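
What /proc/<pid>/cpuset provides, and therefore what goes away when CPUSETS_V1 (and with it PROC_PID_CPUSET) is left disabled, can be seen by reading the file for the current task; a minimal sketch:

#include <stdio.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (!f) {
		perror("/proc/self/cpuset");	/* absent without PROC_PID_CPUSET */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);		/* e.g. "/" or "/my_cpuset" */
	fclose(f);
	return 0;
}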

View File

@@ -168,6 +168,7 @@ struct cgroup_mgctx {
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
+extern bool cgrp_dfl_visible;
/* iterate across the hierarchies */
#define for_each_root(root) \

View File

@@ -673,6 +673,7 @@ struct cftype cgroup1_base_files[] = {
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
struct cgroup_subsys *ss;
+bool cgrp_v1_visible = false;
int i;
seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
@@ -684,12 +685,18 @@ int proc_cgroupstats_show(struct seq_file *m, void *v)
for_each_subsys(ss, i) {
if (cgroup1_subsys_absent(ss))
continue;
+cgrp_v1_visible |= ss->root != &cgrp_dfl_root;
seq_printf(m, "%s\t%d\t%d\t%d\n",
ss->legacy_name, ss->root->hierarchy_id,
atomic_read(&ss->root->nr_cgrps),
cgroup_ssid_enabled(i));
}
+if (cgrp_dfl_visible && !cgrp_v1_visible)
+pr_info_once("/proc/cgroups lists only v1 controllers, use cgroup.controllers of root cgroup for v2 info\n");
return 0;
}
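
The message added above points v2-only systems at the root cgroup's cgroup.controllers file instead. Reading it from user space is straightforward; a minimal sketch, assuming the v2 hierarchy is mounted at /sys/fs/cgroup:

#include <stdio.h>

int main(void)
{
	char buf[256];
	/* Assumed mount point of the v2 hierarchy. */
	FILE *f = fopen("/sys/fs/cgroup/cgroup.controllers", "r");

	if (!f) {
		perror("cgroup.controllers");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("v2 controllers: %s", buf);	/* e.g. "cpuset cpu io memory ..." */
	fclose(f);
	return 0;
}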

View File

@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(cgrp_dfl_root);
* The default hierarchy always exists but is hidden until mounted for the
* first time. This is for backward compatibility.
*/
-static bool cgrp_dfl_visible;
+bool cgrp_dfl_visible;
/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;
@@ -4447,7 +4447,7 @@ int cgroup_rm_cftypes(struct cftype *cfts)
* function currently returns 0 as long as @cfts registration is successful
* even if some file creation attempts on existing cgroups fail.
*/
-static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
int ret;
@@ -5831,7 +5831,7 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
}
/*
-* This extra ref will be put in cgroup_free_fn() and guarantees
+* This extra ref will be put in css_free_rwork_fn() and guarantees
* that @cgrp->kn is always accessible.
*/
kernfs_get(cgrp->kn);

View File

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "cgroup-internal.h"
#include "cpuset-internal.h"
/*
@@ -175,6 +176,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
switch (type) {
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = update_relax_domain_level(cs, val);
break;
default:
@@ -373,6 +375,46 @@ int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial)
return ret;
}
+#ifdef CONFIG_PROC_PID_CPUSET
+/*
+* proc_cpuset_show()
+* - Print tasks cpuset path into seq_file.
+* - Used for /proc/<pid>/cpuset.
+*/
+int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
+struct pid *pid, struct task_struct *tsk)
+{
+char *buf;
+struct cgroup_subsys_state *css;
+int retval;
+retval = -ENOMEM;
+buf = kmalloc(PATH_MAX, GFP_KERNEL);
+if (!buf)
+goto out;
+rcu_read_lock();
+spin_lock_irq(&css_set_lock);
+css = task_css(tsk, cpuset_cgrp_id);
+retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
+current->nsproxy->cgroup_ns);
+spin_unlock_irq(&css_set_lock);
+rcu_read_unlock();
+if (retval == -E2BIG)
+retval = -ENAMETOOLONG;
+if (retval < 0)
+goto out_free;
+seq_puts(m, buf);
+seq_putc(m, '\n');
+retval = 0;
+out_free:
+kfree(buf);
+out:
+return retval;
+}
+#endif /* CONFIG_PROC_PID_CPUSET */
static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
{
struct cpuset *cs = css_cs(css);
@@ -424,24 +466,31 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
retval = cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, val);
break;
case FILE_MEM_EXCLUSIVE:
pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_MEM_EXCLUSIVE, cs, val);
break;
case FILE_MEM_HARDWALL:
pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_MEM_HARDWALL, cs, val);
break;
case FILE_SCHED_LOAD_BALANCE:
pr_info_once("cpuset.%s is deprecated, use cpuset.cpus.partition instead\n", cft->name);
retval = cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
break;
case FILE_MEMORY_MIGRATE:
pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_MEMORY_MIGRATE, cs, val);
break;
case FILE_MEMORY_PRESSURE_ENABLED:
pr_info_once("cpuset.%s is deprecated, use memory.pressure with CONFIG_PSI instead\n", cft->name);
cpuset_memory_pressure_enabled = !!val;
break;
case FILE_SPREAD_PAGE:
pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_SPREAD_PAGE, cs, val);
break;
case FILE_SPREAD_SLAB:
pr_warn_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_SPREAD_SLAB, cs, val);
break;
default:

View File

@@ -21,7 +21,6 @@
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#include "cgroup-internal.h"
#include "cpuset-internal.h"
#include <linux/init.h>
@@ -4244,50 +4243,6 @@ void cpuset_print_current_mems_allowed(void)
rcu_read_unlock();
}
-#ifdef CONFIG_PROC_PID_CPUSET
-/*
-* proc_cpuset_show()
-* - Print tasks cpuset path into seq_file.
-* - Used for /proc/<pid>/cpuset.
-* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
-* doesn't really matter if tsk->cpuset changes after we read it,
-* and we take cpuset_mutex, keeping cpuset_attach() from changing it
-* anyway.
-*/
-int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
-struct pid *pid, struct task_struct *tsk)
-{
-char *buf;
-struct cgroup_subsys_state *css;
-int retval;
-retval = -ENOMEM;
-buf = kmalloc(PATH_MAX, GFP_KERNEL);
-if (!buf)
-goto out;
-rcu_read_lock();
-spin_lock_irq(&css_set_lock);
-css = task_css(tsk, cpuset_cgrp_id);
-retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
-current->nsproxy->cgroup_ns);
-spin_unlock_irq(&css_set_lock);
-rcu_read_unlock();
-if (retval == -E2BIG)
-retval = -ENAMETOOLONG;
-if (retval < 0)
-goto out_free;
-seq_puts(m, buf);
-seq_putc(m, '\n');
-retval = 0;
-out_free:
-kfree(buf);
-out:
-return retval;
-}
-#endif /* CONFIG_PROC_PID_CPUSET */
/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{

View File

@@ -430,9 +430,11 @@ static ssize_t freezer_write(struct kernfs_open_file *of,
if (strcmp(buf, freezer_state_strs(0)) == 0)
freeze = false;
-else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0)
+else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0) {
+pr_info_once("Freezing with imperfect legacy cgroup freezer. "
+"See cgroup.freeze of cgroup v2\n");
freeze = true;
-else
+} else
return -EINVAL;
freezer_change_state(css_freezer(of_css(of)), freeze);

View File

@@ -67,22 +67,6 @@ static inline bool valid_type(enum misc_res_type type)
return type >= 0 && type < MISC_CG_RES_TYPES;
}
-/**
-* misc_cg_res_total_usage() - Get the current total usage of the resource.
-* @type: misc res type.
-*
-* Context: Any context.
-* Return: Current total usage of the resource.
-*/
-u64 misc_cg_res_total_usage(enum misc_res_type type)
-{
-if (valid_type(type))
-return atomic64_read(&root_cg.res[type].usage);
-return 0;
-}
-EXPORT_SYMBOL_GPL(misc_cg_res_total_usage);
/**
* misc_cg_set_capacity() - Set the capacity of the misc cgroup res.
* @type: Type of the misc res.

View File

@@ -299,40 +299,6 @@ static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
spin_unlock_irq(&cgroup_rstat_lock);
}
-/* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
-__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
-{
-int cpu;
-lockdep_assert_held(&cgroup_rstat_lock);
-for_each_possible_cpu(cpu) {
-struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
-for (; pos; pos = pos->rstat_flush_next) {
-struct cgroup_subsys_state *css;
-cgroup_base_stat_flush(pos, cpu);
-bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
-rcu_read_lock();
-list_for_each_entry_rcu(css, &pos->rstat_css_list,
-rstat_css_node)
-css->ss->css_rstat_flush(css, cpu);
-rcu_read_unlock();
-}
-/* play nice and yield if necessary */
-if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
-__cgroup_rstat_unlock(cgrp, cpu);
-if (!cond_resched())
-cpu_relax();
-__cgroup_rstat_lock(cgrp, cpu);
-}
-}
-}
/**
* cgroup_rstat_flush - flush stats in @cgrp's subtree
* @cgrp: target cgroup
@@ -348,38 +314,30 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
*/
__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
+int cpu;
might_sleep();
-__cgroup_rstat_lock(cgrp, -1);
-cgroup_rstat_flush_locked(cgrp);
-__cgroup_rstat_unlock(cgrp, -1);
+for_each_possible_cpu(cpu) {
+struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
+/* Reacquire for each CPU to avoid disabling IRQs too long */
+__cgroup_rstat_lock(cgrp, cpu);
+for (; pos; pos = pos->rstat_flush_next) {
+struct cgroup_subsys_state *css;
+cgroup_base_stat_flush(pos, cpu);
+bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
+rcu_read_lock();
+list_for_each_entry_rcu(css, &pos->rstat_css_list,
+rstat_css_node)
+css->ss->css_rstat_flush(css, cpu);
+rcu_read_unlock();
+}
+__cgroup_rstat_unlock(cgrp, cpu);
+if (!cond_resched())
+cpu_relax();
+}
}
-/**
-* cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
-* @cgrp: target cgroup
-*
-* Flush stats in @cgrp's subtree and prevent further flushes. Must be
-* paired with cgroup_rstat_flush_release().
-*
-* This function may block.
-*/
-void cgroup_rstat_flush_hold(struct cgroup *cgrp)
-__acquires(&cgroup_rstat_lock)
-{
-might_sleep();
-__cgroup_rstat_lock(cgrp, -1);
-cgroup_rstat_flush_locked(cgrp);
-}
-/**
-* cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
-* @cgrp: cgroup used by tracepoint
-*/
-void cgroup_rstat_flush_release(struct cgroup *cgrp)
-__releases(&cgroup_rstat_lock)
-{
-__cgroup_rstat_unlock(cgrp, -1);
-}
int cgroup_rstat_init(struct cgroup *cgrp)
@@ -612,36 +570,34 @@ static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
-u64 usage, utime, stime, ntime;
+struct cgroup_base_stat bstat;
if (cgroup_parent(cgrp)) {
-cgroup_rstat_flush_hold(cgrp);
-usage = cgrp->bstat.cputime.sum_exec_runtime;
+cgroup_rstat_flush(cgrp);
+__cgroup_rstat_lock(cgrp, -1);
+bstat = cgrp->bstat;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
-&utime, &stime);
-ntime = cgrp->bstat.ntime;
-cgroup_rstat_flush_release(cgrp);
+&bstat.cputime.utime, &bstat.cputime.stime);
+__cgroup_rstat_unlock(cgrp, -1);
} else {
-/* cgrp->bstat of root is not actually used, reuse it */
-root_cgroup_cputime(&cgrp->bstat);
-usage = cgrp->bstat.cputime.sum_exec_runtime;
-utime = cgrp->bstat.cputime.utime;
-stime = cgrp->bstat.cputime.stime;
-ntime = cgrp->bstat.ntime;
+root_cgroup_cputime(&bstat);
}
-do_div(usage, NSEC_PER_USEC);
-do_div(utime, NSEC_PER_USEC);
-do_div(stime, NSEC_PER_USEC);
-do_div(ntime, NSEC_PER_USEC);
+do_div(bstat.cputime.sum_exec_runtime, NSEC_PER_USEC);
+do_div(bstat.cputime.utime, NSEC_PER_USEC);
+do_div(bstat.cputime.stime, NSEC_PER_USEC);
+do_div(bstat.ntime, NSEC_PER_USEC);
seq_printf(seq, "usage_usec %llu\n"
"user_usec %llu\n"
"system_usec %llu\n"
"nice_usec %llu\n",
-usage, utime, stime, ntime);
+bstat.cputime.sum_exec_runtime,
+bstat.cputime.utime,
+bstat.cputime.stime,
+bstat.ntime);
-cgroup_force_idle_show(seq, &cgrp->bstat);
+cgroup_force_idle_show(seq, &bstat);
}
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */

View File

@@ -1855,9 +1855,11 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
if (val > MAX_SWAPPINESS)
return -EINVAL;
-if (!mem_cgroup_is_root(memcg))
+if (!mem_cgroup_is_root(memcg)) {
+pr_info_once("Per memcg swappiness does not exist in cgroup v2. "
+"See memory.reclaim or memory.swap.max there\n ");
WRITE_ONCE(memcg->swappiness, val);
-else
+} else
WRITE_ONCE(vm_swappiness, val);
return 0;
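
The hint added above names the cgroup v2 knobs that replace per-memcg swappiness tuning. A minimal user-space sketch of both, with the cgroup path and byte values chosen purely for illustration (the memory.reclaim write errors out if the requested amount cannot be reclaimed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_knob(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	/* Assumed cgroup: "demo" on a v2 hierarchy mounted at /sys/fs/cgroup. */
	write_knob("/sys/fs/cgroup/demo/memory.reclaim", "67108864");   /* try to reclaim 64M */
	write_knob("/sys/fs/cgroup/demo/memory.swap.max", "134217728"); /* cap swap usage at 128M */
	return 0;
}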