mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 17:12:50 -04:00
sched/topology: Fix sched_domain_span()
Commit 8e8e23dea4 ("sched/topology: Compute sd_weight considering cpuset partitions") ends up relying on the fact that structure initialization should not touch the flexible array. However, the official GCC specification for "Arrays of Length Zero" [*] says: Although the size of a zero-length array is zero, an array member of this kind may increase the size of the enclosing type as a result of tail padding. Additionally, structure initialization will zero tail padding. With the end result that since offsetof(*type, member) < sizeof(*type), array initialization will clobber the flex array. Luckily, the way flexible array sizes are calculated is: sizeof(*type) + count * sizeof(*type->member) This means we have the complete size of the flex array *outside* of sizeof(*type), so use that instead of relying on the broken flex array definition. [*] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html Fixes: 8e8e23dea4 ("sched/topology: Compute sd_weight considering cpuset partitions") Reported-by: Nathan Chancellor <nathan@kernel.org> Debugged-by: K Prateek Nayak <kprateek.nayak@amd.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Jon Hunter <jonathanh@nvidia.com> Tested-by: Chen Yu <yu.c.chen@intel.com> Tested-by: K Prateek Nayak <kprateek.nayak@amd.com> Tested-by: Nathan Chancellor <nathan@kernel.org> Link: https://patch.msgid.link/20260323093627.GY3738010@noisy.programming.kicks-ass.net
This commit is contained in:
@@ -142,18 +142,30 @@ struct sched_domain {
|
||||
|
||||
unsigned int span_weight;
|
||||
/*
|
||||
* Span of all CPUs in this domain.
|
||||
* See sched_domain_span(), on why flex arrays are broken.
|
||||
*
|
||||
* NOTE: this field is variable length. (Allocated dynamically
|
||||
* by attaching extra space to the end of the structure,
|
||||
* depending on how many CPUs the kernel has booted up with)
|
||||
*/
|
||||
unsigned long span[];
|
||||
*/
|
||||
};
|
||||
|
||||
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
|
||||
{
|
||||
return to_cpumask(sd->span);
|
||||
/*
|
||||
* Turns out that C flexible arrays are fundamentally broken since it
|
||||
* is allowed for offsetof(*sd, span) < sizeof(*sd), this means that
|
||||
* structure initialzation *sd = { ... }; which writes every byte
|
||||
* inside sizeof(*type), will over-write the start of the flexible
|
||||
* array.
|
||||
*
|
||||
* Luckily, the way we allocate sched_domain is by:
|
||||
*
|
||||
* sizeof(*sd) + cpumask_size()
|
||||
*
|
||||
* this means that we have sufficient space for the whole flex array
|
||||
* *outside* of sizeof(*sd). So use that, and avoid using sd->span.
|
||||
*/
|
||||
unsigned long *bitmap = (void *)sd + sizeof(*sd);
|
||||
return to_cpumask(bitmap);
|
||||
}
|
||||
|
||||
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
|
||||
|
||||
Reference in New Issue
Block a user