diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 061f8c85f555..79bab80af8f2 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1645,13 +1645,17 @@ sd_init(struct sched_domain_topology_level *tl,
 	struct cpumask *sd_span;
 	u64 now = sched_clock();
 
-	sd_weight = cpumask_weight(tl->mask(tl, cpu));
+	sd_span = sched_domain_span(sd);
+	cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));
+	sd_weight = cpumask_weight(sd_span);
+	sd_id = cpumask_first(sd_span);
 
 	if (tl->sd_flags)
 		sd_flags = (*tl->sd_flags)();
 
 	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
-			"wrong sd_flags in topology description\n"))
+		      "wrong sd_flags in topology description\n"))
 		sd_flags &= TOPOLOGY_SD_FLAGS;
+	sd_flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
 	*sd = (struct sched_domain){
 		.min_interval		= sd_weight,
@@ -1689,12 +1693,6 @@ sd_init(struct sched_domain_topology_level *tl,
 		.name = tl->name,
 	};
 
-	sd_span = sched_domain_span(sd);
-	cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));
-	sd_id = cpumask_first(sd_span);
-
-	sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
-
 	WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
 		  (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
 		  "CPU capacity asymmetry not supported on SMT\n");