Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-05-07 06:50:43 -04:00)
x86/cpu: Move phys_proc_id into topology info

Rename it to pkg_id which is the terminology used in the kernel.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Zhang Rui <rui.zhang@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230814085112.329006989@linutronix.de
This commit is contained in:
@@ -59,7 +59,7 @@ Package-related topology information in the kernel:

    The physical ID of the die. This information is retrieved via CPUID.

-  - cpuinfo_x86.phys_proc_id:
+  - cpuinfo_x86.topo.pkg_id:

    The physical ID of the package. This information is retrieved via CPUID
    and deduced from the APIC IDs of the cores in the package.

@@ -83,6 +83,9 @@ struct cpuinfo_topology {
 	u32			apicid;
 	// The initial APIC ID provided by CPUID
 	u32			initial_apicid;
+
+	// Physical package ID
+	u32			pkg_id;
 };

 struct cpuinfo_x86 {
@@ -134,8 +137,6 @@ struct cpuinfo_x86 {
 	u16			x86_clflush_size;
 	/* number of cores as seen by the OS: */
 	u16			booted_cores;
-	/* Physical processor id: */
-	u16			phys_proc_id;
 	/* Logical processor id: */
 	u16			logical_proc_id;
 	/* Core id: */

@@ -106,7 +106,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern const struct cpumask *cpu_clustergroup_mask(int cpu);

 #define topology_logical_package_id(cpu)	(cpu_data(cpu).logical_proc_id)
-#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
+#define topology_physical_package_id(cpu)	(cpu_data(cpu).topo.pkg_id)
 #define topology_logical_die_id(cpu)		(cpu_data(cpu).logical_die_id)
 #define topology_die_id(cpu)			(cpu_data(cpu).cpu_die_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)

@@ -177,7 +177,7 @@ struct x86_init_ops {
  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
  * @setup_percpu_clockev:	set up the per cpu clock event device
  * @early_percpu_clock_init:	early init of the per cpu clock event device
- * @fixup_cpu_id:		fixup function for cpuinfo_x86::phys_proc_id
+ * @fixup_cpu_id:		fixup function for cpuinfo_x86::topo.pkg_id
  * @parallel_bringup:		Parallel bringup control
  */
 struct x86_cpuinit_ops {

@@ -169,7 +169,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 		nodes = ((val >> 3) & 7) + 1;
 	}

-	c->phys_proc_id = node / nodes;
+	c->topo.pkg_id = node / nodes;
 }

 static int __init numachip_system_init(void)

@@ -461,9 +461,9 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 	/* Low order bits define the core id (index of core in socket) */
 	c->cpu_core_id = c->topo.initial_apicid & ((1 << bits)-1);
 	/* Convert the initial APIC ID into the socket ID */
-	c->phys_proc_id = c->topo.initial_apicid >> bits;
+	c->topo.pkg_id = c->topo.initial_apicid >> bits;
 	/* use socket ID also for last level cache */
-	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
+	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->topo.pkg_id;
 }

 u32 amd_get_nodes_per_socket(void)

@@ -875,10 +875,10 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
	 * turns means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
-	 * c->phys_proc_id.
+	 * c->topo.pkg_id.
	 */
 	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
-		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+		per_cpu(cpu_llc_id, cpu) = c->topo.pkg_id;
 #endif

 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

@@ -914,7 +914,7 @@ void detect_ht(struct cpuinfo_x86 *c)
 		return;

 	index_msb = get_count_order(smp_num_siblings);
-	c->phys_proc_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);
+	c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);

 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

@@ -1769,7 +1769,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
 		c->topo.apicid = c->topo.initial_apicid;
 # endif
 #endif
-		c->phys_proc_id = c->topo.initial_apicid;
+		c->topo.pkg_id = c->topo.initial_apicid;
 	}

 	get_model_name(c); /* Default name */

@@ -1807,7 +1807,7 @@ static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
 		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
 		       cpu, apicid, c->topo.initial_apicid);
 	}
-	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
+	BUG_ON(topology_update_package_map(c->topo.pkg_id, cpu));
 	BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
 #else
 	c->logical_proc_id = 0;

@@ -92,7 +92,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
		 * when running on host.
		 */
 		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
-			c->phys_proc_id = c->topo.apicid >> APICID_SOCKET_ID_BIT;
+			c->topo.pkg_id = c->topo.apicid >> APICID_SOCKET_ID_BIT;

 		cacheinfo_hygon_init_llc_id(c, cpu);
 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {

@@ -122,9 +122,9 @@ static void hygon_detect_cmp(struct cpuinfo_x86 *c)
 	/* Low order bits define the core id (index of core in socket) */
 	c->cpu_core_id = c->topo.initial_apicid & ((1 << bits)-1);
 	/* Convert the initial APIC ID into the socket ID */
-	c->phys_proc_id = c->topo.initial_apicid >> bits;
+	c->topo.pkg_id = c->topo.initial_apicid >> bits;
 	/* use socket ID also for last level cache */
-	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
+	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->topo.pkg_id;
 }

 static void srat_detect_node(struct cpuinfo_x86 *c)

@@ -105,7 +105,7 @@ int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
 	for_each_possible_cpu(cpu) {
 		if (cpu_data(cpu).topo.initial_apicid == lapic_id) {
 			m.extcpu = cpu;
-			m.socketid = cpu_data(m.extcpu).phys_proc_id;
+			m.socketid = cpu_data(m.extcpu).topo.pkg_id;
 			break;
 		}
 	}

@@ -123,7 +123,7 @@ void mce_setup(struct mce *m)
 	m->time = __ktime_get_real_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
 	m->cpuid = cpuid_eax(1);
-	m->socketid = cpu_data(m->extcpu).phys_proc_id;
+	m->socketid = cpu_data(m->extcpu).topo.pkg_id;
 	m->apicid = cpu_data(m->extcpu).topo.initial_apicid;
 	m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
 	m->ppin = cpu_data(m->extcpu).ppin;

@@ -20,7 +20,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 			      unsigned int cpu)
 {
 #ifdef CONFIG_SMP
-	seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+	seq_printf(m, "physical id\t: %d\n", c->topo.pkg_id);
 	seq_printf(m, "siblings\t: %d\n",
 		   cpumask_weight(topology_core_cpumask(cpu)));
 	seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);

@@ -154,8 +154,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 						core_plus_mask_width) & die_select_mask;
 	}

-	c->phys_proc_id = apic->phys_pkg_id(c->topo.initial_apicid,
-				pkg_mask_width);
+	c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, pkg_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */

@@ -339,7 +339,7 @@ int topology_phys_to_logical_pkg(unsigned int phys_pkg)
 	for_each_possible_cpu(cpu) {
 		struct cpuinfo_x86 *c = &cpu_data(cpu);

-		if (c->initialized && c->phys_proc_id == phys_pkg)
+		if (c->initialized && c->topo.pkg_id == phys_pkg)
 			return c->logical_proc_id;
 	}
 	return -1;

@@ -355,13 +355,13 @@ EXPORT_SYMBOL(topology_phys_to_logical_pkg);
  */
 static int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
 {
-	int cpu, proc_id = cpu_data(cur_cpu).phys_proc_id;
+	int cpu, proc_id = cpu_data(cur_cpu).topo.pkg_id;

 	for_each_possible_cpu(cpu) {
 		struct cpuinfo_x86 *c = &cpu_data(cpu);

 		if (c->initialized && c->cpu_die_id == die_id &&
-		    c->phys_proc_id == proc_id)
+		    c->topo.pkg_id == proc_id)
 			return c->logical_die_id;
 	}
 	return -1;

@@ -421,7 +421,7 @@ static void __init smp_store_boot_cpu_info(void)

 	*c = boot_cpu_data;
 	c->cpu_index = id;
-	topology_update_package_map(c->phys_proc_id, id);
+	topology_update_package_map(c->topo.pkg_id, id);
 	topology_update_die_map(c->cpu_die_id, id);
 	c->initialized = true;
 }

@@ -476,7 +476,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

-		if (c->phys_proc_id == o->phys_proc_id &&
+		if (c->topo.pkg_id == o->topo.pkg_id &&
 		    c->cpu_die_id == o->cpu_die_id &&
 		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
 			if (c->cpu_core_id == o->cpu_core_id)
@@ -488,7 +488,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 			return topology_sane(c, o, "smt");
 		}

-	} else if (c->phys_proc_id == o->phys_proc_id &&
+	} else if (c->topo.pkg_id == o->topo.pkg_id &&
 		   c->cpu_die_id == o->cpu_die_id &&
 		   c->cpu_core_id == o->cpu_core_id) {
 		return topology_sane(c, o, "smt");
@@ -499,7 +499,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)

 static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if (c->phys_proc_id == o->phys_proc_id &&
+	if (c->topo.pkg_id == o->topo.pkg_id &&
 	    c->cpu_die_id == o->cpu_die_id)
 		return true;
 	return false;
@@ -527,7 +527,7 @@ static bool match_l2c(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
  */
 static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if (c->phys_proc_id == o->phys_proc_id)
+	if (c->topo.pkg_id == o->topo.pkg_id)
 		return true;
 	return false;
 }

@@ -12442,9 +12442,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	int max_core_id, min_core_id;
 	struct lpfc_vector_map_info *cpup;
 	struct lpfc_vector_map_info *new_cpup;
-#ifdef CONFIG_X86
-	struct cpuinfo_x86 *cpuinfo;
-#endif
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct lpfc_hdwq_stat *c_stat;
 #endif
@@ -12458,8 +12455,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	for_each_present_cpu(cpu) {
 		cpup = &phba->sli4_hba.cpu_map[cpu];
 #ifdef CONFIG_X86
-		cpuinfo = &cpu_data(cpu);
-		cpup->phys_id = cpuinfo->phys_proc_id;
-		cpup->core_id = cpuinfo->cpu_core_id;
+		cpup->phys_id = topology_physical_package_id(cpu);
+		cpup->core_id = topology_core_id(cpu);
 		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
 			cpup->flag |= LPFC_CPU_MAP_HYPER;
||||
Reference in New Issue
Block a user