x86/cacheinfo: Refactor CPUID leaf 0x2 cache descriptor lookup

Extract the cache descriptor lookup logic out of the leaf 0x2 parsing
code and into a dedicated function.  This disentangles such lookup from
the deeply nested leaf 0x2 parsing loop.

Remove the cache table termination entry, as it is no longer needed
after the ARRAY_SIZE()-based lookup.

[ darwi: Move refactoring logic into this separate commit + commit log.
	 Remove the cache table termination entry. ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250324133324.23458-6-darwi@linutronix.de
This commit is contained in:
Thomas Gleixner
2025-03-24 14:33:00 +01:00
committed by Ingo Molnar
parent a078aaa38a
commit ee159792a4

View File

@@ -123,7 +123,6 @@ static const struct _cache_table cache_table[] =
{ 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
{ 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
{ 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
{ 0x00, 0, 0}
};
@@ -728,6 +727,16 @@ void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
ci->num_leaves = find_num_cache_leaves(c);
}
/*
 * Look up the cache_table entry for a CPUID leaf 0x2 cache descriptor.
 *
 * @desc: one-byte cache descriptor as reported by CPUID leaf 0x2
 *
 * Return: pointer to the matching cache_table entry, or NULL if @desc
 * is not a known cache descriptor.
 */
static const struct _cache_table *cache_table_get(u8 desc)
{
	const struct _cache_table *entry;

	for (entry = cache_table; entry < cache_table + ARRAY_SIZE(cache_table); entry++) {
		if (entry->descriptor == desc)
			return entry;
	}

	return NULL;
}
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
/* Cache sizes */
@@ -784,34 +793,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
/* Don't use CPUID(2) if CPUID(4) is supported. */
if (!ci->num_leaves && c->cpuid_level > 1) {
const struct _cache_table *entry;
union leaf_0x2_regs regs;
u8 *desc;
cpuid_get_leaf_0x2_regs(&regs);
for_each_leaf_0x2_desc(regs, desc) {
u8 k = 0;
entry = cache_table_get(*desc);
if (!entry)
continue;
/* look up this descriptor in the table */
while (cache_table[k].descriptor != 0) {
if (cache_table[k].descriptor == *desc) {
switch (cache_table[k].cache_type) {
case LVL_1_INST:
l1i += cache_table[k].size;
break;
case LVL_1_DATA:
l1d += cache_table[k].size;
break;
case LVL_2:
l2 += cache_table[k].size;
break;
case LVL_3:
l3 += cache_table[k].size;
break;
}
break;
}
k++;
switch (entry->cache_type) {
case LVL_1_INST: l1i += entry->size; break;
case LVL_1_DATA: l1d += entry->size; break;
case LVL_2: l2 += entry->size; break;
case LVL_3: l3 += entry->size; break;
}
}
}