powerpc/align: Convert emulate_spe() to scoped user access

Commit 861574d51b ("powerpc/uaccess: Implement masked user access")
provides optimised user access by avoiding the cost of access_ok().

Convert emulate_spe() to scoped user access to benefit from masked
user access.

Scoped user access also makes the code simpler.

Signed-off-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/4ff83cb240da4e2d0c34e2bce4b8b6ef19a33777.1773136880.git.chleroy@kernel.org
This commit is contained in:
Christophe Leroy (CS GROUP)
2026-03-10 11:01:31 +01:00
committed by Madhavan Srinivasan
parent 679fa9c756
commit bf53ede003

View File

@@ -165,25 +165,23 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
temp.ll = data.ll = 0;
p = addr;
if (!user_read_access_begin(addr, nb))
return -EFAULT;
switch (nb) {
case 8:
unsafe_get_user(temp.v[0], p++, Efault_read);
unsafe_get_user(temp.v[1], p++, Efault_read);
unsafe_get_user(temp.v[2], p++, Efault_read);
unsafe_get_user(temp.v[3], p++, Efault_read);
fallthrough;
case 4:
unsafe_get_user(temp.v[4], p++, Efault_read);
unsafe_get_user(temp.v[5], p++, Efault_read);
fallthrough;
case 2:
unsafe_get_user(temp.v[6], p++, Efault_read);
unsafe_get_user(temp.v[7], p++, Efault_read);
scoped_user_read_access_size(addr, nb, efault) {
switch (nb) {
case 8:
unsafe_get_user(temp.v[0], p++, efault);
unsafe_get_user(temp.v[1], p++, efault);
unsafe_get_user(temp.v[2], p++, efault);
unsafe_get_user(temp.v[3], p++, efault);
fallthrough;
case 4:
unsafe_get_user(temp.v[4], p++, efault);
unsafe_get_user(temp.v[5], p++, efault);
fallthrough;
case 2:
unsafe_get_user(temp.v[6], p++, efault);
unsafe_get_user(temp.v[7], p++, efault);
}
}
user_read_access_end();
switch (instr) {
case EVLDD:
@@ -252,25 +250,23 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
if (flags & ST) {
p = addr;
if (!user_write_access_begin(addr, nb))
return -EFAULT;
switch (nb) {
case 8:
unsafe_put_user(data.v[0], p++, Efault_write);
unsafe_put_user(data.v[1], p++, Efault_write);
unsafe_put_user(data.v[2], p++, Efault_write);
unsafe_put_user(data.v[3], p++, Efault_write);
fallthrough;
case 4:
unsafe_put_user(data.v[4], p++, Efault_write);
unsafe_put_user(data.v[5], p++, Efault_write);
fallthrough;
case 2:
unsafe_put_user(data.v[6], p++, Efault_write);
unsafe_put_user(data.v[7], p++, Efault_write);
scoped_user_write_access_size(addr, nb, efault) {
switch (nb) {
case 8:
unsafe_put_user(data.v[0], p++, efault);
unsafe_put_user(data.v[1], p++, efault);
unsafe_put_user(data.v[2], p++, efault);
unsafe_put_user(data.v[3], p++, efault);
fallthrough;
case 4:
unsafe_put_user(data.v[4], p++, efault);
unsafe_put_user(data.v[5], p++, efault);
fallthrough;
case 2:
unsafe_put_user(data.v[6], p++, efault);
unsafe_put_user(data.v[7], p++, efault);
}
}
user_write_access_end();
} else {
*evr = data.w[0];
regs->gpr[reg] = data.w[1];
@@ -278,12 +274,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
return 1;
Efault_read:
user_read_access_end();
return -EFAULT;
Efault_write:
user_write_access_end();
efault:
return -EFAULT;
}
#endif /* CONFIG_SPE */