LoongArch: Adjust user accessors for 32BIT/64BIT

Adjust user accessors for both 32BIT and 64BIT, including get_user(),
put_user(), copy_user(), clear_user(), etc.

Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Huacai Chen, 2025-12-08 18:09:17 +08:00
parent 14338e631a
commit 48c7294775
3 changed files with 91 additions and 22 deletions
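
For context (not part of this commit): a minimal sketch of how callers use
the accessors being adjusted; read_counter() and write_counter() are
hypothetical names. On 32BIT, a get_user() or put_user() of a u64 can no
longer be a single ld.d/st.d, which is what the uaccess.h changes below
arrange for.

#include <linux/uaccess.h>

/* Hypothetical helper: fetch a 64-bit counter from user space. */
static int read_counter(u64 __user *uptr, u64 *out)
{
	u64 val;

	/* On 32BIT this now expands to two ld.w accesses internally. */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}

/* Hypothetical helper: store a 64-bit counter back to user space. */
static int write_counter(u64 __user *uptr, u64 val)
{
	/* Likewise two st.w accesses on 32BIT; returns 0 or -EFAULT. */
	return put_user(val, uptr);
}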

arch/loongarch/include/asm/uaccess.h

@@ -19,10 +19,16 @@
 #include <asm/asm-extable.h>
 #include <asm-generic/access_ok.h>
+#define __LSW 0
+#define __MSW 1
 extern u64 __ua_limit;
 #define __UA_ADDR ".dword"
+#ifdef CONFIG_64BIT
 #define __UA_LIMIT __ua_limit
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
 /*
  * get_user: - Get a simple variable from user space.
@@ -126,6 +132,7 @@ extern u64 __ua_limit;
  *
  * Returns zero on success, or -EFAULT on error.
  */
 #define __put_user(x, ptr) \
 ({ \
 	int __pu_err = 0; \
@@ -146,7 +153,7 @@ do { \
 	case 1: __get_data_asm(val, "ld.b", ptr); break; \
 	case 2: __get_data_asm(val, "ld.h", ptr); break; \
 	case 4: __get_data_asm(val, "ld.w", ptr); break; \
-	case 8: __get_data_asm(val, "ld.d", ptr); break; \
+	case 8: __get_data_asm_8(val, ptr); break; \
 	default: BUILD_BUG(); break; \
 	} \
 } while (0)
@@ -167,13 +174,39 @@ do { \
 	(val) = (__typeof__(*(ptr))) __gu_tmp; \
 }
+#ifdef CONFIG_64BIT
+#define __get_data_asm_8(val, ptr) \
+	__get_data_asm(val, "ld.d", ptr)
+#else /* !CONFIG_64BIT */
+#define __get_data_asm_8(val, ptr) \
+{ \
+	u32 __lo, __hi; \
+	u32 __user *__ptr = (u32 __user *)(ptr); \
+ \
+	__asm__ __volatile__ ( \
+	"1:\n" \
+	"	ld.w	%1, %3 \n" \
+	"2:\n" \
+	"	ld.w	%2, %4 \n" \
+	"3:\n" \
+	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
+	_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
+	: "+r" (__gu_err), "=&r" (__lo), "=r" (__hi) \
+	: "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
+	if (__gu_err) \
+		__hi = 0; \
+	(val) = (__typeof__(val))((__typeof__((val)-(val))) \
+		((((u64)__hi << 32) | __lo))); \
+}
+#endif /* CONFIG_64BIT */
 #define __put_user_common(ptr, size) \
 do { \
 	switch (size) { \
 	case 1: __put_data_asm("st.b", ptr); break; \
 	case 2: __put_data_asm("st.h", ptr); break; \
 	case 4: __put_data_asm("st.w", ptr); break; \
-	case 8: __put_data_asm("st.d", ptr); break; \
+	case 8: __put_data_asm_8(ptr); break; \
 	default: BUILD_BUG(); break; \
 	} \
 } while (0)
@@ -190,6 +223,30 @@ do { \
: "Jr" (__pu_val)); \
}
#ifdef CONFIG_64BIT
#define __put_data_asm_8(ptr) \
__put_data_asm("st.d", ptr)
#else /* !CONFIG_64BIT */
#define __put_data_asm_8(ptr) \
{ \
u32 __user *__ptr = (u32 __user *)(ptr); \
u64 __x = (__typeof__((__pu_val)-(__pu_val)))(__pu_val); \
\
__asm__ __volatile__ ( \
"1:\n" \
" st.w %z3, %1 \n" \
"2:\n" \
" st.w %z4, %2 \n" \
"3:\n" \
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
: "+r" (__pu_err), \
"=m" (__ptr[__LSW]), \
"=m" (__ptr[__MSW]) \
: "rJ" (__x), "rJ" (__x >> 32)); \
}
#endif /* CONFIG_64BIT */
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
int __gu_err = 0; \
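
A note on the 32BIT paths above: the 64-bit value is split across
__ptr[__LSW] and __ptr[__MSW] (low word first, since LoongArch is
little-endian), and the extable fixups set -EFAULT and zero the result
when either 32-bit access faults. A rough C model of the
__get_data_asm_8 semantics, using hypothetical names (not code from
the patch):

#include <linux/uaccess.h>

/* Roughly what the 32BIT __get_data_asm_8 asm does, modeled in C. */
static int get_user_u64_sketch(u64 *val, const u32 __user *uptr)
{
	u32 lo, hi;
	int err = 0;

	err |= __get_user(lo, &uptr[0]);	/* __LSW */
	err |= __get_user(hi, &uptr[1]);	/* __MSW */
	if (err) {
		*val = 0;	/* mirror the ERR_ZERO fixup and "__hi = 0" */
		return -EFAULT;
	}
	*val = ((u64)hi << 32) | lo;
	return 0;
}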

arch/loongarch/lib/clear_user.S

@@ -13,11 +13,15 @@
 #include <asm/unwind_hints.h>
 SYM_FUNC_START(__clear_user)
+#ifdef CONFIG_32BIT
+	b	__clear_user_generic
+#else
 	/*
 	 * Some CPUs support hardware unaligned access
 	 */
 	ALTERNATIVE	"b __clear_user_generic", \
 			"b __clear_user_fast", CPU_FEATURE_UAL
+#endif
 SYM_FUNC_END(__clear_user)
 EXPORT_SYMBOL(__clear_user)
@@ -29,19 +33,20 @@ EXPORT_SYMBOL(__clear_user)
  * a1: size
  */
 SYM_FUNC_START(__clear_user_generic)
-	beqz	a1, 2f
+	beqz	a1, 2f

-1:	st.b	zero, a0, 0
-	addi.d	a0, a0, 1
-	addi.d	a1, a1, -1
-	bgtz	a1, 1b
+1:	st.b	zero, a0, 0
+	PTR_ADDI	a0, a0, 1
+	PTR_ADDI	a1, a1, -1
+	bgtz	a1, 1b

-2:	move	a0, a1
-	jr	ra
+2:	move	a0, a1
+	jr	ra

-	_asm_extable 1b, 2b
+	_asm_extable 1b, 2b
 SYM_FUNC_END(__clear_user_generic)

+#ifdef CONFIG_64BIT
 /*
  * unsigned long __clear_user_fast(void *addr, unsigned long size)
  *
@@ -207,3 +212,4 @@ SYM_FUNC_START(__clear_user_fast)
 SYM_FUNC_END(__clear_user_fast)
 STACK_FRAME_NON_STANDARD __clear_user_fast
+#endif
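
Why the addi.d to PTR_ADDI switch: PTR_ADDI follows the pointer width,
so the same byte loop assembles to addi.w on 32BIT and addi.d on 64BIT.
asm/asm.h provides approximately the following; treat this as an
illustration rather than a quote of the header:

/* Pointer-width add-immediate, selected by the build target. */
#if (__SIZEOF_POINTER__ == 4)
#define PTR_ADDI	addi.w	/* 32BIT: 32-bit pointer arithmetic */
#elif (__SIZEOF_POINTER__ == 8)
#define PTR_ADDI	addi.d	/* 64BIT: 64-bit pointer arithmetic */
#endif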

arch/loongarch/lib/copy_user.S

@@ -13,11 +13,15 @@
 #include <asm/unwind_hints.h>
 SYM_FUNC_START(__copy_user)
+#ifdef CONFIG_32BIT
+	b	__copy_user_generic
+#else
 	/*
 	 * Some CPUs support hardware unaligned access
 	 */
 	ALTERNATIVE	"b __copy_user_generic", \
 			"b __copy_user_fast", CPU_FEATURE_UAL
+#endif
 SYM_FUNC_END(__copy_user)
 EXPORT_SYMBOL(__copy_user)
@@ -30,22 +34,23 @@ EXPORT_SYMBOL(__copy_user)
  * a2: n
  */
 SYM_FUNC_START(__copy_user_generic)
-	beqz	a2, 3f
+	beqz	a2, 3f

-1:	ld.b	t0, a1, 0
-2:	st.b	t0, a0, 0
-	addi.d	a0, a0, 1
-	addi.d	a1, a1, 1
-	addi.d	a2, a2, -1
-	bgtz	a2, 1b
+1:	ld.b	t0, a1, 0
+2:	st.b	t0, a0, 0
+	PTR_ADDI	a0, a0, 1
+	PTR_ADDI	a1, a1, 1
+	PTR_ADDI	a2, a2, -1
+	bgtz	a2, 1b

-3:	move	a0, a2
-	jr	ra
+3:	move	a0, a2
+	jr	ra

-	_asm_extable 1b, 3b
-	_asm_extable 2b, 3b
+	_asm_extable 1b, 3b
+	_asm_extable 2b, 3b
 SYM_FUNC_END(__copy_user_generic)

+#ifdef CONFIG_64BIT
 /*
  * unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
  *
@@ -281,3 +286,4 @@ SYM_FUNC_START(__copy_user_fast)
 SYM_FUNC_END(__copy_user_fast)
 STACK_FRAME_NON_STANDARD __copy_user_fast
+#endif
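
In both assembly files the 32BIT entry point branches unconditionally to
the generic byte loop, because the _fast variants depend on 64-bit
loads/stores and are now compiled out. A C-level model of the dispatch,
illustrative only (the real selection is boot-time code patching via
ALTERNATIVE, not a runtime branch; copy_user_dispatch is a made-up name):

#include <asm/cpu-features.h>	/* cpu_has_ual */

/* Both helpers return the number of bytes left uncopied (0 on success). */
unsigned long copy_user_dispatch(void *to, const void *from, unsigned long n)
{
	/* cpu_has_ual: the CPU handles unaligned accesses in hardware. */
	if (IS_ENABLED(CONFIG_64BIT) && cpu_has_ual)
		return __copy_user_fast(to, from, n);	/* word-at-a-time */
	return __copy_user_generic(to, from, n);	/* byte-at-a-time */
}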