Files
linux/arch/mips/include/asm/ftrace.h
Thomas Huth 157f9533f9 mips: Replace __ASSEMBLY__ with __ASSEMBLER__ in the mips headers
While the GCC and Clang compilers already define __ASSEMBLER__
automatically when compiling assembler code, __ASSEMBLY__ is a macro
that only gets defined by the Makefiles in the kernel. Defining
such a macro was necessary in the early days of the kernel, since GCC
did not provide __ASSEMBLER__ until version 3.0 in 2000 (see
https://gcc.gnu.org/git/?p=gcc.git;a=commitdiff;h=f8f769ea4e69 ).
However, having two macros can be very confusing nowadays for
developers when switching between userspace and kernelspace coding,
or when dealing with uapi headers that should use __ASSEMBLER__
instead. So let's now standardize on the __ASSEMBLER__ macro that is
provided by the compilers.

This is an almost completely mechanical patch (done with a simple
"sed -i" statement), with just one comment tweaked manually in
arch/mips/include/asm/cpu.h (that was missing some underscores).
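
For illustration, the converted pattern looks like this (a sketch of
the idiom, not an excerpt from the patch):

    /* Before: relied on the Makefile-provided macro */
    #ifndef __ASSEMBLY__
    extern void _mcount(void);
    #endif /* __ASSEMBLY__ */

    /* After: uses the compiler-provided macro */
    #ifndef __ASSEMBLER__
    extern void _mcount(void);
    #endif /* __ASSEMBLER__ */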

Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: linux-mips@vger.kernel.org
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Maciej W. Rozycki <macro@orcam.me.uk>
Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
2025-08-29 22:34:29 +02:00

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive for
 * more details.
 *
 * Copyright (C) 2009 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 */
#ifndef _ASM_MIPS_FTRACE_H
#define _ASM_MIPS_FTRACE_H

#ifdef CONFIG_FUNCTION_TRACER

#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4		/* sizeof mcount call */

#ifndef __ASSEMBLER__
extern void _mcount(void);
#define mcount _mcount
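
/*
 * safe_load() and safe_store() access memory that may fault (e.g. when
 * reading or patching instructions, or peeking at a stack word).  A
 * fault at the access (label 1) is redirected through the __ex_table
 * entry to the fixup code (label 3), which sets 'error' to 1 and
 * resumes at label 2 instead of leaving the exception unhandled.
 */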
#define safe_load(load, src, dst, error)			\
do {								\
	asm volatile (						\
		"1: " load " %[tmp_dst], 0(%[tmp_src])\n"	\
		"   li %[tmp_err], 0\n"				\
		"2: .insn\n"					\
								\
		".section .fixup, \"ax\"\n"			\
		"3: li %[tmp_err], 1\n"				\
		"   j 2b\n"					\
		".previous\n"					\
								\
		".section\t__ex_table,\"a\"\n\t"		\
		STR(PTR_WD) "\t1b, 3b\n\t"			\
		".previous\n"					\
								\
		: [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)	\
		: [tmp_src] "r" (src)				\
		: "memory"					\
	);							\
} while (0)

#define safe_store(store, src, dst, error)			\
do {								\
	asm volatile (						\
		"1: " store " %[tmp_src], 0(%[tmp_dst])\n"	\
		"   li %[tmp_err], 0\n"				\
		"2: .insn\n"					\
								\
		".section .fixup, \"ax\"\n"			\
		"3: li %[tmp_err], 1\n"				\
		"   j 2b\n"					\
		".previous\n"					\
								\
		".section\t__ex_table,\"a\"\n\t"		\
		STR(PTR_WD) "\t1b, 3b\n\t"			\
		".previous\n"					\
								\
		: [tmp_err] "=r" (error)			\
		: [tmp_dst] "r" (dst), [tmp_src] "r" (src)	\
		: "memory"					\
	);							\
} while (0)

#define safe_load_code(dst, src, error)		\
	safe_load(STR(lw), src, dst, error)
#define safe_store_code(src, dst, error)	\
	safe_store(STR(sw), src, dst, error)

#define safe_load_stack(dst, src, error)	\
	safe_load(STR(PTR_L), src, dst, error)
#define safe_store_stack(src, dst, error)	\
	safe_store(STR(PTR_S), src, dst, error)
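
/*
 * Typical usage (a sketch, modelled on the MIPS dynamic ftrace code
 * that patches mcount call sites):
 *
 *	unsigned int insn;
 *	int faulted;
 *
 *	safe_load_code(insn, ip, faulted);
 *	if (faulted)
 *		return -EFAULT;
 */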

#ifdef CONFIG_DYNAMIC_FTRACE
/* MIPS records the call site address itself, so nothing to adjust here. */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}

struct dyn_arch_ftrace {
};
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Used by the function graph tracer: diverts a function's return
 * address so that the function's exit can be traced, too.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp);

#endif /* __ASSEMBLER__ */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifndef __ASSEMBLER__
/*
 * Some syscall entry functions on mips start with "__sys_" (fork and clone,
 * for instance). We should also match the sys_ variant with those.
 */
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Skip the "__sys_" (6 chars) and "sys_" (4 chars) prefixes,
	 * so that e.g. "__sys_clone" also matches "sys_clone".
	 */
	return !strcmp(sym, name) ||
		(!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4));
}
#endif /* __ASSEMBLER__ */
#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _ASM_MIPS_FTRACE_H */