mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-12-27 15:51:30 -05:00
arm64: Fix typos and spelling errors in comments
This patch corrects several minor typographical and spelling errors in
comments across multiple arm64 source files. No functional changes.

Signed-off-by: mrigendrachaubey <mrigendra.chaubey@gmail.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 96ac403ea2
parent 3a86608788
committed by Catalin Marinas
@@ -371,7 +371,7 @@ alternative_endif
  * [start, end) with dcache line size explicitly provided.
  *
  * op: operation passed to dc instruction
- * domain: domain used in dsb instruciton
+ * domain: domain used in dsb instruction
  * start: starting virtual address of the region
  * end: end virtual address of the region
  * linesz: dcache line size
@@ -412,7 +412,7 @@ alternative_endif
  * [start, end)
  *
  * op: operation passed to dc instruction
- * domain: domain used in dsb instruciton
+ * domain: domain used in dsb instruction
  * start: starting virtual address of the region
  * end: end virtual address of the region
  * fixup: optional label to branch to on user fault

@@ -199,7 +199,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
  * registers (e.g, SCTLR, TCR etc.) or patching the kernel via
  * alternatives. The kernel patching is batched and performed at later
  * point. The actions are always initiated only after the capability
- * is finalised. This is usally denoted by "enabling" the capability.
+ * is finalised. This is usually denoted by "enabling" the capability.
  * The actions are initiated as follows :
  * a) Action is triggered on all online CPUs, after the capability is
  * finalised, invoked within the stop_machine() context from
@@ -251,7 +251,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
 #define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
 /*
- * The capabilitiy is detected on the Boot CPU and is used by kernel
+ * The capability is detected on the Boot CPU and is used by kernel
  * during early boot. i.e, the capability should be "detected" and
  * "enabled" as early as possibly on all booting CPUs.
  */

@@ -28,7 +28,7 @@
	 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
	 * don't advertise it (they predate this relaxation).
	 *
-	 * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
+	 * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
	 * indicating whether the CPU is running in E2H mode.
	 */
	mrs_s x1, SYS_ID_AA64MMFR4_EL1

@@ -432,7 +432,7 @@ bool pgattr_change_is_safe(pteval_t old, pteval_t new);
  * 1 0 | 1 0 1
  * 1 1 | 0 1 x
  *
- * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
+ * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
  * the page fault mechanism. Checking the dirty status of a pte becomes:
  *
  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
@@ -598,7 +598,7 @@ static inline int pte_protnone(pte_t pte)
	/*
	 * pte_present_invalid() tells us that the pte is invalid from HW
	 * perspective but present from SW perspective, so the fields are to be
-	 * interpretted as per the HW layout. The second 2 checks are the unique
+	 * interpreted as per the HW layout. The second 2 checks are the unique
	 * encoding that we use for PROT_NONE. It is insufficient to only use
	 * the first check because we share the same encoding scheme with pmds
	 * which support pmd_mkinvalid(), so can be present-invalid without

@@ -23,7 +23,7 @@ struct cpu_suspend_ctx {
  * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
  * This data must survive until cpu_resume() is called.
  *
- * This struct desribes the size and the layout of the saved cpu state.
+ * This struct describes the size and the layout of the saved cpu state.
  * The layout of the callee_saved_regs is defined by the implementation
  * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
  * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it

@@ -133,7 +133,7 @@ static int __init acpi_fadt_sanity_check(void)

	/*
	 * FADT is required on arm64; retrieve it to check its presence
-	 * and carry out revision and ACPI HW reduced compliancy tests
+	 * and carry out revision and ACPI HW reduced compliance tests
	 */
	status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
	if (ACPI_FAILURE(status)) {

@@ -1002,7 +1002,7 @@ static void __init sort_ftr_regs(void)

 /*
  * Initialise the CPU feature register from Boot CPU values.
- * Also initiliases the strict_mask for the register.
+ * Also initialises the strict_mask for the register.
  * Any bits that are not covered by an arm64_ftr_bits entry are considered
  * RES0 for the system-wide value, and must strictly match.
  */

@@ -492,7 +492,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		return ret;

	/*
-	 * When using mcount, callsites in modules may have been initalized to
+	 * When using mcount, callsites in modules may have been initialized to
	 * call an arbitrary module PLT (which redirects to the _mcount stub)
	 * rather than the ftrace PLT we'll use at runtime (which redirects to
	 * the ftrace trampoline). We can ignore the old PLT when initializing

@@ -251,7 +251,7 @@ void crash_post_resume(void)
  * marked as Reserved as memory was allocated via memblock_reserve().
  *
  * In hibernation, the pages which are Reserved and yet "nosave" are excluded
- * from the hibernation iamge. crash_is_nosave() does thich check for crash
+ * from the hibernation image. crash_is_nosave() does thich check for crash
  * dump kernel and will reduce the total size of hibernation image.
  */

@@ -131,7 +131,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
	struct uprobe_task *utask = current->utask;

	/*
-	 * Task has received a fatal signal, so reset back to probbed
+	 * Task has received a fatal signal, so reset back to probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

@@ -202,7 +202,7 @@ unsigned long sdei_arch_get_entry_point(int conduit)
 /*
  * do_sdei_event() returns one of:
  * SDEI_EV_HANDLED - success, return to the interrupted context.
- * SDEI_EV_FAILED - failure, return this error code to firmare.
+ * SDEI_EV_FAILED - failure, return this error code to firmware.
  * virtual-address - success, return to this address.
  */
 unsigned long __kprobes do_sdei_event(struct pt_regs *regs,

@@ -350,7 +350,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
-	 * in-kernel synchronisation, try to get the firwmare to help us to
+	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
@@ -523,7 +523,7 @@ int arch_register_cpu(int cpu)

	/*
	 * Availability of the acpi handle is sufficient to establish
-	 * that _STA has aleady been checked. No need to recheck here.
+	 * that _STA has already been checked. No need to recheck here.
	 */
	c->hotpluggable = arch_cpu_is_hotpluggable(cpu);

@@ -922,7 +922,7 @@ void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigne
	__show_regs(regs);

	/*
-	 * We use nmi_panic to limit the potential for recusive overflows, and
+	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");

@@ -815,7 +815,7 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
		tpt = tpc = true;

	/*
-	 * For the poor sods that could not correctly substract one value
+	 * For the poor sods that could not correctly subtract one value
	 * from another, trap the full virtual timer and counter.
	 */
	if (has_broken_cntvoff() && timer_get_offset(map->direct_vtimer))

@@ -115,7 +115,7 @@ static void ffa_set_retval(struct kvm_cpu_context *ctxt,
	 *
	 * FFA-1.3 introduces 64-bit variants of the CPU cycle management
	 * interfaces. Moreover, FF-A 1.3 clarifies that SMC32 direct requests
-	 * complete with SMC32 direct reponses which *should* allow us use the
+	 * complete with SMC32 direct responses which *should* allow us use the
	 * function ID sent by the caller to determine whether to return x8-x17.
	 *
	 * Note that we also cannot rely on function IDs in the response.

@@ -1755,7 +1755,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,

	/*
	 * Check if this is non-struct page memory PFN, and cannot support
-	 * CMOs. It could potentially be unsafe to access as cachable.
+	 * CMOs. It could potentially be unsafe to access as cacheable.
	 */
	if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) {
		if (is_vma_cacheable) {

@@ -85,7 +85,7 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
	/*
	 * Let's treat memory allocation failures as benign: If we fail to
	 * allocate anything, return an error and keep the allocated array
-	 * alive. Userspace may try to recover by intializing the vcpu
+	 * alive. Userspace may try to recover by initializing the vcpu
	 * again, and there is no reason to affect the whole VM for this.
	 */
	num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU;

@@ -3053,7 +3053,7 @@ bool bpf_jit_supports_exceptions(void)
	/* We unwind through both kernel frames starting from within bpf_throw
	 * call and BPF frames. Therefore we require FP unwinder to be enabled
	 * to walk kernel frames and reach BPF frames in the stack trace.
-	 * ARM64 kernel is aways compiled with CONFIG_FRAME_POINTER=y
+	 * ARM64 kernel is always compiled with CONFIG_FRAME_POINTER=y
	 */
	return true;
}