exec.h 0000644 00000000714 14722073052 0005645 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Generic process execution definitions. * * It should be possible to use these on really simple architectures, * but it serves more as a starting point for new ports. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef __ASM_GENERIC_EXEC_H #define __ASM_GENERIC_EXEC_H #define arch_align_stack(x) (x) #endif /* __ASM_GENERIC_EXEC_H */ emergency-restart.h 0000644 00000000370 14722073052 0010357 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H #define _ASM_GENERIC_EMERGENCY_RESTART_H static inline void machine_emergency_restart(void) { machine_restart(NULL); } #endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */ irq.h 0000644 00000000623 14722073052 0005513 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_IRQ_H #define __ASM_GENERIC_IRQ_H /* * NR_IRQS is the upper bound of how many interrupts can be handled * in the platform. It is used to size the static irq_map array, * so don't make it too big. */ #ifndef NR_IRQS #define NR_IRQS 64 #endif static inline int irq_canonicalize(int irq) { return irq; } #endif /* __ASM_GENERIC_IRQ_H */ kvm_para.h 0000644 00000001045 14722073052 0006517 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_KVM_PARA_H #define _ASM_GENERIC_KVM_PARA_H #include <uapi/asm-generic/kvm_para.h> /* * This function is used by architectures that support kvm to avoid issuing * false soft lockup messages. */ static inline bool kvm_check_and_clear_guest_paused(void) { return false; } static inline unsigned int kvm_arch_para_features(void) { return 0; } static inline unsigned int kvm_arch_para_hints(void) { return 0; } static inline bool kvm_para_available(void) { return false; } #endif 4level-fixup.h 0000644 00000002112 14722073052 0007237 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _4LEVEL_FIXUP_H #define _4LEVEL_FIXUP_H #define __ARCH_HAS_4LEVEL_HACK #define __PAGETABLE_PUD_FOLDED 1 #define PUD_SHIFT PGDIR_SHIFT #define PUD_SIZE PGDIR_SIZE #define PUD_MASK PGDIR_MASK #define PTRS_PER_PUD 1 #define pud_t pgd_t #define pmd_alloc(mm, pud, address) \ ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ NULL: pmd_offset(pud, address)) #define pud_offset(pgd, start) (pgd) #define pud_none(pud) 0 #define pud_bad(pud) 0 #define pud_present(pud) 1 #define pud_ERROR(pud) do { } while (0) #define pud_clear(pud) pgd_clear(pud) #define pud_val(pud) pgd_val(pud) #define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd) #define pud_page(pud) pgd_page(pud) #define pud_page_vaddr(pud) pgd_page_vaddr(pud) #undef pud_free_tlb #define pud_free_tlb(tlb, x, addr) do { } while (0) #define pud_free(mm, x) do { } while (0) #define __pud_free_tlb(tlb, x, addr) do { } while (0) #undef pud_addr_end #define pud_addr_end(addr, end) (end) #include <asm-generic/5level-fixup.h> #endif tlbflush.h 0000644 00000000745 14722073052 0006550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_TLBFLUSH_H #define __ASM_GENERIC_TLBFLUSH_H /* * This is a dummy tlbflush implementation that can be used on all * nommu architectures. * If you have an MMU, you need to write your own functions. 
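 *
 * For orientation only: a real MMU port supplies genuine invalidation
 * primitives in its own asm/tlbflush.h, along the lines of the following
 * sketch (instruction mnemonic and context field are invented here):
 *
 *	static inline void flush_tlb_mm(struct mm_struct *mm)
 *	{
 *		asm volatile("tlb.inv_asid %0"		// hypothetical insn
 *			     : : "r" (mm->context.asid) : "memory");
 *	}
 *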
*/ #ifdef CONFIG_MMU #error need to implement an architecture specific asm/tlbflush.h #endif #include <linux/bug.h> static inline void flush_tlb_mm(struct mm_struct *mm) { BUG(); } #endif /* __ASM_GENERIC_TLBFLUSH_H */ serial.h 0000644 00000000531 14722073052 0006175 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SERIAL_H #define __ASM_GENERIC_SERIAL_H /* * This should not be an architecture specific #define, oh well. * * Traditionally, it just describes i8250 and related serial ports * that have this clock rate. */ #define BASE_BAUD (1843200 / 16) #endif /* __ASM_GENERIC_SERIAL_H */ spinlock.h 0000644 00000000511 14722073052 0006536 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SPINLOCK_H #define __ASM_GENERIC_SPINLOCK_H /* * You need to implement asm/spinlock.h for SMP support. The generic * version does not handle SMP. */ #ifdef CONFIG_SMP #error need an architecture specific asm/spinlock.h #endif #endif /* __ASM_GENERIC_SPINLOCK_H */ page.h 0000644 00000004630 14722073052 0005636 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PAGE_H #define __ASM_GENERIC_PAGE_H /* * Generic page.h implementation, for NOMMU architectures. * This provides the dummy definitions for the memory management. */ #ifdef CONFIG_MMU #error need to provide a real asm/page.h #endif /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #ifdef __ASSEMBLY__ #define PAGE_SIZE (1 << PAGE_SHIFT) #else #define PAGE_SIZE (1UL << PAGE_SHIFT) #endif #define PAGE_MASK (~(PAGE_SIZE-1)) #include <asm/setup.h> #ifndef __ASSEMBLY__ #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) /* * These are used to make use of C type-checking.. 
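 *
 * The one-member structs exist so the compiler rejects accidental mixing
 * of raw integers and page-table values, e.g.:
 *
 *	pte_t pte = __pte(0x1000);		// wrap a raw value
 *	unsigned long raw = pte_val(pte);	// unwrapping must be explicit
 *	pte = 0x1000;				// error: not a pte_t
 *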
*/ typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pmd[16]; } pmd_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; typedef struct page *pgtable_t; #define pte_val(x) ((x).pte) #define pmd_val(x) ((&x)->pmd[0]) #define pgd_val(x) ((x).pgd) #define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) } ) #define __pmd(x) ((pmd_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) extern unsigned long memory_start; extern unsigned long memory_end; #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS #define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS) #else #define PAGE_OFFSET (0) #endif #ifndef ARCH_PFN_OFFSET #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #endif #ifndef __ASSEMBLY__ #define __va(x) ((void *)((unsigned long) (x))) #define __pa(x) ((unsigned long) (x)) #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) #define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr)) #define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) #ifndef page_to_phys #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) #endif #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ ((void *)(kaddr) < (void *)memory_end)) #endif /* __ASSEMBLY__ */ #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> #endif /* __ASM_GENERIC_PAGE_H */ cacheflush.h 0000644 00000004333 14722073052 0007027 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_CACHEFLUSH_H #define __ASM_CACHEFLUSH_H /* Keep includes the same across arches. */ #include <linux/mm.h> #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 /* * The cache doesn't need to be flushed when TLB entries change when * the cache is mapped to physical memory, not virtual memory */ #ifndef flush_cache_all static inline void flush_cache_all(void) { } #endif #ifndef flush_cache_mm static inline void flush_cache_mm(struct mm_struct *mm) { } #endif #ifndef flush_cache_dup_mm static inline void flush_cache_dup_mm(struct mm_struct *mm) { } #endif #ifndef flush_cache_range static inline void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } #endif #ifndef flush_cache_page static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { } #endif #ifndef flush_dcache_page static inline void flush_dcache_page(struct page *page) { } #endif #ifndef flush_dcache_mmap_lock static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } #endif #ifndef flush_dcache_mmap_unlock static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } #endif #ifndef flush_icache_range static inline void flush_icache_range(unsigned long start, unsigned long end) { } #endif #ifndef flush_icache_page static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) { } #endif #ifndef flush_icache_user_range static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) { } #endif #ifndef flush_cache_vmap static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } #endif #ifndef flush_cache_vunmap static inline void flush_cache_vunmap(unsigned long start, unsigned long end) { } #endif #ifndef copy_to_user_page #define copy_to_user_page(vma, page, vaddr, 
dst, src, len) \ do { \ memcpy(dst, src, len); \ flush_icache_user_range(vma, page, vaddr, len); \ } while (0) #endif #ifndef copy_from_user_page #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) #endif #endif /* __ASM_CACHEFLUSH_H */ error-injection.h 0000644 00000002232 14722073052 0010027 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_ERROR_INJECTION_H #define _ASM_GENERIC_ERROR_INJECTION_H #if defined(__KERNEL__) && !defined(__ASSEMBLY__) enum { EI_ETYPE_NONE, /* Dummy value for undefined case */ EI_ETYPE_NULL, /* Return NULL if failure */ EI_ETYPE_ERRNO, /* Return -ERRNO if failure */ EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */ EI_ETYPE_TRUE, /* Return true if failure */ }; struct error_injection_entry { unsigned long addr; int etype; }; struct pt_regs; #ifdef CONFIG_FUNCTION_ERROR_INJECTION /* * Whitelist generating macro. Specify functions which can be * error-injectable using this macro. */ #define ALLOW_ERROR_INJECTION(fname, _etype) \ static struct error_injection_entry __used \ __attribute__((__section__("_error_injection_whitelist"))) \ _eil_addr_##fname = { \ .addr = (unsigned long)fname, \ .etype = EI_ETYPE_##_etype, \ }; void override_function_with_return(struct pt_regs *regs); #else #define ALLOW_ERROR_INJECTION(fname, _etype) static inline void override_function_with_return(struct pt_regs *regs) { } #endif #endif #endif /* _ASM_GENERIC_ERROR_INJECTION_H */ dma-contiguous.h 0000644 00000000356 14722073052 0007661 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H #define _ASM_GENERIC_DMA_CONTIGUOUS_H #include <linux/types.h> static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { } #endif topology.h 0000644 00000004272 14722073052 0006600 0 ustar 00 /* * linux/include/asm-generic/topology.h * * Written by: Matthew Dobson, IBM Corporation * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <colpatch@us.ibm.com> */ #ifndef _ASM_GENERIC_TOPOLOGY_H #define _ASM_GENERIC_TOPOLOGY_H #ifndef CONFIG_NUMA /* Other architectures wishing to use this simple topology API should fill in the below functions as appropriate in their own <asm/topology.h> file. */ #ifndef cpu_to_node #define cpu_to_node(cpu) ((void)(cpu),0) #endif #ifndef set_numa_node #define set_numa_node(node) #endif #ifndef set_cpu_numa_node #define set_cpu_numa_node(cpu, node) #endif #ifndef cpu_to_mem #define cpu_to_mem(cpu) ((void)(cpu),0) #endif #ifndef cpumask_of_node #ifdef CONFIG_NEED_MULTIPLE_NODES #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) #else #define cpumask_of_node(node) ((void)(node), cpu_online_mask) #endif #endif #ifndef pcibus_to_node #define pcibus_to_node(bus) ((void)(bus), -1) #endif #ifndef cpumask_of_pcibus #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ cpu_all_mask : \ cpumask_of_node(pcibus_to_node(bus))) #endif #endif /* CONFIG_NUMA */ #if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES) #ifndef set_numa_mem #define set_numa_mem(node) #endif #ifndef set_cpu_numa_mem #define set_cpu_numa_mem(cpu, node) #endif #endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */ #endif /* _ASM_GENERIC_TOPOLOGY_H */ barrier.h 0000644 00000013422 14722073052 0006347 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Generic barrier definitions. * * It should be possible to use these on really simple architectures, * but it serves more as a starting point for new ports. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef __ASM_GENERIC_BARRIER_H #define __ASM_GENERIC_BARRIER_H #ifndef __ASSEMBLY__ #include <linux/compiler.h> #ifndef nop #define nop() asm volatile ("nop") #endif /* * Force strict CPU ordering. And yes, this is required on UP too when we're * talking to devices. * * Fall back to compiler barriers if nothing better is provided. */ #ifndef mb #define mb() barrier() #endif #ifndef rmb #define rmb() mb() #endif #ifndef wmb #define wmb() mb() #endif #ifndef dma_rmb #define dma_rmb() rmb() #endif #ifndef dma_wmb #define dma_wmb() wmb() #endif #ifndef read_barrier_depends #define read_barrier_depends() do { } while (0) #endif #ifndef __smp_mb #define __smp_mb() mb() #endif #ifndef __smp_rmb #define __smp_rmb() rmb() #endif #ifndef __smp_wmb #define __smp_wmb() wmb() #endif #ifndef __smp_read_barrier_depends #define __smp_read_barrier_depends() read_barrier_depends() #endif #ifdef CONFIG_SMP #ifndef smp_mb #define smp_mb() __smp_mb() #endif #ifndef smp_rmb #define smp_rmb() __smp_rmb() #endif #ifndef smp_wmb #define smp_wmb() __smp_wmb() #endif #ifndef smp_read_barrier_depends #define smp_read_barrier_depends() __smp_read_barrier_depends() #endif #else /* !CONFIG_SMP */ #ifndef smp_mb #define smp_mb() barrier() #endif #ifndef smp_rmb #define smp_rmb() barrier() #endif #ifndef smp_wmb #define smp_wmb() barrier() #endif #ifndef smp_read_barrier_depends #define smp_read_barrier_depends() do { } while (0) #endif #endif /* CONFIG_SMP */ #ifndef __smp_store_mb #define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0) #endif #ifndef __smp_mb__before_atomic #define __smp_mb__before_atomic() __smp_mb() #endif #ifndef __smp_mb__after_atomic #define __smp_mb__after_atomic() __smp_mb() #endif #ifndef __smp_store_release #define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ __smp_mb(); \ WRITE_ONCE(*p, v); \ } while (0) #endif #ifndef __smp_load_acquire #define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ __smp_mb(); \ ___p1; \ }) #endif #ifdef CONFIG_SMP #ifndef smp_store_mb #define smp_store_mb(var, value) __smp_store_mb(var, value) #endif #ifndef smp_mb__before_atomic #define smp_mb__before_atomic() __smp_mb__before_atomic() #endif #ifndef smp_mb__after_atomic #define smp_mb__after_atomic() __smp_mb__after_atomic() #endif #ifndef smp_store_release #define smp_store_release(p, v) __smp_store_release(p, v) #endif #ifndef smp_load_acquire #define smp_load_acquire(p) 
__smp_load_acquire(p) #endif #else /* !CONFIG_SMP */ #ifndef smp_store_mb #define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) #endif #ifndef smp_mb__before_atomic #define smp_mb__before_atomic() barrier() #endif #ifndef smp_mb__after_atomic #define smp_mb__after_atomic() barrier() #endif #ifndef smp_store_release #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) #endif #ifndef smp_load_acquire #define smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ ___p1; \ }) #endif #endif /* CONFIG_SMP */ /* Barriers for virtual machine guests when talking to an SMP host */ #define virt_mb() __smp_mb() #define virt_rmb() __smp_rmb() #define virt_wmb() __smp_wmb() #define virt_read_barrier_depends() __smp_read_barrier_depends() #define virt_store_mb(var, value) __smp_store_mb(var, value) #define virt_mb__before_atomic() __smp_mb__before_atomic() #define virt_mb__after_atomic() __smp_mb__after_atomic() #define virt_store_release(p, v) __smp_store_release(p, v) #define virt_load_acquire(p) __smp_load_acquire(p) /** * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency * * A control dependency provides a LOAD->STORE order, the additional RMB * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, * aka. (load)-ACQUIRE. * * Architectures that do not do load speculation can have this be barrier(). */ #ifndef smp_acquire__after_ctrl_dep #define smp_acquire__after_ctrl_dep() smp_rmb() #endif /** * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees * @ptr: pointer to the variable to wait on * @cond: boolean expression to wait for * * Equivalent to using READ_ONCE() on the condition variable. * * Due to C lacking lambda expressions we load the value of *ptr into a * pre-named variable @VAL to be used in @cond. */ #ifndef smp_cond_load_relaxed #define smp_cond_load_relaxed(ptr, cond_expr) ({ \ typeof(ptr) __PTR = (ptr); \ typeof(*ptr) VAL; \ for (;;) { \ VAL = READ_ONCE(*__PTR); \ if (cond_expr) \ break; \ cpu_relax(); \ } \ VAL; \ }) #endif /** * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering * @ptr: pointer to the variable to wait on * @cond: boolean expression to wait for * * Equivalent to using smp_load_acquire() on the condition variable but employs * the control dependency of the wait to reduce the barrier on many platforms. */ #ifndef smp_cond_load_acquire #define smp_cond_load_acquire(ptr, cond_expr) ({ \ typeof(*ptr) _val; \ _val = smp_cond_load_relaxed(ptr, cond_expr); \ smp_acquire__after_ctrl_dep(); \ _val; \ }) #endif #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ syscall.h 0000644 00000012714 14722073052 0006376 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Access to user system call parameters and results * * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * * This file is a stub providing documentation for what functions * asm-ARCH/syscall.h files need to define. Most arch definitions * will be simple inlines. * * All of these functions expect to be called with no locks, * and only when the caller is sure that the task of interest * cannot return to user mode while we are looking at it. 
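 *
 * On most architectures these become small register accessors. A hedged
 * sketch, with the pt_regs field names invented for illustration:
 *
 *	int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 *	{
 *		return regs->in_syscall ? regs->syscall_nr : -1;
 *	}
 *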
*/ #ifndef _ASM_SYSCALL_H #define _ASM_SYSCALL_H 1 struct task_struct; struct pt_regs; /** * syscall_get_nr - find what system call a task is executing * @task: task of interest, must be blocked * @regs: task_pt_regs() of @task * * If @task is executing a system call or is at system call * tracing about to attempt one, returns the system call number. * If @task is not executing a system call, i.e. it's blocked * inside the kernel for a fault or signal, returns -1. * * Note this returns int even on 64-bit machines. Only 32 bits of * system call number can be meaningful. If the actual arch value * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. * * It's only valid to call this when @task is known to be blocked. */ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); /** * syscall_rollback - roll back registers after an aborted system call * @task: task of interest, must be in system call exit tracing * @regs: task_pt_regs() of @task * * It's only valid to call this when @task is stopped for system * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), * after tracehook_report_syscall_entry() returned nonzero to prevent * the system call from taking place. * * This rolls back the register state in @regs so it's as if the * system call instruction was a no-op. The registers containing * the system call number and arguments are as they were before the * system call instruction. This may not be the same as what the * register state looked like at system call entry tracing. */ void syscall_rollback(struct task_struct *task, struct pt_regs *regs); /** * syscall_get_error - check result of traced system call * @task: task of interest, must be blocked * @regs: task_pt_regs() of @task * * Returns 0 if the system call succeeded, or -ERRORCODE if it failed. * * It's only valid to call this when @task is stopped for tracing on exit * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. */ long syscall_get_error(struct task_struct *task, struct pt_regs *regs); /** * syscall_get_return_value - get the return value of a traced system call * @task: task of interest, must be blocked * @regs: task_pt_regs() of @task * * Returns the return value of the successful system call. * This value is meaningless if syscall_get_error() returned nonzero. * * It's only valid to call this when @task is stopped for tracing on exit * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. */ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs); /** * syscall_set_return_value - change the return value of a traced system call * @task: task of interest, must be blocked * @regs: task_pt_regs() of @task * @error: negative error code, or zero to indicate success * @val: user return value if @error is zero * * This changes the results of the system call that user mode will see. * If @error is zero, the user sees a successful system call with a * return value of @val. If @error is nonzero, it's a negated errno * code; the user sees a failed system call with this errno code. * * It's only valid to call this when @task is stopped for tracing on exit * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. 
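 *
 * A hedged per-architecture sketch (return register invented for
 * illustration; note @error already arrives as a negated errno):
 *
 *	regs->r0 = error ? (long)error : val;
 *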
*/ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val); /** * syscall_get_arguments - extract system call parameter values * @task: task of interest, must be blocked * @regs: task_pt_regs() of @task * @args: array filled with argument values * * Fetches 6 arguments to the system call. First argument is stored in * @args[0], and so on. * * It's only valid to call this when @task is stopped for tracing on * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. */ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, unsigned long *args); /** * syscall_set_arguments - change system call parameter value * @task: task of interest, must be in system call entry tracing * @regs: task_pt_regs() of @task * @args: array of argument values to store * * Changes 6 arguments to the system call. * The first argument gets value @args[0], and so on. * * It's only valid to call this when @task is stopped for tracing on * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. */ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, const unsigned long *args); /** * syscall_get_arch - return the AUDIT_ARCH for the current system call * @task: task of interest, must be blocked * * Returns the AUDIT_ARCH_* based on the system call convention in use. * * It's only valid to call this when @task is stopped on entry to a system * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP. * * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must * provide an implementation of this. */ int syscall_get_arch(struct task_struct *task); #endif /* _ASM_SYSCALL_H */ ide_iops.h 0000644 00000001427 14722073052 0006516 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* Generic I/O and MEMIO string operations. */ #define __ide_insw insw #define __ide_insl insl #define __ide_outsw outsw #define __ide_outsl outsl static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count) { while (count--) { *(u16 *)addr = readw(port); addr += 2; } } static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count) { while (count--) { *(u32 *)addr = readl(port); addr += 4; } } static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) { while (count--) { writew(*(u16 *)addr, port); addr += 2; } } static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) { while (count--) { writel(*(u32 *)addr, port); addr += 4; } } early_ioremap.h 0000644 00000003240 14722073052 0007546 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_EARLY_IOREMAP_H_ #define _ASM_EARLY_IOREMAP_H_ #include <linux/types.h> /* * early_ioremap() and early_iounmap() are for temporary early boot-time * mappings, before the real ioremap() is functional. */ extern void __iomem *early_ioremap(resource_size_t phys_addr, unsigned long size); extern void *early_memremap(resource_size_t phys_addr, unsigned long size); extern void *early_memremap_ro(resource_size_t phys_addr, unsigned long size); extern void *early_memremap_prot(resource_size_t phys_addr, unsigned long size, unsigned long prot_val); extern void early_iounmap(void __iomem *addr, unsigned long size); extern void early_memunmap(void *addr, unsigned long size); /* * Weak function called by early_ioremap_reset(). It does nothing, but * architectures may provide their own version to do any needed cleanups. 
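 *
 * Typical boot-time use of the mapping helpers above (a sketch: the
 * table type, parser and physical address are assumptions):
 *
 *	struct fw_table *tbl = early_memremap(fw_pa, sizeof(*tbl));
 *	if (tbl) {
 *		parse_fw_table(tbl);
 *		early_memunmap(tbl, sizeof(*tbl));
 *	}
 *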
*/ extern void early_ioremap_shutdown(void); #if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU) /* Arch-specific initialization */ extern void early_ioremap_init(void); /* Generic initialization called by architecture code */ extern void early_ioremap_setup(void); /* * Called as last step in paging_init() so library can act * accordingly for subsequent map/unmap requests. */ extern void early_ioremap_reset(void); /* * Early copy from unmapped memory to kernel mapped memory. */ extern void copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size); #else static inline void early_ioremap_init(void) { } static inline void early_ioremap_setup(void) { } static inline void early_ioremap_reset(void) { } #endif #endif /* _ASM_EARLY_IOREMAP_H_ */ user.h 0000644 00000000362 14722073052 0005676 0 ustar 00 #ifndef __ASM_GENERIC_USER_H #define __ASM_GENERIC_USER_H /* * This file may define a 'struct user' structure. However, it is only * used for a.out files, which are not supported on new architectures. */ #endif /* __ASM_GENERIC_USER_H */ local.h 0000644 00000004352 14722073052 0006015 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_LOCAL_H #define _ASM_GENERIC_LOCAL_H #include <linux/percpu.h> #include <linux/atomic.h> #include <asm/types.h> /* * A signed long type for operations which are atomic for a single CPU. * Usually used in combination with per-cpu variables. * * This is the default implementation, which uses atomic_long_t. Which is * rather pointless. The whole point behind local_t is that some processors * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs * running on this CPU. local_t allows exploitation of such capabilities. */ /* Implement in terms of atomics. */ /* Don't use typedef: don't want them to be mixed with atomic_t's. */ typedef struct { atomic_long_t a; } local_t; #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } #define local_read(l) atomic_long_read(&(l)->a) #define local_set(l,i) atomic_long_set((&(l)->a),(i)) #define local_inc(l) atomic_long_inc(&(l)->a) #define local_dec(l) atomic_long_dec(&(l)->a) #define local_add(i,l) atomic_long_add((i),(&(l)->a)) #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a)) #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a) #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a) #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a)) #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) #define local_inc_return(l) atomic_long_inc_return(&(l)->a) #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u)) #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) /* Non-atomic variants, ie. preemption disabled and won't be touched * in interrupt, etc. Some archs can optimize this case well. 
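 *
 * Sketch of the intended pattern (per-CPU counter assumed):
 *
 *	static DEFINE_PER_CPU(local_t, stats);
 *
 *	preempt_disable();
 *	__local_inc(this_cpu_ptr(&stats));	// 'stats' never touched from IRQ
 *	preempt_enable();
 *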
*/ #define __local_inc(l) local_set((l), local_read(l) + 1) #define __local_dec(l) local_set((l), local_read(l) - 1) #define __local_add(i,l) local_set((l), local_read(l) + (i)) #define __local_sub(i,l) local_set((l), local_read(l) - (i)) #endif /* _ASM_GENERIC_LOCAL_H */ trace_clock.h 0000644 00000000607 14722073052 0007173 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_TRACE_CLOCK_H #define _ASM_GENERIC_TRACE_CLOCK_H /* * Arch-specific trace clocks. */ /* * Additional trace clocks added to the trace_clocks * array in kernel/trace/trace.c * None if the architecture has not defined it. */ #ifndef ARCH_TRACE_CLOCKS # define ARCH_TRACE_CLOCKS #endif #endif /* _ASM_GENERIC_TRACE_CLOCK_H */ mmiowb_types.h 0000644 00000000371 14722073052 0007436 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MMIOWB_TYPES_H #define __ASM_GENERIC_MMIOWB_TYPES_H #include <linux/types.h> struct mmiowb_state { u16 nesting_count; u16 mmiowb_pending; }; #endif /* __ASM_GENERIC_MMIOWB_TYPES_H */ qspinlock.h 0000644 00000006273 14722073052 0006732 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Queued spinlock * * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP * * Authors: Waiman Long <waiman.long@hpe.com> */ #ifndef __ASM_GENERIC_QSPINLOCK_H #define __ASM_GENERIC_QSPINLOCK_H #include <asm-generic/qspinlock_types.h> /** * queued_spin_is_locked - is the spinlock locked? * @lock: Pointer to queued spinlock structure * Return: 1 if it is locked, 0 otherwise */ static __always_inline int queued_spin_is_locked(struct qspinlock *lock) { /* * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL * isn't immediately observable. */ return atomic_read(&lock->val); } /** * queued_spin_value_unlocked - is the spinlock structure unlocked? * @lock: queued spinlock structure * Return: 1 if it is unlocked, 0 otherwise * * N.B. Whenever there are tasks waiting for the lock, it is considered * locked wrt the lockref code to avoid lock stealing by the lockref * code and change things underneath the lock. This also allows some * optimizations to be applied without conflict with lockref. 
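 *
 * This is what lib/lockref.c relies on for its optimistic fast path,
 * roughly:
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		// cmpxchg the combined lock+count word here, without
 *		// ever taking the spinlock
 *	}
 *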
*/ static __always_inline int queued_spin_value_unlocked(struct qspinlock lock) { return !lock.val.counter; } /** * queued_spin_is_contended - check if the lock is contended * @lock : Pointer to queued spinlock structure * Return: 1 if lock contended, 0 otherwise */ static __always_inline int queued_spin_is_contended(struct qspinlock *lock) { return atomic_read(&lock->val) & ~_Q_LOCKED_MASK; } /** * queued_spin_trylock - try to acquire the queued spinlock * @lock : Pointer to queued spinlock structure * Return: 1 if lock acquired, 0 if failed */ static __always_inline int queued_spin_trylock(struct qspinlock *lock) { u32 val = atomic_read(&lock->val); if (unlikely(val)) return 0; return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)); } extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); /** * queued_spin_lock - acquire a queued spinlock * @lock: Pointer to queued spinlock structure */ static __always_inline void queued_spin_lock(struct qspinlock *lock) { u32 val = 0; if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) return; queued_spin_lock_slowpath(lock, val); } #ifndef queued_spin_unlock /** * queued_spin_unlock - release a queued spinlock * @lock : Pointer to queued spinlock structure */ static __always_inline void queued_spin_unlock(struct qspinlock *lock) { /* * unlock() needs release semantics: */ smp_store_release(&lock->locked, 0); } #endif #ifndef virt_spin_lock static __always_inline bool virt_spin_lock(struct qspinlock *lock) { return false; } #endif /* * Remapping spinlock architecture specific functions to the corresponding * queued spinlock functions. */ #define arch_spin_is_locked(l) queued_spin_is_locked(l) #define arch_spin_is_contended(l) queued_spin_is_contended(l) #define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l) #define arch_spin_lock(l) queued_spin_lock(l) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) #endif /* __ASM_GENERIC_QSPINLOCK_H */ hw_irq.h 0000644 00000000416 14722073052 0006211 0 ustar 00 #ifndef __ASM_GENERIC_HW_IRQ_H #define __ASM_GENERIC_HW_IRQ_H /* * hw_irq.h has internal declarations for the low-level interrupt * controller, like the original i8259A. * In general, this is not needed for new architectures. */ #endif /* __ASM_GENERIC_HW_IRQ_H */ bitops-instrumented.h 0000644 00000017345 14722073052 0010750 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * This file provides wrappers with sanitizer instrumentation for bit * operations. * * To use this functionality, an arch's bitops.h file needs to define each of * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), * arch___set_bit(), etc.). */ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_H #include <linux/kasan-checks.h> /** * set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * This is a relaxed atomic operation (no implied memory barriers). * * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ static inline void set_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); arch_set_bit(nr, addr); } /** * __set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * Unlike set_bit(), this function is non-atomic. 
If it is called on the same * region of memory concurrently, the effect may be that only one operation * succeeds. */ static inline void __set_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); arch___set_bit(nr, addr); } /** * clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * * This is a relaxed atomic operation (no implied memory barriers). */ static inline void clear_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit(nr, addr); } /** * __clear_bit - Clears a bit in memory * @nr: the bit to clear * @addr: the address to start counting from * * Unlike clear_bit(), this function is non-atomic. If it is called on the same * region of memory concurrently, the effect may be that only one operation * succeeds. */ static inline void __clear_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit(nr, addr); } /** * clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit_unlock(nr, addr); } /** * __clear_bit_unlock - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * * This is a non-atomic operation but implies a release barrier before the * memory operation. It can be used for an unlock if no other CPUs can * concurrently modify other bits in the word. */ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit_unlock(nr, addr); } /** * change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from * * This is a relaxed atomic operation (no implied memory barriers). * * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ static inline void change_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); arch_change_bit(nr, addr); } /** * __change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * * Unlike change_bit(), this function is non-atomic. If it is called on the same * region of memory concurrently, the effect may be that only one operation * succeeds. */ static inline void __change_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); arch___change_bit(nr, addr); } /** * test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This is an atomic fully-ordered operation (implied full memory barrier). */ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_set_bit(nr, addr); } /** * __test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. 
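 *
 * The atomic variants above are what make lock-free one-shot latches
 * safe (names illustrative):
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		do_one_time_setup();	// exactly one caller wins the race
 *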
*/ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); return arch___test_and_set_bit(nr, addr); } /** * test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and provides acquire barrier semantics if * the returned value is 0. * It can be used to implement bit locks. */ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_set_bit_lock(nr, addr); } /** * test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This is an atomic fully-ordered operation (implied full memory barrier). */ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_clear_bit(nr, addr); } /** * __test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. */ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); return arch___test_and_clear_bit(nr, addr); } /** * test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This is an atomic fully-ordered operation (implied full memory barrier). */ static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_change_bit(nr, addr); } /** * __test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. */ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); return arch___test_and_change_bit(nr, addr); } /** * test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ static inline bool test_bit(long nr, const volatile unsigned long *addr) { kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); return arch_test_bit(nr, addr); } #if defined(arch_clear_bit_unlock_is_negative_byte) /** * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom * byte is negative, for unlock. * @nr: the bit to clear * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. * * This is a bit of a one-trick-pony for the filemap code, which clears * PG_locked and tests PG_waiters, */ static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) { kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); return arch_clear_bit_unlock_is_negative_byte(nr, addr); } /* Let everybody know we have it. 
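 *
 * For reference, the filemap unlock path uses it roughly like this:
 *
 *	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
 *		wake_up_page_bit(page, PG_locked);	// someone set PG_waiters
 *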
*/ #define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte #endif #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */ asm-prototypes.h 0000644 00000000773 14722073052 0007734 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/bitops.h> #undef __memset extern void *__memset(void *, int, __kernel_size_t); #undef __memcpy extern void *__memcpy(void *, const void *, __kernel_size_t); #undef __memmove extern void *__memmove(void *, const void *, __kernel_size_t); #undef memset extern void *memset(void *, int, __kernel_size_t); #undef memcpy extern void *memcpy(void *, const void *, __kernel_size_t); #undef memmove extern void *memmove(void *, const void *, __kernel_size_t); preempt.h 0000644 00000003712 14722073052 0006376 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_PREEMPT_H #define __ASM_PREEMPT_H #include <linux/thread_info.h> #define PREEMPT_ENABLED (0) static __always_inline int preempt_count(void) { return READ_ONCE(current_thread_info()->preempt_count); } static __always_inline volatile int *preempt_count_ptr(void) { return &current_thread_info()->preempt_count; } static __always_inline void preempt_count_set(int pc) { *preempt_count_ptr() = pc; } /* * must be macros to avoid header recursion hell */ #define init_task_preempt_count(p) do { \ task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \ } while (0) #define init_idle_preempt_count(p, cpu) do { \ task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ } while (0) static __always_inline void set_preempt_need_resched(void) { } static __always_inline void clear_preempt_need_resched(void) { } static __always_inline bool test_preempt_need_resched(void) { return false; } /* * The various preempt_count add/sub methods */ static __always_inline void __preempt_count_add(int val) { *preempt_count_ptr() += val; } static __always_inline void __preempt_count_sub(int val) { *preempt_count_ptr() -= val; } static __always_inline bool __preempt_count_dec_and_test(void) { /* * Because load-store architectures cannot do per-cpu atomic * operations, we cannot use PREEMPT_NEED_RESCHED because it might get * lost. */ return !--*preempt_count_ptr() && tif_need_resched(); } /* * Returns true when we need to resched and can (barring IRQ state). */ static __always_inline bool should_resched(int preempt_offset) { return unlikely(preempt_count() == preempt_offset && tif_need_resched()); } #ifdef CONFIG_PREEMPTION extern asmlinkage void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() extern asmlinkage void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() #endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ bitops.h 0000644 00000002137 14722073052 0006222 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_BITOPS_H #define __ASM_GENERIC_BITOPS_H /* * For the benefit of those who are trying to port Linux to another * architecture, here are some C-language equivalents. You should * recode these in the native assembly language, if at all possible. 
* * C language equivalents written by Theodore Ts'o, 9/26/92 */ #include <linux/irqflags.h> #include <linux/compiler.h> #include <asm/barrier.h> #include <asm-generic/bitops/__ffs.h> #include <asm-generic/bitops/ffz.h> #include <asm-generic/bitops/fls.h> #include <asm-generic/bitops/__fls.h> #include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/find.h> #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly #endif #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/ffs.h> #include <asm-generic/bitops/hweight.h> #include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/atomic.h> #include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic.h> #endif /* __ASM_GENERIC_BITOPS_H */ word-at-a-time.h 0000644 00000005412 14722073052 0007450 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_WORD_AT_A_TIME_H #define _ASM_WORD_AT_A_TIME_H #include <linux/kernel.h> #include <asm/byteorder.h> #ifdef __BIG_ENDIAN struct word_at_a_time { const unsigned long high_bits, low_bits; }; #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) } /* Bit set in the bytes that have a zero */ static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c) { unsigned long mask = (val & c->low_bits) + c->low_bits; return ~(mask | rhs); } #define create_zero_mask(mask) (mask) static inline long find_zero(unsigned long mask) { long byte = 0; #ifdef CONFIG_64BIT if (mask >> 32) mask >>= 32; else byte = 4; #endif if (mask >> 16) mask >>= 16; else byte += 2; return (mask >> 8) ? byte : byte + 1; } static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs; return (val + c->high_bits) & ~rhs; } #ifndef zero_bytemask #define zero_bytemask(mask) (~1ul << __fls(mask)) #endif #else /* * The optimal byte mask counting is probably going to be something * that is architecture-specific. If you have a reliably fast * bit count instruction, that might be better than the multiply * and shift, for example. */ struct word_at_a_time { const unsigned long one_bits, high_bits; }; #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } #ifdef CONFIG_64BIT /* * Jan Achrenius on G+: microoptimized version of * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" * that works for the bytemasks without having to * mask them first. 
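 *
 * Worked example on 64-bit little-endian, scanning "AB\0xxxxx": has_zero()
 * reports 0x80 in byte 2, create_zero_mask() turns that into 0xffff, and
 * 0xffff * 0x0001020304050608ul >> 56 == 2 -- the byte index of the NUL.
 *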
*/ static inline long count_masked_bytes(unsigned long mask) { return mask*0x0001020304050608ul >> 56; } #else /* 32-bit case */ /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ static inline long count_masked_bytes(long mask) { /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ long a = (0x0ff0001+mask) >> 23; /* Fix the 1 for 00 case */ return a & mask; } #endif /* Return nonzero if it has a zero */ static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) { unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; *bits = mask; return mask; } static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) { return bits; } static inline unsigned long create_zero_mask(unsigned long bits) { bits = (bits - 1) & ~bits; return bits >> 7; } /* The mask we created is directly usable as a bytemask */ #define zero_bytemask(mask) (mask) static inline unsigned long find_zero(unsigned long mask) { return count_masked_bytes(mask); } #endif /* __BIG_ENDIAN */ #endif /* _ASM_WORD_AT_A_TIME_H */ mmiowb.h 0000644 00000003344 14722073052 0006215 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MMIOWB_H #define __ASM_GENERIC_MMIOWB_H /* * Generic implementation of mmiowb() tracking for spinlocks. * * If your architecture doesn't ensure that writes to an I/O peripheral * within two spinlocked sections on two different CPUs are seen by the * peripheral in the order corresponding to the lock handover, then you * need to follow these FIVE easy steps: * * 1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy) * in asm/mmiowb.h, then #include this file * 2. Ensure your I/O write accessors call mmiowb_set_pending() * 3. Select ARCH_HAS_MMIOWB * 4. Untangle the resulting mess of header files * 5. Complain to your architects */ #ifdef CONFIG_MMIOWB #include <linux/compiler.h> #include <asm-generic/mmiowb_types.h> #ifndef arch_mmiowb_state #include <asm/percpu.h> #include <asm/smp.h> DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); #define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state) #else #define __mmiowb_state() arch_mmiowb_state() #endif /* arch_mmiowb_state */ static inline void mmiowb_set_pending(void) { struct mmiowb_state *ms = __mmiowb_state(); if (likely(ms->nesting_count)) ms->mmiowb_pending = ms->nesting_count; } static inline void mmiowb_spin_lock(void) { struct mmiowb_state *ms = __mmiowb_state(); ms->nesting_count++; } static inline void mmiowb_spin_unlock(void) { struct mmiowb_state *ms = __mmiowb_state(); if (unlikely(ms->mmiowb_pending)) { ms->mmiowb_pending = 0; mmiowb(); } ms->nesting_count--; } #else #define mmiowb_set_pending() do { } while (0) #define mmiowb_spin_lock() do { } while (0) #define mmiowb_spin_unlock() do { } while (0) #endif /* CONFIG_MMIOWB */ #endif /* __ASM_GENERIC_MMIOWB_H */ timex.h 0000644 00000000774 14722073052 0006055 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_TIMEX_H #define __ASM_GENERIC_TIMEX_H /* * If you have a cycle counter, return the value here. */ typedef unsigned long cycles_t; #ifndef get_cycles static inline cycles_t get_cycles(void) { return 0; } #endif /* * Architectures are encouraged to implement read_current_timer * and define this in order to avoid the expensive delay loop * calibration during boot. 
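 *
 * An architecture with a free-running counter overrides get_cycles()
 * before this point, e.g. (counter accessor hypothetical):
 *
 *	static inline cycles_t get_cycles(void)
 *	{
 *		return read_cycle_counter();
 *	}
 *	#define get_cycles get_cycles
 *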
*/ #undef ARCH_HAS_READ_CURRENT_TIMER #endif /* __ASM_GENERIC_TIMEX_H */ kmap_types.h 0000644 00000000306 14722073052 0007072 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_KMAP_TYPES_H #define _ASM_GENERIC_KMAP_TYPES_H #ifdef __WITH_KM_FENCE # define KM_TYPE_NR 41 #else # define KM_TYPE_NR 20 #endif #endif xor.h 0000644 00000032370 14722073052 0005534 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/asm-generic/xor.h * * Generic optimized RAID-5 checksumming functions. */ #include <linux/prefetch.h> static void xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { long lines = bytes / (sizeof (long)) / 8; do { p1[0] ^= p2[0]; p1[1] ^= p2[1]; p1[2] ^= p2[2]; p1[3] ^= p2[3]; p1[4] ^= p2[4]; p1[5] ^= p2[5]; p1[6] ^= p2[6]; p1[7] ^= p2[7]; p1 += 8; p2 += 8; } while (--lines > 0); } static void xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { long lines = bytes / (sizeof (long)) / 8; do { p1[0] ^= p2[0] ^ p3[0]; p1[1] ^= p2[1] ^ p3[1]; p1[2] ^= p2[2] ^ p3[2]; p1[3] ^= p2[3] ^ p3[3]; p1[4] ^= p2[4] ^ p3[4]; p1[5] ^= p2[5] ^ p3[5]; p1[6] ^= p2[6] ^ p3[6]; p1[7] ^= p2[7] ^ p3[7]; p1 += 8; p2 += 8; p3 += 8; } while (--lines > 0); } static void xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { long lines = bytes / (sizeof (long)) / 8; do { p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; p1 += 8; p2 += 8; p3 += 8; p4 += 8; } while (--lines > 0); } static void xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { long lines = bytes / (sizeof (long)) / 8; do { p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; p1 += 8; p2 += 8; p3 += 8; p4 += 8; p5 += 8; } while (--lines > 0); } static void xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { long lines = bytes / (sizeof (long)) / 8; do { register long d0, d1, d2, d3, d4, d5, d6, d7; d0 = p1[0]; /* Pull the stuff into registers */ d1 = p1[1]; /* ... in bursts, if possible. */ d2 = p1[2]; d3 = p1[3]; d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7]; d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3]; d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7]; p1[0] = d0; /* Store the result (in bursts) */ p1[1] = d1; p1[2] = d2; p1[3] = d3; p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7; p1 += 8; p2 += 8; } while (--lines > 0); } static void xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { long lines = bytes / (sizeof (long)) / 8; do { register long d0, d1, d2, d3, d4, d5, d6, d7; d0 = p1[0]; /* Pull the stuff into registers */ d1 = p1[1]; /* ... in bursts, if possible. 
*/ d2 = p1[2]; d3 = p1[3]; d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7]; d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3]; d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7]; d0 ^= p3[0]; d1 ^= p3[1]; d2 ^= p3[2]; d3 ^= p3[3]; d4 ^= p3[4]; d5 ^= p3[5]; d6 ^= p3[6]; d7 ^= p3[7]; p1[0] = d0; /* Store the result (in bursts) */ p1[1] = d1; p1[2] = d2; p1[3] = d3; p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7; p1 += 8; p2 += 8; p3 += 8; } while (--lines > 0); } static void xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { long lines = bytes / (sizeof (long)) / 8; do { register long d0, d1, d2, d3, d4, d5, d6, d7; d0 = p1[0]; /* Pull the stuff into registers */ d1 = p1[1]; /* ... in bursts, if possible. */ d2 = p1[2]; d3 = p1[3]; d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7]; d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3]; d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7]; d0 ^= p3[0]; d1 ^= p3[1]; d2 ^= p3[2]; d3 ^= p3[3]; d4 ^= p3[4]; d5 ^= p3[5]; d6 ^= p3[6]; d7 ^= p3[7]; d0 ^= p4[0]; d1 ^= p4[1]; d2 ^= p4[2]; d3 ^= p4[3]; d4 ^= p4[4]; d5 ^= p4[5]; d6 ^= p4[6]; d7 ^= p4[7]; p1[0] = d0; /* Store the result (in bursts) */ p1[1] = d1; p1[2] = d2; p1[3] = d3; p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7; p1 += 8; p2 += 8; p3 += 8; p4 += 8; } while (--lines > 0); } static void xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { long lines = bytes / (sizeof (long)) / 8; do { register long d0, d1, d2, d3, d4, d5, d6, d7; d0 = p1[0]; /* Pull the stuff into registers */ d1 = p1[1]; /* ... in bursts, if possible. */ d2 = p1[2]; d3 = p1[3]; d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7]; d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3]; d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7]; d0 ^= p3[0]; d1 ^= p3[1]; d2 ^= p3[2]; d3 ^= p3[3]; d4 ^= p3[4]; d5 ^= p3[5]; d6 ^= p3[6]; d7 ^= p3[7]; d0 ^= p4[0]; d1 ^= p4[1]; d2 ^= p4[2]; d3 ^= p4[3]; d4 ^= p4[4]; d5 ^= p4[5]; d6 ^= p4[6]; d7 ^= p4[7]; d0 ^= p5[0]; d1 ^= p5[1]; d2 ^= p5[2]; d3 ^= p5[3]; d4 ^= p5[4]; d5 ^= p5[5]; d6 ^= p5[6]; d7 ^= p5[7]; p1[0] = d0; /* Store the result (in bursts) */ p1[1] = d1; p1[2] = d2; p1[3] = d3; p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7; p1 += 8; p2 += 8; p3 += 8; p4 += 8; p5 += 8; } while (--lines > 0); } static void xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); prefetch(p2); do { prefetchw(p1+8); prefetch(p2+8); once_more: p1[0] ^= p2[0]; p1[1] ^= p2[1]; p1[2] ^= p2[2]; p1[3] ^= p2[3]; p1[4] ^= p2[4]; p1[5] ^= p2[5]; p1[6] ^= p2[6]; p1[7] ^= p2[7]; p1 += 8; p2 += 8; } while (--lines > 0); if (lines == 0) goto once_more; } static void xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); prefetch(p2); prefetch(p3); do { prefetchw(p1+8); prefetch(p2+8); prefetch(p3+8); once_more: p1[0] ^= p2[0] ^ p3[0]; p1[1] ^= p2[1] ^ p3[1]; p1[2] ^= p2[2] ^ p3[2]; p1[3] ^= p2[3] ^ p3[3]; p1[4] ^= p2[4] ^ p3[4]; p1[5] ^= p2[5] ^ p3[5]; p1[6] ^= p2[6] ^ p3[6]; p1[7] ^= p2[7] ^ p3[7]; p1 += 8; p2 += 8; p3 += 8; } while (--lines > 0); if (lines == 0) goto once_more; } static void xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); prefetch(p2); prefetch(p3); prefetch(p4); do { 
prefetchw(p1+8); prefetch(p2+8); prefetch(p3+8); prefetch(p4+8); once_more: p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; p1 += 8; p2 += 8; p3 += 8; p4 += 8; } while (--lines > 0); if (lines == 0) goto once_more; } static void xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); prefetch(p2); prefetch(p3); prefetch(p4); prefetch(p5); do { prefetchw(p1+8); prefetch(p2+8); prefetch(p3+8); prefetch(p4+8); prefetch(p5+8); once_more: p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; p1 += 8; p2 += 8; p3 += 8; p4 += 8; p5 += 8; } while (--lines > 0); if (lines == 0) goto once_more; } static void xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); prefetch(p2); do { register long d0, d1, d2, d3, d4, d5, d6, d7; prefetchw(p1+8); prefetch(p2+8); once_more: d0 = p1[0]; /* Pull the stuff into registers */ d1 = p1[1]; /* ... in bursts, if possible. */ d2 = p1[2]; d3 = p1[3]; d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7]; d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3]; d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7]; p1[0] = d0; /* Store the result (in bursts) */ p1[1] = d1; p1[2] = d2; p1[3] = d3; p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7; p1 += 8; p2 += 8; } while (--lines > 0); if (lines == 0) goto once_more; } static void xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); prefetch(p2); prefetch(p3); do { register long d0, d1, d2, d3, d4, d5, d6, d7; prefetchw(p1+8); prefetch(p2+8); prefetch(p3+8); once_more: d0 = p1[0]; /* Pull the stuff into registers */ d1 = p1[1]; /* ... in bursts, if possible. */ d2 = p1[2]; d3 = p1[3]; d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7]; d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3]; d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7]; d0 ^= p3[0]; d1 ^= p3[1]; d2 ^= p3[2]; d3 ^= p3[3]; d4 ^= p3[4]; d5 ^= p3[5]; d6 ^= p3[6]; d7 ^= p3[7]; p1[0] = d0; /* Store the result (in bursts) */ p1[1] = d1; p1[2] = d2; p1[3] = d3; p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7; p1 += 8; p2 += 8; p3 += 8; } while (--lines > 0); if (lines == 0) goto once_more; } static void xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); prefetch(p2); prefetch(p3); prefetch(p4); do { register long d0, d1, d2, d3, d4, d5, d6, d7; prefetchw(p1+8); prefetch(p2+8); prefetch(p3+8); prefetch(p4+8); once_more: d0 = p1[0]; /* Pull the stuff into registers */ d1 = p1[1]; /* ... in bursts, if possible. 
*/ d2 = p1[2]; d3 = p1[3]; d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7]; d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3]; d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7]; d0 ^= p3[0]; d1 ^= p3[1]; d2 ^= p3[2]; d3 ^= p3[3]; d4 ^= p3[4]; d5 ^= p3[5]; d6 ^= p3[6]; d7 ^= p3[7]; d0 ^= p4[0]; d1 ^= p4[1]; d2 ^= p4[2]; d3 ^= p4[3]; d4 ^= p4[4]; d5 ^= p4[5]; d6 ^= p4[6]; d7 ^= p4[7]; p1[0] = d0; /* Store the result (in bursts) */ p1[1] = d1; p1[2] = d2; p1[3] = d3; p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7; p1 += 8; p2 += 8; p3 += 8; p4 += 8; } while (--lines > 0); if (lines == 0) goto once_more; } static void xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); prefetch(p2); prefetch(p3); prefetch(p4); prefetch(p5); do { register long d0, d1, d2, d3, d4, d5, d6, d7; prefetchw(p1+8); prefetch(p2+8); prefetch(p3+8); prefetch(p4+8); prefetch(p5+8); once_more: d0 = p1[0]; /* Pull the stuff into registers */ d1 = p1[1]; /* ... in bursts, if possible. */ d2 = p1[2]; d3 = p1[3]; d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7]; d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3]; d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7]; d0 ^= p3[0]; d1 ^= p3[1]; d2 ^= p3[2]; d3 ^= p3[3]; d4 ^= p3[4]; d5 ^= p3[5]; d6 ^= p3[6]; d7 ^= p3[7]; d0 ^= p4[0]; d1 ^= p4[1]; d2 ^= p4[2]; d3 ^= p4[3]; d4 ^= p4[4]; d5 ^= p4[5]; d6 ^= p4[6]; d7 ^= p4[7]; d0 ^= p5[0]; d1 ^= p5[1]; d2 ^= p5[2]; d3 ^= p5[3]; d4 ^= p5[4]; d5 ^= p5[5]; d6 ^= p5[6]; d7 ^= p5[7]; p1[0] = d0; /* Store the result (in bursts) */ p1[1] = d1; p1[2] = d2; p1[3] = d3; p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7; p1 += 8; p2 += 8; p3 += 8; p4 += 8; p5 += 8; } while (--lines > 0); if (lines == 0) goto once_more; } static struct xor_block_template xor_block_8regs = { .name = "8regs", .do_2 = xor_8regs_2, .do_3 = xor_8regs_3, .do_4 = xor_8regs_4, .do_5 = xor_8regs_5, }; static struct xor_block_template xor_block_32regs = { .name = "32regs", .do_2 = xor_32regs_2, .do_3 = xor_32regs_3, .do_4 = xor_32regs_4, .do_5 = xor_32regs_5, }; static struct xor_block_template xor_block_8regs_p __maybe_unused = { .name = "8regs_prefetch", .do_2 = xor_8regs_p_2, .do_3 = xor_8regs_p_3, .do_4 = xor_8regs_p_4, .do_5 = xor_8regs_p_5, }; static struct xor_block_template xor_block_32regs_p __maybe_unused = { .name = "32regs_prefetch", .do_2 = xor_32regs_p_2, .do_3 = xor_32regs_p_3, .do_4 = xor_32regs_p_4, .do_5 = xor_32regs_p_5, }; #define XOR_TRY_TEMPLATES \ do { \ xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_8regs_p); \ xor_speed(&xor_block_32regs); \ xor_speed(&xor_block_32regs_p); \ } while (0) ioctl.h 0000644 00000000772 14722073052 0006037 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_IOCTL_H #define _ASM_GENERIC_IOCTL_H #include <uapi/asm-generic/ioctl.h> #ifdef __CHECKER__ #define _IOC_TYPECHECK(t) (sizeof(t)) #else /* provoke compile error for invalid uses of size argument */ extern unsigned int __invalid_size_argument_for_IOC; #define _IOC_TYPECHECK(t) \ ((sizeof(t) == sizeof(t[1]) && \ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ sizeof(t) : __invalid_size_argument_for_IOC) #endif #endif /* _ASM_GENERIC_IOCTL_H */ mmu.h 0000644 00000000701 14722073052 0005513 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MMU_H #define __ASM_GENERIC_MMU_H /* * This is the mmu.h header for nommu implementations. * Architectures with an MMU need something more complex. 
*/ #ifndef __ASSEMBLY__ typedef struct { unsigned long end_brk; #ifdef CONFIG_BINFMT_ELF_FDPIC unsigned long exec_fdpic_loadmap; unsigned long interp_fdpic_loadmap; #endif } mm_context_t; #endif #endif /* __ASM_GENERIC_MMU_H */ bitops/ffz.h 0000644 00000000505 14722073052 0007004 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FFZ_H_ #define _ASM_GENERIC_BITOPS_FFZ_H_ /* * ffz - find first zero in word. * @word: The word to search * * Undefined if no zero exists, so code should check against ~0UL first. */ #define ffz(x) __ffs(~(x)) #endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ bitops/ffs.h 0000644 00000001216 14722073052 0006775 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FFS_H_ #define _ASM_GENERIC_BITOPS_FFS_H_ /** * ffs - find first bit set * @x: the word to search * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). */ static inline int ffs(int x) { int r = 1; if (!x) return 0; if (!(x & 0xffff)) { x >>= 16; r += 16; } if (!(x & 0xff)) { x >>= 8; r += 8; } if (!(x & 0xf)) { x >>= 4; r += 4; } if (!(x & 3)) { x >>= 2; r += 2; } if (!(x & 1)) { x >>= 1; r += 1; } return r; } #endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ bitops/find.h 0000644 00000005020 14722073052 0007134 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FIND_H_ #define _ASM_GENERIC_BITOPS_FIND_H_ #ifndef find_next_bit /** * find_next_bit - find the next set bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at * @size: The bitmap size in bits * * Returns the bit number for the next set bit * If no bits are set, returns @size. */ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); #endif #ifndef find_next_and_bit /** * find_next_and_bit - find the next set bit in both memory regions * @addr1: The first address to base the search on * @addr2: The second address to base the search on * @offset: The bitnumber to start searching at * @size: The bitmap size in bits * * Returns the bit number for the next set bit * If no bits are set, returns @size. */ extern unsigned long find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2, unsigned long size, unsigned long offset); #endif #ifndef find_next_zero_bit /** * find_next_zero_bit - find the next cleared bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at * @size: The bitmap size in bits * * Returns the bit number of the next zero bit * If no bits are zero, returns @size. */ extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); #endif #ifdef CONFIG_GENERIC_FIND_FIRST_BIT /** * find_first_bit - find the first set bit in a memory region * @addr: The address to start the search at * @size: The maximum number of bits to search * * Returns the bit number of the first set bit. * If no bits are set, returns @size. */ extern unsigned long find_first_bit(const unsigned long *addr, unsigned long size); /** * find_first_zero_bit - find the first cleared bit in a memory region * @addr: The address to start the search at * @size: The maximum number of bits to search * * Returns the bit number of the first cleared bit. * If no bits are zero, returns @size. 
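 *
 * A caller-side sketch (an illustration, not part of this header):
 * claiming the first free slot in a 128-entry allocation bitmap,
 * where 'map' and any locking around it are assumed to exist:
 *
 *	slot = find_first_zero_bit(map, 128);
 *	if (slot == 128)
 *		return -ENOSPC;
 *	__set_bit(slot, map);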
*/ extern unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size); #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifndef find_first_bit #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) #endif #ifndef find_first_zero_bit #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) #endif #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ bitops/ext2-atomic.h 0000644 00000001130 14722073052 0010346 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ /* * Spinlock based version of ext2 atomic bitops */ #define ext2_set_bit_atomic(lock, nr, addr) \ ({ \ int ret; \ spin_lock(lock); \ ret = __test_and_set_bit_le(nr, addr); \ spin_unlock(lock); \ ret; \ }) #define ext2_clear_bit_atomic(lock, nr, addr) \ ({ \ int ret; \ spin_lock(lock); \ ret = __test_and_clear_bit_le(nr, addr); \ spin_unlock(lock); \ ret; \ }) #endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */ bitops/builtin-ffs.h 0000644 00000000632 14722073052 0010442 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ #define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ /** * ffs - find first bit set * @x: the word to search * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). */ static __always_inline int ffs(int x) { return __builtin_ffs(x); } #endif bitops/arch_hweight.h 0000644 00000001053 14722073052 0010652 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ #define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ #include <asm/types.h> static inline unsigned int __arch_hweight32(unsigned int w) { return __sw_hweight32(w); } static inline unsigned int __arch_hweight16(unsigned int w) { return __sw_hweight16(w); } static inline unsigned int __arch_hweight8(unsigned int w) { return __sw_hweight8(w); } static inline unsigned long __arch_hweight64(__u64 w) { return __sw_hweight64(w); } #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ bitops/hweight.h 0000644 00000000376 14722073052 0007664 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ #include <asm-generic/bitops/arch_hweight.h> #include <asm-generic/bitops/const_hweight.h> #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ bitops/lock.h 0000644 00000004711 14722073052 0007152 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_LOCK_H_ #define _ASM_GENERIC_BITOPS_LOCK_H_ #include <linux/atomic.h> #include <linux/compiler.h> #include <asm/barrier.h> /** * test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and provides acquire barrier semantics if * the returned value is 0. * It can be used to implement bit locks. */ static inline int test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); if (READ_ONCE(*p) & mask) return 1; old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); return !!(old & mask); } /** * clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. 
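 *
 * Paired with test_and_set_bit_lock() this yields a minimal bit
 * spinlock (a sketch only; MY_LOCK_BIT and 'word' are hypothetical):
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);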
*/ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); } /** * __clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all * the bits in the word are protected by this lock some archs can use weaker * ops to safely unlock. * * See for example x86's implementation. */ static inline void __clear_bit_unlock(unsigned int nr, volatile unsigned long *p) { unsigned long old; p += BIT_WORD(nr); old = READ_ONCE(*p); old &= ~BIT_MASK(nr); atomic_long_set_release((atomic_long_t *)p, old); } /** * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom * byte is negative, for unlock. * @nr: the bit to clear * @addr: the address to start counting from * * This is a bit of a one-trick-pony for the filemap code, which clears * PG_locked and tests PG_waiters, */ #ifndef clear_bit_unlock_is_negative_byte static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); return !!(old & BIT(7)); } #define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte #endif #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */ bitops/builtin-fls.h 0000644 00000000634 14722073052 0010452 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ #define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ /** * fls - find last (most-significant) bit set * @x: the word to search * * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ static __always_inline int fls(unsigned int x) { return x ? 
sizeof(x) * 8 - __builtin_clz(x) : 0; } #endif bitops/le.h 0000644 00000004277 14722073052 0006631 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_LE_H_ #define _ASM_GENERIC_BITOPS_LE_H_ #include <asm/types.h> #include <asm/byteorder.h> #if defined(__LITTLE_ENDIAN) #define BITOP_LE_SWIZZLE 0 static inline unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset) { return find_next_zero_bit(addr, size, offset); } static inline unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset) { return find_next_bit(addr, size, offset); } static inline unsigned long find_first_zero_bit_le(const void *addr, unsigned long size) { return find_first_zero_bit(addr, size); } #elif defined(__BIG_ENDIAN) #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) #ifndef find_next_zero_bit_le extern unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset); #endif #ifndef find_next_bit_le extern unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset); #endif #ifndef find_first_zero_bit_le #define find_first_zero_bit_le(addr, size) \ find_next_zero_bit_le((addr), (size), 0) #endif #else #error "Please fix <asm/byteorder.h>" #endif static inline int test_bit_le(int nr, const void *addr) { return test_bit(nr ^ BITOP_LE_SWIZZLE, addr); } static inline void set_bit_le(int nr, void *addr) { set_bit(nr ^ BITOP_LE_SWIZZLE, addr); } static inline void clear_bit_le(int nr, void *addr) { clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); } static inline void __set_bit_le(int nr, void *addr) { __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); } static inline void __clear_bit_le(int nr, void *addr) { __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); } static inline int test_and_set_bit_le(int nr, void *addr) { return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); } static inline int test_and_clear_bit_le(int nr, void *addr) { return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); } static inline int __test_and_set_bit_le(int nr, void *addr) { return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); } static inline int __test_and_clear_bit_le(int nr, void *addr) { return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); } #endif /* _ASM_GENERIC_BITOPS_LE_H_ */ bitops/builtin-__fls.h 0000644 00000000664 14722073052 0010753 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ #define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ /** * __fls - find last (most-significant) set bit in a long word * @word: the word to search * * Undefined if no set bit exists, so code should check against 0 first. */ static __always_inline unsigned long __fls(unsigned long word) { return (sizeof(word) * 8) - 1 - __builtin_clzl(word); } #endif bitops/non-atomic.h 0000644 00000005776 14722073052 0010302 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #include <asm/types.h> /** * __set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * Unlike set_bit(), this function is non-atomic and may be reordered. * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. 
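 *
 * A sketch of safe usage (illustration only): the non-atomic ops are
 * fine while a bitmap is still private to one context, e.g. while
 * initialising an object before it is published to other CPUs:
 *
 *	DECLARE_BITMAP(mask, 64);
 *
 *	bitmap_zero(mask, 64);
 *	__set_bit(3, mask);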
*/ static inline void __set_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p |= mask; } static inline void __clear_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p &= ~mask; } /** * __change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * * Unlike change_bit(), this function is non-atomic and may be reordered. * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ static inline void __change_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p ^= mask; } /** * __test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long old = *p; *p = old | mask; return (old & mask) != 0; } /** * __test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long old = *p; *p = old & ~mask; return (old & mask) != 0; } /* WARNING: non atomic and it can be reordered! */ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long old = *p; *p = old ^ mask; return (old & mask) != 0; } /** * test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ static inline int test_bit(int nr, const volatile unsigned long *addr) { return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ bitops/atomic.h 0000644 00000003002 14722073052 0007466 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_ATOMIC_H_ #include <linux/atomic.h> #include <linux/compiler.h> #include <asm/barrier.h> /* * Implementation of atomic bitops using atomic-fetch ops. * See Documentation/atomic_bitops.txt for details. 
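 *
 * Bit addressing used throughout: bit 'nr' lives in word
 * BIT_WORD(nr) == nr / BITS_PER_LONG under mask
 * BIT_MASK(nr) == 1UL << (nr % BITS_PER_LONG), so with 64-bit longs
 * nr == 70 selects word 1 and mask 1UL << 6.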
*/ static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); } static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); } static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); } static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); old = atomic_long_fetch_or(mask, (atomic_long_t *)p); return !!(old & mask); } static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); return !!(old & mask); } static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); old = atomic_long_fetch_xor(mask, (atomic_long_t *)p); return !!(old & mask); } #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ bitops/__fls.h 0000644 00000001630 14722073052 0007301 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS___FLS_H_ #define _ASM_GENERIC_BITOPS___FLS_H_ #include <asm/types.h> /** * __fls - find last (most-significant) set bit in a long word * @word: the word to search * * Undefined if no set bit exists, so code should check against 0 first. */ static __always_inline unsigned long __fls(unsigned long word) { int num = BITS_PER_LONG - 1; #if BITS_PER_LONG == 64 if (!(word & (~0ul << 32))) { num -= 32; word <<= 32; } #endif if (!(word & (~0ul << (BITS_PER_LONG-16)))) { num -= 16; word <<= 16; } if (!(word & (~0ul << (BITS_PER_LONG-8)))) { num -= 8; word <<= 8; } if (!(word & (~0ul << (BITS_PER_LONG-4)))) { num -= 4; word <<= 4; } if (!(word & (~0ul << (BITS_PER_LONG-2)))) { num -= 2; word <<= 2; } if (!(word & (~0ul << (BITS_PER_LONG-1)))) num -= 1; return num; } #endif /* _ASM_GENERIC_BITOPS___FLS_H_ */ bitops/builtin-__ffs.h 0000644 00000000573 14722073052 0010744 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ #define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ /** * __ffs - find first bit in word. * @word: The word to search * * Undefined if no bit exists, so code should check against 0 first. */ static __always_inline unsigned long __ffs(unsigned long word) { return __builtin_ctzl(word); } #endif bitops/fls64.h 0000644 00000001534 14722073052 0007160 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FLS64_H_ #define _ASM_GENERIC_BITOPS_FLS64_H_ #include <asm/types.h> /** * fls64 - find last set bit in a 64-bit word * @x: the word to search * * This is defined in a similar way as the libc and compiler builtin * ffsll, but returns the position of the most significant set bit. * * fls64(value) returns 0 if value is 0 or the position of the last * set bit if value is nonzero. The last (most significant) bit is * at position 64. 
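 *
 * Examples: fls64(0) == 0, fls64(1) == 1,
 * fls64(0x8000000000000000ULL) == 64.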
*/ #if BITS_PER_LONG == 32 static __always_inline int fls64(__u64 x) { __u32 h = x >> 32; if (h) return fls(h) + 32; return fls(x); } #elif BITS_PER_LONG == 64 static __always_inline int fls64(__u64 x) { if (x == 0) return 0; return __fls(x) + 1; } #else #error BITS_PER_LONG not 32 or 64 #endif #endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ bitops/ext2-atomic-setbit.h 0000644 00000000623 14722073052 0011644 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ #define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ /* * Atomic bitops based version of ext2 atomic bitops */ #define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr) #define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr) #endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */ bitops/const_hweight.h 0000644 00000003257 14722073052 0011073 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ #define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ /* * Compile time versions of __arch_hweightN() */ #define __const_hweight8(w) \ ((unsigned int) \ ((!!((w) & (1ULL << 0))) + \ (!!((w) & (1ULL << 1))) + \ (!!((w) & (1ULL << 2))) + \ (!!((w) & (1ULL << 3))) + \ (!!((w) & (1ULL << 4))) + \ (!!((w) & (1ULL << 5))) + \ (!!((w) & (1ULL << 6))) + \ (!!((w) & (1ULL << 7))))) #define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) #define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) #define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) /* * Generic interface. */ #define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w)) #define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w)) #define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w)) #define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w)) /* * Interface for known constant arguments */ #define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w)) #define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w)) #define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w)) #define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w)) /* * Type invariant interface to the compile time constant hweight functions. */ #define HWEIGHT(w) HWEIGHT64((u64)w) #endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */ bitops/sched.h 0000644 00000001370 14722073052 0007306 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_SCHED_H_ #define _ASM_GENERIC_BITOPS_SCHED_H_ #include <linux/compiler.h> /* unlikely() */ #include <asm/types.h> /* * Every architecture must define this function. It's the fastest * way of searching a 100-bit bitmap. It's guaranteed that at least * one of the 100 bits is set. 
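 *
 * Worked example (64-bit): with b[0] == 0 and b[1] == 0x20, the
 * lookup below falls through to __ffs(b[1]) + 64 == 69.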
*/ static inline int sched_find_first_bit(const unsigned long *b) { #if BITS_PER_LONG == 64 if (b[0]) return __ffs(b[0]); return __ffs(b[1]) + 64; #elif BITS_PER_LONG == 32 if (b[0]) return __ffs(b[0]); if (b[1]) return __ffs(b[1]) + 32; if (b[2]) return __ffs(b[2]) + 64; return __ffs(b[3]) + 96; #else #error BITS_PER_LONG not defined #endif } #endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ bitops/__ffs.h 0000644 00000001411 14722073052 0007270 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS___FFS_H_ #define _ASM_GENERIC_BITOPS___FFS_H_ #include <asm/types.h> /** * __ffs - find first bit in word. * @word: The word to search * * Undefined if no bit exists, so code should check against 0 first. */ static __always_inline unsigned long __ffs(unsigned long word) { int num = 0; #if BITS_PER_LONG == 64 if ((word & 0xffffffff) == 0) { num += 32; word >>= 32; } #endif if ((word & 0xffff) == 0) { num += 16; word >>= 16; } if ((word & 0xff) == 0) { num += 8; word >>= 8; } if ((word & 0xf) == 0) { num += 4; word >>= 4; } if ((word & 0x3) == 0) { num += 2; word >>= 2; } if ((word & 0x1) == 0) num += 1; return num; } #endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ bitops/fls.h 0000644 00000001253 14722073052 0007004 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FLS_H_ #define _ASM_GENERIC_BITOPS_FLS_H_ /** * fls - find last (most-significant) bit set * @x: the word to search * * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ static __always_inline int fls(unsigned int x) { int r = 32; if (!x) return 0; if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; } if (!(x & 0xff000000u)) { x <<= 8; r -= 8; } if (!(x & 0xf0000000u)) { x <<= 4; r -= 4; } if (!(x & 0xc0000000u)) { x <<= 2; r -= 2; } if (!(x & 0x80000000u)) { x <<= 1; r -= 1; } return r; } #endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ uaccess.h 0000644 00000014307 14722073052 0006352 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_UACCESS_H #define __ASM_GENERIC_UACCESS_H /* * User space memory access functions, these should work * on any machine that has kernel and user data in the same * address space, e.g. all NOMMU machines. 
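 *
 * A typical caller-side sketch (illustration only), e.g. an ioctl
 * handler where 'arg' holds a user pointer:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)arg))
 *		return -EFAULT;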
*/ #include <linux/string.h> #ifdef CONFIG_UACCESS_MEMCPY static inline __must_check unsigned long raw_copy_from_user(void *to, const void __user * from, unsigned long n) { if (__builtin_constant_p(n)) { switch(n) { case 1: *(u8 *)to = *(u8 __force *)from; return 0; case 2: *(u16 *)to = *(u16 __force *)from; return 0; case 4: *(u32 *)to = *(u32 __force *)from; return 0; #ifdef CONFIG_64BIT case 8: *(u64 *)to = *(u64 __force *)from; return 0; #endif } } memcpy(to, (const void __force *)from, n); return 0; } static inline __must_check unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { if (__builtin_constant_p(n)) { switch(n) { case 1: *(u8 __force *)to = *(u8 *)from; return 0; case 2: *(u16 __force *)to = *(u16 *)from; return 0; case 4: *(u32 __force *)to = *(u32 *)from; return 0; #ifdef CONFIG_64BIT case 8: *(u64 __force *)to = *(u64 *)from; return 0; #endif default: break; } } memcpy((void __force *)to, from, n); return 0; } #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER #endif /* CONFIG_UACCESS_MEMCPY */ #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) #ifndef KERNEL_DS #define KERNEL_DS MAKE_MM_SEG(~0UL) #endif #ifndef USER_DS #define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) #endif #ifndef get_fs #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; } #endif #ifndef segment_eq #define segment_eq(a, b) ((a).seg == (b).seg) #endif #define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) /* * The architecture should really override this if possible, at least * doing a check on the get_fs() */ #ifndef __access_ok static inline int __access_ok(unsigned long addr, unsigned long size) { return 1; } #endif /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. * This version just falls back to copy_{from,to}_user, which should * provide a fast-path for small values. */ #define __put_user(x, ptr) \ ({ \ __typeof__(*(ptr)) __x = (x); \ int __pu_err = -EFAULT; \ __chk_user_ptr(ptr); \ switch (sizeof (*(ptr))) { \ case 1: \ case 2: \ case 4: \ case 8: \ __pu_err = __put_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ break; \ default: \ __put_user_bad(); \ break; \ } \ __pu_err; \ }) #define put_user(x, ptr) \ ({ \ void __user *__p = (ptr); \ might_fault(); \ access_ok(__p, sizeof(*ptr)) ? \ __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \ -EFAULT; \ }) #ifndef __put_user_fn static inline int __put_user_fn(size_t size, void __user *ptr, void *x) { return unlikely(raw_copy_to_user(ptr, x, size)) ? 
-EFAULT : 0; } #define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) #endif extern int __put_user_bad(void) __attribute__((noreturn)); #define __get_user(x, ptr) \ ({ \ int __gu_err = -EFAULT; \ __chk_user_ptr(ptr); \ switch (sizeof(*(ptr))) { \ case 1: { \ unsigned char __x = 0; \ __gu_err = __get_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ unsigned short __x = 0; \ __gu_err = __get_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ unsigned int __x = 0; \ __gu_err = __get_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ unsigned long long __x = 0; \ __gu_err = __get_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ default: \ __get_user_bad(); \ break; \ } \ __gu_err; \ }) #define get_user(x, ptr) \ ({ \ const void __user *__p = (ptr); \ might_fault(); \ access_ok(__p, sizeof(*ptr)) ? \ __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ }) #ifndef __get_user_fn static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) { return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0; } #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) #endif extern int __get_user_bad(void) __attribute__((noreturn)); /* * Copy a null terminated string from userspace. */ #ifndef __strncpy_from_user static inline long __strncpy_from_user(char *dst, const char __user *src, long count) { char *tmp; strncpy(dst, (const char __force *)src, count); for (tmp = dst; *tmp && count > 0; tmp++, count--) ; return (tmp - dst); } #endif static inline long strncpy_from_user(char *dst, const char __user *src, long count) { if (!access_ok(src, 1)) return -EFAULT; return __strncpy_from_user(dst, src, count); } /* * Return the size of a string (including the ending 0) * * Return 0 on exception, a value greater than N if too long */ #ifndef __strnlen_user #define __strnlen_user(s, n) (strnlen((s), (n)) + 1) #endif /* * Unlike strnlen, strnlen_user includes the nul terminator in * its returned count. Callers should check for a returned value * greater than N as an indication the string is too long. 
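 *
 * Caller-side sketch (illustration only) for a 16-byte buffer:
 *
 *	long len = strnlen_user(ustr, 16);
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > 16)
 *		return -ENAMETOOLONG;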
*/ static inline long strnlen_user(const char __user *src, long n) { if (!access_ok(src, 1)) return 0; return __strnlen_user(src, n); } /* * Zero Userspace */ #ifndef __clear_user static inline __must_check unsigned long __clear_user(void __user *to, unsigned long n) { memset((void __force *)to, 0, n); return 0; } #endif static inline __must_check unsigned long clear_user(void __user *to, unsigned long n) { might_fault(); if (!access_ok(to, n)) return n; return __clear_user(to, n); } #include <asm/extable.h> #endif /* __ASM_GENERIC_UACCESS_H */ bug.h 0000644 00000015117 14722073052 0005501 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BUG_H #define _ASM_GENERIC_BUG_H #include <linux/compiler.h> #define CUT_HERE "------------[ cut here ]------------\n" #ifdef CONFIG_GENERIC_BUG #define BUGFLAG_WARNING (1 << 0) #define BUGFLAG_ONCE (1 << 1) #define BUGFLAG_DONE (1 << 2) #define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */ #define BUGFLAG_TAINT(taint) ((taint) << 8) #define BUG_GET_TAINT(bug) ((bug)->flags >> 8) #endif #ifndef __ASSEMBLY__ #include <linux/kernel.h> #ifdef CONFIG_BUG #ifdef CONFIG_GENERIC_BUG struct bug_entry { #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS unsigned long bug_addr; #else signed int bug_addr_disp; #endif #ifdef CONFIG_DEBUG_BUGVERBOSE #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS const char *file; #else signed int file_disp; #endif unsigned short line; #endif unsigned short flags; }; #endif /* CONFIG_GENERIC_BUG */ /* * Don't use BUG() or BUG_ON() unless there's really no way out; one * example might be detecting data structure corruption in the middle * of an operation that can't be backed out of. If the (sub)system * can somehow continue operating, perhaps with reduced functionality, * it's probably not BUG-worthy. * * If you're tempted to BUG(), think again: is completely giving up * really the *only* solution? There are usually better options, where * users don't need to reboot ASAP and can mostly shut down cleanly. */ #ifndef HAVE_ARCH_BUG #define BUG() do { \ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ barrier_before_unreachable(); \ panic("BUG!"); \ } while (0) #endif #ifndef HAVE_ARCH_BUG_ON #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) #endif /* * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report * significant kernel issues that need prompt attention if they should ever * appear at runtime. * * Do not use these macros when checking for invalid external inputs * (e.g. invalid system call arguments, or invalid data coming from * network/devices), and on transient conditions like ENOMEM or EAGAIN. * These macros should be used for recoverable kernel issues only. * For invalid external inputs, transient conditions, etc use * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary. * Do not include "BUG"/"WARNING" in format strings manually to make these * conditions distinguishable from kernel issues. * * Use the versions with printk format strings to provide better diagnostics. */ #ifndef __WARN_FLAGS extern __printf(4, 5) void warn_slowpath_fmt(const char *file, const int line, unsigned taint, const char *fmt, ...); #define __WARN() __WARN_printf(TAINT_WARN, NULL) #define __WARN_printf(taint, arg...) \ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg) #else extern __printf(1, 2) void __warn_printk(const char *fmt, ...); #define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN)) #define __WARN_printf(taint, arg...) 
do { \ __warn_printk(arg); \ __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\ } while (0) #define WARN_ON_ONCE(condition) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ __WARN_FLAGS(BUGFLAG_ONCE | \ BUGFLAG_TAINT(TAINT_WARN)); \ unlikely(__ret_warn_on); \ }) #endif /* used internally by panic.c */ struct warn_args; struct pt_regs; void __warn(const char *file, int line, void *caller, unsigned taint, struct pt_regs *regs, struct warn_args *args); #ifndef WARN_ON #define WARN_ON(condition) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ __WARN(); \ unlikely(__ret_warn_on); \ }) #endif #ifndef WARN #define WARN(condition, format...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ __WARN_printf(TAINT_WARN, format); \ unlikely(__ret_warn_on); \ }) #endif #define WARN_TAINT(condition, taint, format...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ __WARN_printf(taint, format); \ unlikely(__ret_warn_on); \ }) #ifndef WARN_ON_ONCE #define WARN_ON_ONCE(condition) ({ \ static bool __section(.data.once) __warned; \ int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ __warned = true; \ WARN_ON(1); \ } \ unlikely(__ret_warn_once); \ }) #endif #define WARN_ONCE(condition, format...) ({ \ static bool __section(.data.once) __warned; \ int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ __warned = true; \ WARN(1, format); \ } \ unlikely(__ret_warn_once); \ }) #define WARN_TAINT_ONCE(condition, taint, format...) ({ \ static bool __section(.data.once) __warned; \ int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ __warned = true; \ WARN_TAINT(1, taint, format); \ } \ unlikely(__ret_warn_once); \ }) #else /* !CONFIG_BUG */ #ifndef HAVE_ARCH_BUG #define BUG() do {} while (1) #endif #ifndef HAVE_ARCH_BUG_ON #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) #endif #ifndef HAVE_ARCH_WARN_ON #define WARN_ON(condition) ({ \ int __ret_warn_on = !!(condition); \ unlikely(__ret_warn_on); \ }) #endif #ifndef WARN #define WARN(condition, format...) ({ \ int __ret_warn_on = !!(condition); \ no_printk(format); \ unlikely(__ret_warn_on); \ }) #endif #define WARN_ON_ONCE(condition) WARN_ON(condition) #define WARN_ONCE(condition, format...) WARN(condition, format) #define WARN_TAINT(condition, taint, format...) WARN(condition, format) #define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format) #endif /* * WARN_ON_SMP() is for cases that the warning is either * meaningless for !SMP or may even cause failures. * It can also be used with values that are only defined * on SMP: * * struct foo { * [...] * #ifdef CONFIG_SMP * int bar; * #endif * }; * * void func(struct foo *zoot) * { * WARN_ON_SMP(!zoot->bar); * * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(), * and should be a nop and return false for uniprocessor. * * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set * and x is true. */ #ifdef CONFIG_SMP # define WARN_ON_SMP(x) WARN_ON(x) #else /* * Use of ({0;}) because WARN_ON_SMP(x) may be used either as * a stand alone line statement or as a condition in an if () * statement. * A simple "0" would cause gcc to give a "statement has no effect" * warning. 
*/ # define WARN_ON_SMP(x) ({0;}) #endif #endif /* __ASSEMBLY__ */ #endif mcs_spinlock.h 0000644 00000000404 14722073052 0007401 0 ustar 00 #ifndef __ASM_MCS_SPINLOCK_H #define __ASM_MCS_SPINLOCK_H /* * Architectures can define their own: * * arch_mcs_spin_lock_contended(l) * arch_mcs_spin_unlock_contended(l) * * See kernel/locking/mcs_spinlock.c. */ #endif /* __ASM_MCS_SPINLOCK_H */ gpio.h 0000644 00000010716 14722073052 0005662 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_GPIO_H #define _ASM_GENERIC_GPIO_H #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/of.h> #ifdef CONFIG_GPIOLIB #include <linux/compiler.h> #include <linux/gpio/driver.h> #include <linux/gpio/consumer.h> /* Platforms may implement their GPIO interface with library code, * at a small performance cost for non-inlined operations and some * extra memory (for code and for per-GPIO table entries). * * While the GPIO programming interface defines valid GPIO numbers * to be in the range 0..MAX_INT, this library restricts them to the * smaller range 0..ARCH_NR_GPIOS-1. * * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is * actually an estimate of a board-specific value. */ #ifndef ARCH_NR_GPIOS #if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0 #define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO #else #define ARCH_NR_GPIOS 512 #endif #endif /* * "valid" GPIO numbers are nonnegative and may be passed to * setup routines like gpio_request(). only some valid numbers * can successfully be requested and used. * * Invalid GPIO numbers are useful for indicating no-such-GPIO in * platform data and other tables. */ static inline bool gpio_is_valid(int number) { return number >= 0 && number < ARCH_NR_GPIOS; } struct device; struct gpio; struct seq_file; struct module; struct device_node; struct gpio_desc; /* caller holds gpio_lock *OR* gpio is marked as requested */ static inline struct gpio_chip *gpio_to_chip(unsigned gpio) { return gpiod_to_chip(gpio_to_desc(gpio)); } /* Always use the library code for GPIO management calls, * or when sleeping may be involved. */ extern int gpio_request(unsigned gpio, const char *label); extern void gpio_free(unsigned gpio); static inline int gpio_direction_input(unsigned gpio) { return gpiod_direction_input(gpio_to_desc(gpio)); } static inline int gpio_direction_output(unsigned gpio, int value) { return gpiod_direction_output_raw(gpio_to_desc(gpio), value); } static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) { return gpiod_set_debounce(gpio_to_desc(gpio), debounce); } static inline int gpio_get_value_cansleep(unsigned gpio) { return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio)); } static inline void gpio_set_value_cansleep(unsigned gpio, int value) { return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value); } /* A platform's <asm/gpio.h> code may want to inline the I/O calls when * the GPIO is constant and refers to some always-present controller, * giving direct access to chip registers and tight bitbanging loops. 
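 *
 * A bitbanging sketch (illustration only; GPIO_DAT and GPIO_CLK are
 * hypothetical pin numbers on such an always-present controller):
 *
 *	for (i = 7; i >= 0; i--) {
 *		__gpio_set_value(GPIO_DAT, (byte >> i) & 1);
 *		__gpio_set_value(GPIO_CLK, 1);
 *		__gpio_set_value(GPIO_CLK, 0);
 *	}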
*/ static inline int __gpio_get_value(unsigned gpio) { return gpiod_get_raw_value(gpio_to_desc(gpio)); } static inline void __gpio_set_value(unsigned gpio, int value) { return gpiod_set_raw_value(gpio_to_desc(gpio), value); } static inline int __gpio_cansleep(unsigned gpio) { return gpiod_cansleep(gpio_to_desc(gpio)); } static inline int __gpio_to_irq(unsigned gpio) { return gpiod_to_irq(gpio_to_desc(gpio)); } extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); extern int gpio_request_array(const struct gpio *array, size_t num); extern void gpio_free_array(const struct gpio *array, size_t num); /* * A sysfs interface can be exported by individual drivers if they want, * but more typically is configured entirely from userspace. */ static inline int gpio_export(unsigned gpio, bool direction_may_change) { return gpiod_export(gpio_to_desc(gpio), direction_may_change); } static inline int gpio_export_link(struct device *dev, const char *name, unsigned gpio) { return gpiod_export_link(dev, name, gpio_to_desc(gpio)); } static inline void gpio_unexport(unsigned gpio) { gpiod_unexport(gpio_to_desc(gpio)); } #else /* !CONFIG_GPIOLIB */ static inline bool gpio_is_valid(int number) { /* only non-negative numbers are valid */ return number >= 0; } /* platforms that don't directly support access to GPIOs through I2C, SPI, * or other blocking infrastructure can use these wrappers. */ static inline int gpio_cansleep(unsigned gpio) { return 0; } static inline int gpio_get_value_cansleep(unsigned gpio) { might_sleep(); return __gpio_get_value(gpio); } static inline void gpio_set_value_cansleep(unsigned gpio, int value) { might_sleep(); __gpio_set_value(gpio, value); } #endif /* !CONFIG_GPIOLIB */ #endif /* _ASM_GENERIC_GPIO_H */ cache.h 0000644 00000000600 14722073052 0005756 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_CACHE_H #define __ASM_GENERIC_CACHE_H /* * 32 bytes appears to be the most common cache line size, * so make that the default here. Architectures with larger * cache lines need to provide their own cache.h. */ #define L1_CACHE_SHIFT 5 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #endif /* __ASM_GENERIC_CACHE_H */ audit_write.h 0000644 00000000715 14722073052 0007242 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #include <asm-generic/audit_dir_write.h> __NR_acct, #ifdef __NR_swapon __NR_swapon, #endif __NR_quotactl, #ifdef __NR_truncate __NR_truncate, #endif #ifdef __NR_truncate64 __NR_truncate64, #endif #ifdef __NR_ftruncate __NR_ftruncate, #endif #ifdef __NR_ftruncate64 __NR_ftruncate64, #endif #ifdef __NR_bind __NR_bind, /* bind can affect fs object only in one way... */ #endif #ifdef __NR_fallocate __NR_fallocate, #endif 5level-fixup.h 0000644 00000002553 14722073052 0007251 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _5LEVEL_FIXUP_H #define _5LEVEL_FIXUP_H #define __ARCH_HAS_5LEVEL_HACK #define __PAGETABLE_P4D_FOLDED 1 #define P4D_SHIFT PGDIR_SHIFT #define P4D_SIZE PGDIR_SIZE #define P4D_MASK PGDIR_MASK #define MAX_PTRS_PER_P4D 1 #define PTRS_PER_P4D 1 #define p4d_t pgd_t #define pud_alloc(mm, p4d, address) \ ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? 
\ NULL : pud_offset(p4d, address)) #define p4d_alloc(mm, pgd, address) (pgd) #define p4d_offset(pgd, start) (pgd) #ifndef __ASSEMBLY__ static inline int p4d_none(p4d_t p4d) { return 0; } static inline int p4d_bad(p4d_t p4d) { return 0; } static inline int p4d_present(p4d_t p4d) { return 1; } #endif #define p4d_ERROR(p4d) do { } while (0) #define p4d_clear(p4d) pgd_clear(p4d) #define p4d_val(p4d) pgd_val(p4d) #define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud) #define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud) #define p4d_page(p4d) pgd_page(p4d) #define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d) #define __p4d(x) __pgd(x) #define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d) #undef p4d_free_tlb #define p4d_free_tlb(tlb, x, addr) do { } while (0) #define p4d_free(mm, x) do { } while (0) #define __p4d_free_tlb(tlb, x, addr) do { } while (0) #undef p4d_addr_end #define p4d_addr_end(addr, end) (end) #endif mm-arch-hooks.h 0000644 00000000604 14722073052 0007364 0 ustar 00 /* * Architecture specific mm hooks */ #ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H #define _ASM_GENERIC_MM_ARCH_HOOKS_H /* * This file should be included through arch/../include/asm/Kbuild for * the architecture which doesn't need specific mm hooks. * * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h * are used. */ #endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */ pgtable-nop4d-hack.h 0000644 00000003655 14722073052 0010274 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PGTABLE_NOP4D_HACK_H #define _PGTABLE_NOP4D_HACK_H #ifndef __ASSEMBLY__ #include <asm-generic/5level-fixup.h> #define __PAGETABLE_PUD_FOLDED 1 /* * Having the pud type consist of a pgd gets the size right, and allows * us to conceptually access the pgd entry that this pud is folded into * without casting. */ typedef struct { pgd_t pgd; } pud_t; #define PUD_SHIFT PGDIR_SHIFT #define PTRS_PER_PUD 1 #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) /* * The "pgd_xxx()" functions here are trivial for a folded two-level * setup: the pud is never bad, and a pud always exists (as it's folded * into the pgd entry) */ static inline int pgd_none(pgd_t pgd) { return 0; } static inline int pgd_bad(pgd_t pgd) { return 0; } static inline int pgd_present(pgd_t pgd) { return 1; } static inline void pgd_clear(pgd_t *pgd) { } #define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) #define pgd_populate(mm, pgd, pud) do { } while (0) #define pgd_populate_safe(mm, pgd, pud) do { } while (0) /* * (puds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) */ #define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) { return (pud_t *)pgd; } #define pud_val(x) (pgd_val((x).pgd)) #define __pud(x) ((pud_t) { __pgd(x) }) #define pgd_page(pgd) (pud_page((pud_t){ pgd })) #define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) /* * allocating and freeing a pud is trivial: the 1-entry pud is * inside the pgd, so has no extra memory associated with it. 
*/ #define pud_alloc_one(mm, address) NULL #define pud_free(mm, x) do { } while (0) #define __pud_free_tlb(tlb, x, a) do { } while (0) #undef pud_addr_end #define pud_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ #endif /* _PGTABLE_NOP4D_HACK_H */ pgtable-nop4d.h 0000644 00000003354 14722073052 0007364 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PGTABLE_NOP4D_H #define _PGTABLE_NOP4D_H #ifndef __ASSEMBLY__ #define __PAGETABLE_P4D_FOLDED 1 typedef struct { pgd_t pgd; } p4d_t; #define P4D_SHIFT PGDIR_SHIFT #define MAX_PTRS_PER_P4D 1 #define PTRS_PER_P4D 1 #define P4D_SIZE (1UL << P4D_SHIFT) #define P4D_MASK (~(P4D_SIZE-1)) /* * The "pgd_xxx()" functions here are trivial for a folded two-level * setup: the p4d is never bad, and a p4d always exists (as it's folded * into the pgd entry) */ static inline int pgd_none(pgd_t pgd) { return 0; } static inline int pgd_bad(pgd_t pgd) { return 0; } static inline int pgd_present(pgd_t pgd) { return 1; } static inline void pgd_clear(pgd_t *pgd) { } #define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd)) #define pgd_populate(mm, pgd, p4d) do { } while (0) #define pgd_populate_safe(mm, pgd, p4d) do { } while (0) /* * (p4ds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) */ #define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval }) static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) { return (p4d_t *)pgd; } #define p4d_val(x) (pgd_val((x).pgd)) #define __p4d(x) ((p4d_t) { __pgd(x) }) #define pgd_page(pgd) (p4d_page((p4d_t){ pgd })) #define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd })) /* * allocating and freeing a p4d is trivial: the 1-entry p4d is * inside the pgd, so has no extra memory associated with it. */ #define p4d_alloc_one(mm, address) NULL #define p4d_free(mm, x) do { } while (0) #define __p4d_free_tlb(tlb, x, a) do { } while (0) #undef p4d_addr_end #define p4d_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ #endif /* _PGTABLE_NOP4D_H */ kprobes.h 0000644 00000001544 14722073052 0006370 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_KPROBES_H #define _ASM_GENERIC_KPROBES_H #if defined(__KERNEL__) && !defined(__ASSEMBLY__) #ifdef CONFIG_KPROBES /* * Blacklist generating macro. Use this macro to specify functions * that must not be probed. */ # define __NOKPROBE_SYMBOL(fname) \ static unsigned long __used \ __attribute__((__section__("_kprobe_blacklist"))) \ _kbl_addr_##fname = (unsigned long)fname; # define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) /* Use this to forbid a kprobes attach on very low level functions */ # define __kprobes __attribute__((__section__(".kprobes.text"))) # define nokprobe_inline __always_inline #else # define NOKPROBE_SYMBOL(fname) # define __kprobes # define nokprobe_inline inline #endif #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ #endif /* _ASM_GENERIC_KPROBES_H */ unaligned.h 0000644 00000002066 14722073052 0006671 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_UNALIGNED_H #define __ASM_GENERIC_UNALIGNED_H /* * This is the most generic implementation of unaligned accesses * and should work almost anywhere. */ #include <asm/byteorder.h> /* Set by the arch if it can handle unaligned accesses in hardware. 
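 *
 * Either way the accessors below behave the same; a usage sketch
 * (illustration only), reading a 32-bit big-endian field at byte
 * offset 4 of a packed buffer 'pkt':
 *
 *	u32 seq = get_unaligned_be32(pkt + 4);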
*/ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS # include <linux/unaligned/access_ok.h> #endif #if defined(__LITTLE_ENDIAN) # ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS # include <linux/unaligned/le_struct.h> # include <linux/unaligned/be_byteshift.h> # endif # include <linux/unaligned/generic.h> # define get_unaligned __get_unaligned_le # define put_unaligned __put_unaligned_le #elif defined(__BIG_ENDIAN) # ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS # include <linux/unaligned/be_struct.h> # include <linux/unaligned/le_byteshift.h> # endif # include <linux/unaligned/generic.h> # define get_unaligned __get_unaligned_be # define put_unaligned __put_unaligned_be #else # error need to define endianess #endif #endif /* __ASM_GENERIC_UNALIGNED_H */ asm-offsets.h 0000644 00000000043 14722073052 0007143 0 ustar 00 #include <generated/asm-offsets.h> compat.h 0000644 00000001222 14722073052 0006177 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_COMPAT_H #define __ASM_GENERIC_COMPAT_H /* These types are common across all compat ABIs */ typedef u32 compat_size_t; typedef s32 compat_ssize_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u32 compat_ino_t; typedef s32 compat_off_t; typedef s64 compat_loff_t; typedef s32 compat_daddr_t; typedef s32 compat_timer_t; typedef s32 compat_key_t; typedef s16 compat_short_t; typedef s32 compat_int_t; typedef s32 compat_long_t; typedef u16 compat_ushort_t; typedef u32 compat_uint_t; typedef u32 compat_ulong_t; typedef u32 compat_uptr_t; typedef u32 compat_aio_context_t; #endif memory_model.h 0000644 00000004241 14722073052 0007410 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_MEMORY_MODEL_H #define __ASM_MEMORY_MODEL_H #include <linux/pfn.h> #ifndef __ASSEMBLY__ #if defined(CONFIG_FLATMEM) #ifndef ARCH_PFN_OFFSET #define ARCH_PFN_OFFSET (0UL) #endif #elif defined(CONFIG_DISCONTIGMEM) #ifndef arch_pfn_to_nid #define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) #endif #ifndef arch_local_page_offset #define arch_local_page_offset(pfn, nid) \ ((pfn) - NODE_DATA(nid)->node_start_pfn) #endif #endif /* CONFIG_DISCONTIGMEM */ /* * supports 3 memory models. */ #if defined(CONFIG_FLATMEM) #define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ ARCH_PFN_OFFSET) #elif defined(CONFIG_DISCONTIGMEM) #define __pfn_to_page(pfn) \ ({ unsigned long __pfn = (pfn); \ unsigned long __nid = arch_pfn_to_nid(__pfn); \ NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ }) #define __page_to_pfn(pg) \ ({ const struct page *__pg = (pg); \ struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ (unsigned long)(__pg - __pgdat->node_mem_map) + \ __pgdat->node_start_pfn; \ }) #elif defined(CONFIG_SPARSEMEM_VMEMMAP) /* memmap is virtually contiguous. */ #define __pfn_to_page(pfn) (vmemmap + (pfn)) #define __page_to_pfn(page) (unsigned long)((page) - vmemmap) #elif defined(CONFIG_SPARSEMEM) /* * Note: section's mem_map is encoded to reflect its start_pfn. 
* section[i].section_mem_map == mem_map's address - start_pfn; */ #define __page_to_pfn(pg) \ ({ const struct page *__pg = (pg); \ int __sec = page_to_section(__pg); \ (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ }) #define __pfn_to_page(pfn) \ ({ unsigned long __pfn = (pfn); \ struct mem_section *__sec = __pfn_to_section(__pfn); \ __section_mem_map_addr(__sec) + __pfn; \ }) #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ /* * Convert a physical address to a Page Frame Number and back */ #define __phys_to_pfn(paddr) PHYS_PFN(paddr) #define __pfn_to_phys(pfn) PFN_PHYS(pfn) #define page_to_pfn __page_to_pfn #define pfn_to_page __pfn_to_page #endif /* __ASSEMBLY__ */ #endif atomic64.h 0000644 00000003066 14722073052 0006352 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Generic implementation of 64-bit atomics using spinlocks, * useful on processors that don't have 64-bit atomic instructions. * * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> */ #ifndef _ASM_GENERIC_ATOMIC64_H #define _ASM_GENERIC_ATOMIC64_H #include <linux/types.h> typedef struct { s64 counter; } atomic64_t; #define ATOMIC64_INIT(i) { (i) } extern s64 atomic64_read(const atomic64_t *v); extern void atomic64_set(atomic64_t *v, s64 i); #define atomic64_set_release(v, i) atomic64_set((v), (i)) #define ATOMIC64_OP(op) \ extern void atomic64_##op(s64 a, atomic64_t *v); #define ATOMIC64_OP_RETURN(op) \ extern s64 atomic64_##op##_return(s64 a, atomic64_t *v); #define ATOMIC64_FETCH_OP(op) \ extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v); #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) ATOMIC64_OPS(add) ATOMIC64_OPS(sub) #undef ATOMIC64_OPS #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) ATOMIC64_OPS(and) ATOMIC64_OPS(or) ATOMIC64_OPS(xor) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP extern s64 atomic64_dec_if_positive(atomic64_t *v); #define atomic64_dec_if_positive atomic64_dec_if_positive extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); extern s64 atomic64_xchg(atomic64_t *v, s64 new); extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); #define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif /* _ASM_GENERIC_ATOMIC64_H */ irqflags.h 0000644 00000003007 14722073052 0006527 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_IRQFLAGS_H #define __ASM_GENERIC_IRQFLAGS_H /* * All architectures should implement at least the first two functions, * usually inline assembly will be the best way. 
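 *
 * These primitives compose into the usual save/restore pattern
 * (illustration only):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... critical section with interrupts disabled ...
 *	arch_local_irq_restore(flags);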
*/ #ifndef ARCH_IRQ_DISABLED #define ARCH_IRQ_DISABLED 0 #define ARCH_IRQ_ENABLED 1 #endif /* read interrupt enabled status */ #ifndef arch_local_save_flags unsigned long arch_local_save_flags(void); #endif /* set interrupt enabled status */ #ifndef arch_local_irq_restore void arch_local_irq_restore(unsigned long flags); #endif /* get status and disable interrupts */ #ifndef arch_local_irq_save static inline unsigned long arch_local_irq_save(void) { unsigned long flags; flags = arch_local_save_flags(); arch_local_irq_restore(ARCH_IRQ_DISABLED); return flags; } #endif /* test flags */ #ifndef arch_irqs_disabled_flags static inline int arch_irqs_disabled_flags(unsigned long flags) { return flags == ARCH_IRQ_DISABLED; } #endif /* unconditionally enable interrupts */ #ifndef arch_local_irq_enable static inline void arch_local_irq_enable(void) { arch_local_irq_restore(ARCH_IRQ_ENABLED); } #endif /* unconditionally disable interrupts */ #ifndef arch_local_irq_disable static inline void arch_local_irq_disable(void) { arch_local_irq_restore(ARCH_IRQ_DISABLED); } #endif /* test hardware interrupt enable bit */ #ifndef arch_irqs_disabled static inline int arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } #endif #endif /* __ASM_GENERIC_IRQFLAGS_H */ fixmap.h 0000644 00000005564 14722073052 0006205 0 ustar 00 /* * fixmap.h: compile-time virtual memory allocation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Ingo Molnar * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 * Break out common bits to asm-generic by Mark Salter, November 2013 */ #ifndef __ASM_GENERIC_FIXMAP_H #define __ASM_GENERIC_FIXMAP_H #include <linux/bug.h> #include <linux/mm_types.h> #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) #ifndef __ASSEMBLY__ /* * 'index to address' translation. If anyone tries to use the idx * directly without translation, we catch the bug with a NULL-dereference * kernel oops. Illegal ranges of incoming indices are caught too. */ static __always_inline unsigned long fix_to_virt(const unsigned int idx) { BUILD_BUG_ON(idx >= __end_of_fixed_addresses); return __fix_to_virt(idx); } static inline unsigned long virt_to_fix(const unsigned long vaddr) { BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); return __virt_to_fix(vaddr); } /* * Provide some reasonable defaults for page flags. * Not all architectures use all of these different types and some * architectures use different names.
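 *
 * For reference, arch code typically uses the helpers below along these
 * lines (sketch only; FIX_EXAMPLE stands in for a real enum fixed_addresses
 * entry and phys for a physical address):
 *
 *	set_fixmap(FIX_EXAMPLE, phys);		// map phys at a fixed VA
 *	ptr = (void *)fix_to_virt(FIX_EXAMPLE);	// translate the index to that VA
 *	...
 *	clear_fixmap(FIX_EXAMPLE);		// tear the mapping down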
*/ #ifndef FIXMAP_PAGE_NORMAL #define FIXMAP_PAGE_NORMAL PAGE_KERNEL #endif #if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO) #define FIXMAP_PAGE_RO PAGE_KERNEL_RO #endif #ifndef FIXMAP_PAGE_NOCACHE #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE #endif #ifndef FIXMAP_PAGE_IO #define FIXMAP_PAGE_IO PAGE_KERNEL_IO #endif #ifndef FIXMAP_PAGE_CLEAR #define FIXMAP_PAGE_CLEAR __pgprot(0) #endif #ifndef set_fixmap #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL) #endif #ifndef clear_fixmap #define clear_fixmap(idx) \ __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR) #endif /* Return a pointer with offset calculated */ #define __set_fixmap_offset(idx, phys, flags) \ ({ \ unsigned long ________addr; \ __set_fixmap(idx, phys, flags); \ ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ ________addr; \ }) #define set_fixmap_offset(idx, phys) \ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL) /* * Some hardware wants to get fixmapped without caching. */ #define set_fixmap_nocache(idx, phys) \ __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE) #define set_fixmap_offset_nocache(idx, phys) \ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE) /* * Some fixmaps are for IO */ #define set_fixmap_io(idx, phys) \ __set_fixmap(idx, phys, FIXMAP_PAGE_IO) #define set_fixmap_offset_io(idx, phys) \ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO) #endif /* __ASSEMBLY__ */ #endif /* __ASM_GENERIC_FIXMAP_H */ pci_iomap.h 0000644 00000003357 14722073052 0006667 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* Generic I/O port emulation. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef __ASM_GENERIC_PCI_IOMAP_H #define __ASM_GENERIC_PCI_IOMAP_H struct pci_dev; #ifdef CONFIG_PCI /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max); extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen); extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen); /* Create a virtual mapping cookie for a port on a given PCI device. 
* Do not call this directly, it exists to make it easier for architectures * to override */ #ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port, unsigned int nr); #else #define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr)) #endif #elif defined(CONFIG_GENERIC_PCI_IOMAP) static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) { return NULL; } static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max) { return NULL; } static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen) { return NULL; } static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen) { return NULL; } #endif #endif /* __ASM_GENERIC_IO_H */ audit_read.h 0000644 00000000361 14722073052 0007020 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifdef __NR_readlink __NR_readlink, #endif __NR_quotactl, __NR_listxattr, __NR_llistxattr, __NR_flistxattr, __NR_getxattr, __NR_lgetxattr, __NR_fgetxattr, #ifdef __NR_readlinkat __NR_readlinkat, #endif device.h 0000644 00000000365 14722073052 0006162 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Arch specific extensions to struct device */ #ifndef _ASM_GENERIC_DEVICE_H #define _ASM_GENERIC_DEVICE_H struct dev_archdata { }; struct pdev_archdata { }; #endif /* _ASM_GENERIC_DEVICE_H */ set_memory.h 0000644 00000000552 14722073052 0007104 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_SET_MEMORY_H #define __ASM_SET_MEMORY_H /* * Functions to change memory attributes. */ int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); #endif int-ll64.h 0000644 00000001537 14722073052 0006276 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * asm-generic/int-ll64.h * * Integer declarations for architectures which use "long long" * for 64-bit types. 
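 *
 * The *_C() macros below let a single constant expand with the right
 * suffix in C while staying bare in assembly, e.g. (illustrative only,
 * MY_MASK is made up):
 *
 *	#define MY_MASK	U64_C(0xffff000000000000)	// 0x...ULL in C, plain in .S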
*/ #ifndef _ASM_GENERIC_INT_LL64_H #define _ASM_GENERIC_INT_LL64_H #include <uapi/asm-generic/int-ll64.h> #ifndef __ASSEMBLY__ typedef __s8 s8; typedef __u8 u8; typedef __s16 s16; typedef __u16 u16; typedef __s32 s32; typedef __u32 u32; typedef __s64 s64; typedef __u64 u64; #define S8_C(x) x #define U8_C(x) x ## U #define S16_C(x) x #define U16_C(x) x ## U #define S32_C(x) x #define U32_C(x) x ## U #define S64_C(x) x ## LL #define U64_C(x) x ## ULL #else /* __ASSEMBLY__ */ #define S8_C(x) x #define U8_C(x) x #define S16_C(x) x #define U16_C(x) x #define S32_C(x) x #define U32_C(x) x #define S64_C(x) x #define U64_C(x) x #endif /* __ASSEMBLY__ */ #endif /* _ASM_GENERIC_INT_LL64_H */ signal.h 0000644 00000000464 14722073052 0006200 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SIGNAL_H #define __ASM_GENERIC_SIGNAL_H #include <uapi/asm-generic/signal.h> #ifndef __ASSEMBLY__ #ifdef SA_RESTORER #endif #include <asm/sigcontext.h> #undef __HAVE_ARCH_SIG_BITOPS #endif /* __ASSEMBLY__ */ #endif /* _ASM_GENERIC_SIGNAL_H */ seccomp.h 0000644 00000002236 14722073052 0006353 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/asm-generic/seccomp.h * * Copyright (C) 2014 Linaro Limited * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> */ #ifndef _ASM_GENERIC_SECCOMP_H #define _ASM_GENERIC_SECCOMP_H #include <linux/unistd.h> #if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32) #define __NR_seccomp_read_32 __NR_read #define __NR_seccomp_write_32 __NR_write #define __NR_seccomp_exit_32 __NR_exit #ifndef __NR_seccomp_sigreturn_32 #define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn #endif #endif /* CONFIG_COMPAT && ! already defined */ #define __NR_seccomp_read __NR_read #define __NR_seccomp_write __NR_write #define __NR_seccomp_exit __NR_exit #ifndef __NR_seccomp_sigreturn #define __NR_seccomp_sigreturn __NR_rt_sigreturn #endif #ifdef CONFIG_COMPAT #ifndef get_compat_mode1_syscalls static inline const int *get_compat_mode1_syscalls(void) { static const int mode1_syscalls_32[] = { __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, 0, /* null terminated */ }; return mode1_syscalls_32; } #endif #endif /* CONFIG_COMPAT */ #endif /* _ASM_GENERIC_SECCOMP_H */ vga.h 0000644 00000001113 14722073052 0005470 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Access to VGA videoram * * (c) 1998 Martin Mares <mj@ucw.cz> */ #ifndef __ASM_GENERIC_VGA_H #define __ASM_GENERIC_VGA_H /* * On most architectures that support VGA, we can just * recalculate addresses and then access the videoram * directly without any black magic. * * Everyone else needs to ioremap the address and use * proper I/O accesses. */ #ifndef VGA_MAP_MEM #define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) #endif #define vga_readb(x) (*(x)) #define vga_writeb(x, y) (*(y) = (x)) #endif /* _ASM_GENERIC_VGA_H */ msi.h 0000644 00000001506 14722073052 0005511 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MSI_H #define __ASM_GENERIC_MSI_H #include <linux/types.h> #ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS # define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2 #endif struct msi_desc; /** * struct msi_alloc_info - Default structure for MSI interrupt allocation. * @desc: Pointer to msi descriptor * @hwirq: Associated hw interrupt number in the domain * @scratchpad: Storage for implementation specific scratch data * * Architectures can provide their own implementation by not including * asm-generic/msi.h into their arch specific header file. 
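 *
 * An irqdomain's MSI ops can use the scratchpad to pass data between the
 * allocation callbacks, e.g. (hypothetical sketch of a .msi_prepare
 * implementation; the names are made up):
 *
 *	static int my_msi_prepare(struct irq_domain *domain, struct device *dev,
 *				  int nvec, msi_alloc_info_t *arg)
 *	{
 *		arg->scratchpad[0].ptr = dev;	// stashed for later callbacks
 *		return 0;
 *	}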
*/ typedef struct msi_alloc_info { struct msi_desc *desc; irq_hw_number_t hwirq; union { unsigned long ul; void *ptr; } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS]; } msi_alloc_info_t; #define GENERIC_MSI_DOMAIN_OPS 1 #endif vdso/vsyscall.h 0000644 00000001716 14722073052 0007537 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_VSYSCALL_H #define __ASM_GENERIC_VSYSCALL_H #ifndef __ASSEMBLY__ #ifndef __arch_get_k_vdso_data static __always_inline struct vdso_data *__arch_get_k_vdso_data(void) { return NULL; } #endif /* __arch_get_k_vdso_data */ #ifndef __arch_update_vdso_data static __always_inline bool __arch_update_vdso_data(void) { return true; } #endif /* __arch_update_vdso_data */ #ifndef __arch_get_clock_mode static __always_inline int __arch_get_clock_mode(struct timekeeper *tk) { return 0; } #endif /* __arch_get_clock_mode */ #ifndef __arch_update_vsyscall static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk) { } #endif /* __arch_update_vsyscall */ #ifndef __arch_sync_vdso_data static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata) { } #endif /* __arch_sync_vdso_data */ #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_VSYSCALL_H */ pgtable.h 0000644 00000101050 14722073052 0006332 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_PGTABLE_H #define _ASM_GENERIC_PGTABLE_H #include <linux/pfn.h> #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU #include <linux/mm_types.h> #include <linux/bug.h> #include <linux/errno.h> #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED #endif /* * On almost all architectures and configurations, 0 can be used as the * upper ceiling to free_pgtables(): on many architectures it has the same * effect as using TASK_SIZE. However, there is one configuration which * must impose a more careful limit, to avoid freeing kernel pgtables. 
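 *
 * Such a configuration simply defines its own ceiling before this point,
 * e.g. (sketch, assuming the arch wants to stop at its user/kernel split):
 *
 *	#define USER_PGTABLES_CEILING	TASK_SIZE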
*/ #ifndef USER_PGTABLES_CEILING #define USER_PGTABLES_CEILING 0UL #endif #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty); #endif #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); extern int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, pud_t entry, int dirty); #else static inline int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { BUILD_BUG(); return 0; } static inline int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, pud_t entry, int dirty) { BUILD_BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; int r = 1; if (!pte_young(pte)) r = 0; else set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); return r; } #endif #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; int r = 1; if (!pmd_young(pmd)) r = 0; else set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); return r; } #else static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { BUILD_BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #else /* * Despite relevant to THP only, this API is called from generic rmap code * under PageTransHuge(), hence needs a dummy implementation for !THP */ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { BUILD_BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; pte_clear(mm, address, ptep); return pte; } #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; pmd_clear(pmdp); return pmd; } #endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pud_t *pudp) { pud_t pud = *pudp; pud_clear(pudp); return pud; } #endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, unsigned long address, pmd_t *pmdp, int full) { return pmdp_huge_get_and_clear(mm, address, pmdp); } #endif #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, 
unsigned long address, pud_t *pudp, int full) { return pudp_huge_get_and_clear(mm, address, pudp); } #endif #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, int full) { pte_t pte; pte = ptep_get_and_clear(mm, address, ptep); return pte; } #endif /* * Some architectures may be able to avoid expensive synchronization * primitives when modifications are made to PTE's which are already * not present, or in the process of an address space destruction. */ #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL static inline void pte_clear_not_present_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, int full) { pte_clear(mm, address, ptep); } #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH extern pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #endif #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pud_t *pudp); #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte = *ptep; set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); } #endif #ifndef pte_savedwrite #define pte_savedwrite pte_write #endif #ifndef pte_mk_savedwrite #define pte_mk_savedwrite pte_mkwrite #endif #ifndef pte_clear_savedwrite #define pte_clear_savedwrite pte_wrprotect #endif #ifndef pmd_savedwrite #define pmd_savedwrite pmd_write #endif #ifndef pmd_mk_savedwrite #define pmd_mk_savedwrite pmd_mkwrite #endif #ifndef pmd_clear_savedwrite #define pmd_clear_savedwrite pmd_wrprotect #endif #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t old_pmd = *pmdp; set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); } #else static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { BUILD_BUG(); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static inline void pudp_set_wrprotect(struct mm_struct *mm, unsigned long address, pud_t *pudp) { pud_t old_pud = *pudp; set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); } #else static inline void pudp_set_wrprotect(struct mm_struct *mm, unsigned long address, pud_t *pudp) { BUILD_BUG(); } #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ #endif #ifndef pmdp_collapse_flush #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #else static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { BUILD_BUG(); return *pmdp; } #define pmdp_collapse_flush pmdp_collapse_flush #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable); #endif #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * This is an implementation of pmdp_establish() that is only suitable for an * architecture that doesn't have hardware dirty/accessed bits. 
In this case we * can't race with a CPU which sets these bits, so a non-atomic approach is fine. */ static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd) { pmd_t old_pmd = *pmdp; set_pmd_at(vma->vm_mm, address, pmdp, pmd); return old_pmd; } #endif #ifndef __HAVE_ARCH_PMDP_INVALIDATE extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTE_SAME static inline int pte_same(pte_t pte_a, pte_t pte_b) { return pte_val(pte_a) == pte_val(pte_b); } #endif #ifndef __HAVE_ARCH_PTE_UNUSED /* * Some architectures provide facilities to virtualization guests * so that they can flag allocated pages as unused. This allows the * host to transparently reclaim unused pages. This function returns * whether the pte's page is unused. */ static inline int pte_unused(pte_t pte) { return 0; } #endif #ifndef pte_access_permitted #define pte_access_permitted(pte, write) \ (pte_present(pte) && (!(write) || pte_write(pte))) #endif #ifndef pmd_access_permitted #define pmd_access_permitted(pmd, write) \ (pmd_present(pmd) && (!(write) || pmd_write(pmd))) #endif #ifndef pud_access_permitted #define pud_access_permitted(pud, write) \ (pud_present(pud) && (!(write) || pud_write(pud))) #endif #ifndef p4d_access_permitted #define p4d_access_permitted(p4d, write) \ (p4d_present(p4d) && (!(write) || p4d_write(p4d))) #endif #ifndef pgd_access_permitted #define pgd_access_permitted(pgd, write) \ (pgd_present(pgd) && (!(write) || pgd_write(pgd))) #endif #ifndef __HAVE_ARCH_PMD_SAME static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); } static inline int pud_same(pud_t pud_a, pud_t pud_b) { return pud_val(pud_a) == pud_val(pud_b); } #endif #ifndef __HAVE_ARCH_P4D_SAME static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b) { return p4d_val(p4d_a) == p4d_val(p4d_b); } #endif #ifndef __HAVE_ARCH_PGD_SAME static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) { return pgd_val(pgd_a) == pgd_val(pgd_b); } #endif /* * Use set_p*_safe(), and elide TLB flushing, when confident that *no* * TLB flush will be required as a result of the "set". For example, use * in scenarios where it is known ahead of time that the routine is * setting non-present entries, or re-setting an existing entry to the * same value. Otherwise, use the typical "set" helpers and flush the * TLB. */ #define set_pte_safe(ptep, pte) \ ({ \ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ set_pte(ptep, pte); \ }) #define set_pmd_safe(pmdp, pmd) \ ({ \ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ set_pmd(pmdp, pmd); \ }) #define set_pud_safe(pudp, pud) \ ({ \ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ set_pud(pudp, pud); \ }) #define set_p4d_safe(p4dp, p4d) \ ({ \ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ set_p4d(p4dp, p4d); \ }) #define set_pgd_safe(pgdp, pgd) \ ({ \ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ set_pgd(pgdp, pgd); \ }) #ifndef __HAVE_ARCH_DO_SWAP_PAGE /* * Some architectures support metadata associated with a page. When a * page is being swapped out, this metadata must be saved so it can be * restored when the page is swapped back in. SPARC M7 and newer * processors support an ADI (Application Data Integrity) tag for the * page as metadata for the page. arch_do_swap_page() can restore this * metadata when a page is swapped back in.
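 *
 * A port with such metadata would supply its own hook along these lines
 * (hypothetical sketch; the two helpers named here are made up):
 *
 *	static inline void arch_do_swap_page(struct mm_struct *mm,
 *					     struct vm_area_struct *vma,
 *					     unsigned long addr,
 *					     pte_t pte, pte_t oldpte)
 *	{
 *		if (metadata_saved_for(oldpte))			// made-up helper
 *			restore_page_metadata(mm, addr, pte);	// made-up helper
 *	}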
*/ static inline void arch_do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t pte, pte_t oldpte) { } #endif #ifndef __HAVE_ARCH_UNMAP_ONE /* * Some architectures support metadata associated with a page. When a * page is being swapped out, this metadata must be saved so it can be * restored when the page is swapped back in. SPARC M7 and newer * processors support an ADI (Application Data Integrity) tag for the * page as metadata for the page. arch_unmap_one() can save this * metadata on a swap-out of a page. */ static inline int arch_unmap_one(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t orig_pte) { return 0; } #endif #ifndef __HAVE_ARCH_PGD_OFFSET_GATE #define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) #endif #ifndef __HAVE_ARCH_MOVE_PTE #define move_pte(pte, prot, old_addr, new_addr) (pte) #endif #ifndef pte_accessible # define pte_accessible(mm, pte) ((void)(pte), 1) #endif #ifndef flush_tlb_fix_spurious_fault #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) #endif #ifndef pgprot_noncached #define pgprot_noncached(prot) (prot) #endif #ifndef pgprot_writecombine #define pgprot_writecombine pgprot_noncached #endif #ifndef pgprot_writethrough #define pgprot_writethrough pgprot_noncached #endif #ifndef pgprot_device #define pgprot_device pgprot_noncached #endif #ifndef pgprot_modify #define pgprot_modify pgprot_modify static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) { if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) newprot = pgprot_noncached(newprot); if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) newprot = pgprot_writecombine(newprot); if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) newprot = pgprot_device(newprot); return newprot; } #endif /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. Although no * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. */ #define pgd_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #ifndef p4d_addr_end #define p4d_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif #ifndef pud_addr_end #define pud_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif #ifndef pmd_addr_end #define pmd_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif /* * When walking page tables, we usually want to skip any p?d_none entries; * and any p?d_bad entries - reporting the error before resetting to none. * Do the tests inline, but report and clear the bad entry in mm/memory.c. 
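 *
 * A typical walker combines these with the p?d_addr_end() helpers above,
 * e.g. (sketch of the usual pattern in mm/):
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		// ... operate on the ptes covering [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);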
*/ void pgd_clear_bad(pgd_t *); void p4d_clear_bad(p4d_t *); void pud_clear_bad(pud_t *); void pmd_clear_bad(pmd_t *); static inline int pgd_none_or_clear_bad(pgd_t *pgd) { if (pgd_none(*pgd)) return 1; if (unlikely(pgd_bad(*pgd))) { pgd_clear_bad(pgd); return 1; } return 0; } static inline int p4d_none_or_clear_bad(p4d_t *p4d) { if (p4d_none(*p4d)) return 1; if (unlikely(p4d_bad(*p4d))) { p4d_clear_bad(p4d); return 1; } return 0; } static inline int pud_none_or_clear_bad(pud_t *pud) { if (pud_none(*pud)) return 1; if (unlikely(pud_bad(*pud))) { pud_clear_bad(pud); return 1; } return 0; } static inline int pmd_none_or_clear_bad(pmd_t *pmd) { if (pmd_none(*pmd)) return 1; if (unlikely(pmd_bad(*pmd))) { pmd_clear_bad(pmd); return 1; } return 0; } static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { /* * Get the current pte state, but zero it out to make it * non-present, preventing the hardware from asynchronously * updating it. */ return ptep_get_and_clear(vma->vm_mm, addr, ptep); } static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte) { /* * The pte is non-present, so there's no hardware state to * preserve. */ set_pte_at(vma->vm_mm, addr, ptep, pte); } #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION /* * Start a pte protection read-modify-write transaction, which * protects against asynchronous hardware modifications to the pte. * The intention is not to prevent the hardware from making pte * updates, but to prevent any updates it may make from being lost. * * This does not protect against other software modifications of the * pte; the appropriate pte lock must be held over the transaction. * * Note that this interface is intended to be batchable, meaning that * ptep_modify_prot_commit may not actually update the pte, but merely * queue the update to be done at some later time. The update must be * actually committed before the pte lock is released, however. */ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return __ptep_modify_prot_start(vma, addr, ptep); } /* * Commit an update to a pte, leaving any hardware-controlled bits in * the PTE unmodified. */ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { __ptep_modify_prot_commit(vma, addr, ptep, pte); } #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ #endif /* CONFIG_MMU */ /* * No-op macros that just return the current protection value. Defined here * because these macros can be used even if CONFIG_MMU is not defined. */ #ifndef pgprot_encrypted #define pgprot_encrypted(prot) (prot) #endif #ifndef pgprot_decrypted #define pgprot_decrypted(prot) (prot) #endif /* * A facility to provide lazy MMU batching. This allows PTE updates and * page invalidations to be delayed until a call to leave lazy MMU mode * is issued. Some architectures may benefit from doing this, and it is * beneficial for both shadow and direct mode hypervisors, which may batch * the PTE updates which happen during this window. Note that using this * interface requires that read hazards be removed from the code. A read * hazard could result in the direct mode hypervisor case, since the actual * write to the page tables may not yet have taken place, so reads through * a raw PTE pointer after it has been modified are not guaranteed to be * up to date.
This mode can only be entered and left under the protection of * the page table locks for all page tables which may be modified. In the UP * case, this is required so that preemption is disabled, and in the SMP case, * it must synchronize the delayed page table writes properly on other CPUs. */ #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE #define arch_enter_lazy_mmu_mode() do {} while (0) #define arch_leave_lazy_mmu_mode() do {} while (0) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif /* * A facility to provide batching of the reload of page tables and * other process state with the actual context switch code for * paravirtualized guests. By convention, only one of the batched * update (lazy) modes (CPU, MMU) should be active at any given time, * entry should never be nested, and entry and exits should always be * paired. This is for sanity of maintaining and reasoning about the * kernel code. In this case, the exit (end of the context switch) is * in architecture-specific code, and so doesn't need a generic * definition. */ #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH #define arch_start_context_switch(prev) do {} while (0) #endif #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) { return pmd; } static inline int pmd_swp_soft_dirty(pmd_t pmd) { return 0; } static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) { return pmd; } #endif #else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */ static inline int pte_soft_dirty(pte_t pte) { return 0; } static inline int pmd_soft_dirty(pmd_t pmd) { return 0; } static inline pte_t pte_mksoft_dirty(pte_t pte) { return pte; } static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) { return pmd; } static inline pte_t pte_clear_soft_dirty(pte_t pte) { return pte; } static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) { return pmd; } static inline pte_t pte_swp_mksoft_dirty(pte_t pte) { return pte; } static inline int pte_swp_soft_dirty(pte_t pte) { return 0; } static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) { return pte; } static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) { return pmd; } static inline int pmd_swp_soft_dirty(pmd_t pmd) { return 0; } static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) { return pmd; } #endif #ifndef __HAVE_PFNMAP_TRACKING /* * Interfaces that can be used by architecture code to keep track of * memory type of pfn mappings specified by the remap_pfn_range, * vmf_insert_pfn. */ /* * track_pfn_remap is called when a _new_ pfn mapping is being established * by remap_pfn_range() for physical range indicated by pfn and size. */ static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long addr, unsigned long size) { return 0; } /* * track_pfn_insert is called when a _new_ single pfn is established * by vmf_insert_pfn(). */ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) { } /* * track_pfn_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). */ static inline int track_pfn_copy(struct vm_area_struct *vma) { return 0; } /* * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case pfn, size are zero). */ static inline void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { } /* * untrack_pfn_moved is called while mremapping a pfnmap for a new region. 
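 *
 * None of these hooks are called by drivers directly; the core mm invokes
 * them. E.g. a driver mmap like the sketch below reaches track_pfn_remap()
 * via remap_pfn_range() (my_device_pfn() is made up):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = my_device_pfn(file);
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}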
*/ static inline void untrack_pfn_moved(struct vm_area_struct *vma) { } #else extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long addr, unsigned long size); extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn); extern int track_pfn_copy(struct vm_area_struct *vma); extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size); extern void untrack_pfn_moved(struct vm_area_struct *vma); #endif #ifdef __HAVE_COLOR_ZERO_PAGE static inline int is_zero_pfn(unsigned long pfn) { extern unsigned long zero_pfn; unsigned long offset_from_zero_pfn = pfn - zero_pfn; return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); } #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) #else static inline int is_zero_pfn(unsigned long pfn) { extern unsigned long zero_pfn; return pfn == zero_pfn; } static inline unsigned long my_zero_pfn(unsigned long addr) { extern unsigned long zero_pfn; return zero_pfn; } #endif #ifdef CONFIG_MMU #ifndef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_trans_huge(pmd_t pmd) { return 0; } #ifndef pmd_write static inline int pmd_write(pmd_t pmd) { BUG(); return 0; } #endif /* pmd_write */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifndef pud_write static inline int pud_write(pud_t pud) { BUG(); return 0; } #endif /* pud_write */ #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) static inline int pud_trans_huge(pud_t pud) { return 0; } #endif #ifndef pmd_read_atomic static inline pmd_t pmd_read_atomic(pmd_t *pmdp) { /* * Depend on compiler for an atomic pmd read. NOTE: this is * only going to work, if the pmdval_t isn't larger than * an unsigned long. */ return *pmdp; } #endif #ifndef arch_needs_pgtable_deposit #define arch_needs_pgtable_deposit() (false) #endif /* * This function is meant to be used by sites walking pagetables with * the mmap_sem hold in read mode to protect against MADV_DONTNEED and * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd * into a null pmd and the transhuge page fault can convert a null pmd * into an hugepmd or into a regular pmd (if the hugepage allocation * fails). While holding the mmap_sem in read mode the pmd becomes * stable and stops changing under us only if it's not null and not a * transhuge pmd. When those races occurs and this function makes a * difference vs the standard pmd_none_or_clear_bad, the result is * undefined so behaving like if the pmd was none is safe (because it * can return none anyway). The compiler level barrier() is critically * important to compute the two checks atomically on the same pmdval. * * For 32bit kernels with a 64bit large pmd_t this automatically takes * care of reading the pmd atomically to avoid SMP race conditions * against pmd_populate() when the mmap_sem is hold for reading by the * caller (a special atomic read not done by "gcc" as in the generic * version above, is also needed when THP is disabled because the page * fault can populate the pmd from under us). */ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) { pmd_t pmdval = pmd_read_atomic(pmd); /* * The barrier will stabilize the pmdval in a register or on * the stack so that it will stop changing under the code. * * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, * pmd_read_atomic is allowed to return a not atomic pmdval * (for example pointing to an hugepage that has never been * mapped in the pmd). 
The below checks will only care about * the low part of the pmd with 32bit PAE x86 anyway, with the * exception of pmd_none(). So the important thing is that if * the low part of the pmd is found null, the high part will * also be null or the pmd_none() check below would be * confused. */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif /* * !pmd_present() checks for pmd migration entries * * The complete check uses is_pmd_migration_entry() in linux/swapops.h * But using that requires moving current function and pmd_trans_unstable() * to linux/swapops.h to resolve the dependency, which is too much code move. * * !pmd_present() is equivalent to is_pmd_migration_entry() currently, * because !pmd_present() pages can only be under migration, not swapped * out. * * pmd_none() is preserved for future condition checks on pmd migration * entries and to avoid confusion with this function's name, although it is * redundant with !pmd_present(). */ if (pmd_none(pmdval) || pmd_trans_huge(pmdval) || (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval))) return 1; if (unlikely(pmd_bad(pmdval))) { pmd_clear_bad(pmd); return 1; } return 0; } /* * This is a noop if Transparent Hugepage Support is not built into * the kernel. Otherwise it is equivalent to * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in * places that already verified the pmd is not none and they want to * walk ptes while holding the mmap sem in read mode (write mode doesn't * need this). If THP is not enabled, the pmd can't go away under the * code even if MADV_DONTNEED runs, but if THP is enabled we need to * run a pmd_trans_unstable before walking the ptes after * split_huge_pmd returns (because it may have run when the pmd became * null, but then a page fault can map in a THP and not a regular page). */ static inline int pmd_trans_unstable(pmd_t *pmd) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE return pmd_none_or_trans_huge_or_clear_bad(pmd); #else return 0; #endif } #ifndef CONFIG_NUMA_BALANCING /* * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but * the only case the kernel cares about is NUMA balancing, and it is only ever * set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked * _PAGE_PROTNONE so by default, implement the helper as "always no". It * is the responsibility of the caller to distinguish between PROT_NONE * protections and NUMA hinting fault protections.
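 *
 * For context, the NUMA-balancing fault path uses these helpers roughly as
 * follows (sketch of the pte check in mm/memory.c):
 *
 *	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
 *		return do_numa_page(vmf);	// handle as a NUMA hinting fault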
*/ static inline int pte_protnone(pte_t pte) { return 0; } static inline int pmd_protnone(pmd_t pmd) { return 0; } #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_MMU */ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP #ifndef __PAGETABLE_P4D_FOLDED int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); int p4d_clear_huge(p4d_t *p4d); #else static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) { return 0; } static inline int p4d_clear_huge(p4d_t *p4d) { return 0; } #endif /* !__PAGETABLE_P4D_FOLDED */ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); int pud_free_pmd_page(pud_t *pud, unsigned long addr); int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) { return 0; } static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { return 0; } static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) { return 0; } static inline int p4d_clear_huge(p4d_t *p4d) { return 0; } static inline int pud_clear_huge(pud_t *pud) { return 0; } static inline int pmd_clear_huge(pmd_t *pmd) { return 0; } static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) { return 0; } static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) { return 0; } static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { return 0; } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * ARCHes with special requirements for evicting THP backing TLB entries can * implement this. Even otherwise, it can help optimize normal TLB flushes in * the THP regime. Stock flush_tlb_range() typically has an optimization to * nuke the entire TLB if the flush span is greater than a threshold, which * will likely be true for a single huge page. Thus a single thp flush will * invalidate the entire TLB, which is not desirable. * e.g. see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #else #define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() #define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() #endif #endif struct file; int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot); #ifndef CONFIG_X86_ESPFIX64 static inline void init_espfix_bsp(void) { } #endif extern void __init pgtable_cache_init(void); #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) { return true; } static inline bool arch_has_pfn_modify_check(void) { return false; } #endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ /* * Architecture PAGE_KERNEL_* fallbacks * * Some architectures don't define certain PAGE_KERNEL_* flags. This is either * because they really don't support them, or the port needs to be updated to * reflect the required functionality. Below are a set of relatively safe * fallbacks, as best effort, which we can count on in lieu of the architectures * not defining them on their own yet.
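 *
 * With the fallbacks in place, generic code can ask for, say, an
 * executable kernel mapping without any #ifdefs, e.g. (sketch, assuming
 * the pgprot-taking __vmalloc() variant of this kernel era):
 *
 *	buf = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);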
*/ #ifndef PAGE_KERNEL_RO # define PAGE_KERNEL_RO PAGE_KERNEL #endif #ifndef PAGE_KERNEL_EXEC # define PAGE_KERNEL_EXEC PAGE_KERNEL #endif #endif /* !__ASSEMBLY__ */ #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT) #ifdef CONFIG_PHYS_ADDR_T_64BIT /* * ZSMALLOC needs to know the highest PFN on 32-bit architectures * with physical address space extension, but falls back to * BITS_PER_LONG otherwise. */ #error Missing MAX_POSSIBLE_PHYSMEM_BITS definition #else #define MAX_POSSIBLE_PHYSMEM_BITS 32 #endif #endif #ifndef has_transparent_hugepage #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define has_transparent_hugepage() 1 #else #define has_transparent_hugepage() 0 #endif #endif #ifndef p4d_offset_lockless #define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address) #endif #ifndef pud_offset_lockless #define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address) #endif #ifndef pmd_offset_lockless #define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address) #endif /* * On some architectures it depends on the mm if the p4d/pud or pmd * layer of the page table hierarchy is folded or not. */ #ifndef mm_p4d_folded #define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED) #endif #ifndef mm_pud_folded #define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED) #endif #ifndef mm_pmd_folded #define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED) #endif #endif /* _ASM_GENERIC_PGTABLE_H */ hugetlb.h 0000644 00000005715 14722073052 0006361 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_HUGETLB_H #define _ASM_GENERIC_HUGETLB_H static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) { return mk_pte(page, pgprot); } static inline unsigned long huge_pte_write(pte_t pte) { return pte_write(pte); } static inline unsigned long huge_pte_dirty(pte_t pte) { return pte_dirty(pte); } static inline pte_t huge_pte_mkwrite(pte_t pte) { return pte_mkwrite(pte); } static inline pte_t huge_pte_mkdirty(pte_t pte) { return pte_mkdirty(pte); } static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) { return pte_modify(pte, newprot); } #ifndef __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) { pte_clear(mm, addr, ptep); } #endif #ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { free_pgd_range(tlb, addr, end, floor, ceiling); } #endif #ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { set_pte_at(mm, addr, ptep, pte); } #endif #ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { return ptep_get_and_clear(mm, addr, ptep); } #endif #ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { ptep_clear_flush(vma, addr, ptep); } #endif #ifndef __HAVE_ARCH_HUGE_PTE_NONE static inline int huge_pte_none(pte_t pte) { return pte_none(pte); } #endif #ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT static inline pte_t huge_pte_wrprotect(pte_t pte) { return pte_wrprotect(pte); } #endif #ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE static inline int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len) { struct hstate *h = 
hstate_file(file); if (len & ~huge_page_mask(h)) return -EINVAL; if (addr & ~huge_page_mask(h)) return -EINVAL; return 0; } #endif #ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { ptep_set_wrprotect(mm, addr, ptep); } #endif #ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { return ptep_set_access_flags(vma, addr, ptep, pte, dirty); } #endif #ifndef __HAVE_ARCH_HUGE_PTEP_GET static inline pte_t huge_ptep_get(pte_t *ptep) { return *ptep; } #endif #ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED static inline bool gigantic_page_runtime_supported(void) { return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE); } #endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */ #endif /* _ASM_GENERIC_HUGETLB_H */ atomic.h 0000644 00000010170 14722073052 0006172 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Generic C implementation of atomic counter operations. Usable on * UP systems only. Do not include in machine independent code. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef __ASM_GENERIC_ATOMIC_H #define __ASM_GENERIC_ATOMIC_H #include <asm/cmpxchg.h> #include <asm/barrier.h> /* * atomic_$op() - $op integer to atomic variable * @i: integer value to $op * @v: pointer to the atomic variable * * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use * smp_mb__{before,after}_atomic(). */ /* * atomic_$op_return() - $op integer to atomic variable and returns the result * @i: integer value to $op * @v: pointer to the atomic variable * * Atomically $ops @i to @v. Does imply a full memory barrier.
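 *
 * Usage is the same as for any other atomic_t implementation, e.g.
 * (sketch):
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	atomic_add(5, &v);			// no return value, no implied barrier
 *	if (atomic_sub_return(5, &v) == 0)	// returns new value, full barrier
 *		do_cleanup();			// made-up callee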
*/ #ifdef CONFIG_SMP /* we can build all atomic primitives from cmpxchg */ #define ATOMIC_OP(op, c_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ } #define ATOMIC_OP_RETURN(op, c_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c c_op i; \ } #define ATOMIC_FETCH_OP(op, c_op) \ static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c; \ } #else #include <linux/irqflags.h> #define ATOMIC_OP(op, c_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ \ raw_local_irq_save(flags); \ v->counter = v->counter c_op i; \ raw_local_irq_restore(flags); \ } #define ATOMIC_OP_RETURN(op, c_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ \ raw_local_irq_save(flags); \ ret = (v->counter = v->counter c_op i); \ raw_local_irq_restore(flags); \ \ return ret; \ } #define ATOMIC_FETCH_OP(op, c_op) \ static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ \ raw_local_irq_save(flags); \ ret = v->counter; \ v->counter = v->counter c_op i; \ raw_local_irq_restore(flags); \ \ return ret; \ } #endif /* CONFIG_SMP */ #ifndef atomic_add_return ATOMIC_OP_RETURN(add, +) #endif #ifndef atomic_sub_return ATOMIC_OP_RETURN(sub, -) #endif #ifndef atomic_fetch_add ATOMIC_FETCH_OP(add, +) #endif #ifndef atomic_fetch_sub ATOMIC_FETCH_OP(sub, -) #endif #ifndef atomic_fetch_and ATOMIC_FETCH_OP(and, &) #endif #ifndef atomic_fetch_or ATOMIC_FETCH_OP(or, |) #endif #ifndef atomic_fetch_xor ATOMIC_FETCH_OP(xor, ^) #endif #ifndef atomic_and ATOMIC_OP(and, &) #endif #ifndef atomic_or ATOMIC_OP(or, |) #endif #ifndef atomic_xor ATOMIC_OP(xor, ^) #endif #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. */ #define ATOMIC_INIT(i) { (i) } /** * atomic_read - read atomic variable * @v: pointer of type atomic_t * * Atomically reads the value of @v. */ #ifndef atomic_read #define atomic_read(v) READ_ONCE((v)->counter) #endif /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value * * Atomically sets the value of @v to @i. 
*/ #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #include <linux/irqflags.h> static inline void atomic_add(int i, atomic_t *v) { atomic_add_return(i, v); } static inline void atomic_sub(int i, atomic_t *v) { atomic_sub_return(i, v); } #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #endif /* __ASM_GENERIC_ATOMIC_H */ div64.h 0000644 00000016560 14722073052 0005663 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_DIV64_H #define _ASM_GENERIC_DIV64_H /* * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com> * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h * * Optimization for constant divisors on 32-bit machines: * Copyright (C) 2006-2015 Nicolas Pitre * * The semantics of do_div() are: * * uint32_t do_div(uint64_t *n, uint32_t base) * { * uint32_t remainder = *n % base; * *n = *n / base; * return remainder; * } * * NOTE: macro parameter n is evaluated multiple times, * beware of side effects! */ #include <linux/types.h> #include <linux/compiler.h> #if BITS_PER_LONG == 64 /** * do_div - returns 2 values: calculate remainder and update new dividend * @n: uint64_t dividend (will be updated) * @base: uint32_t divisor * * Summary: * ``uint32_t remainder = n % base;`` * ``n = n / base;`` * * Return: (uint32_t)remainder * * NOTE: macro parameter @n is evaluated multiple times, * beware of side effects! */ # define do_div(n,base) ({ \ uint32_t __base = (base); \ uint32_t __rem; \ __rem = ((uint64_t)(n)) % __base; \ (n) = ((uint64_t)(n)) / __base; \ __rem; \ }) #elif BITS_PER_LONG == 32 #include <linux/log2.h> /* * If the divisor happens to be constant, we determine the appropriate * inverse at compile time to turn the division into a few inline * multiplications which ought to be much faster. And yet only if compiling * with a sufficiently recent gcc version to perform proper 64-bit constant * propagation. * * (It is unfortunate that gcc doesn't perform all this internally.) */ #ifndef __div64_const32_is_OK #define __div64_const32_is_OK (__GNUC__ >= 4) #endif #define __div64_const32(n, ___b) \ ({ \ /* \ * Multiplication by reciprocal of b: n / b = n * (p / b) / p \ * \ * We rely on the fact that most of this code gets optimized \ * away at compile time due to constant propagation and only \ * a few multiplication instructions should remain. \ * Hence this monstrous macro (static inline doesn't always \ * do the trick here). \ */ \ uint64_t ___res, ___x, ___t, ___m, ___n = (n); \ uint32_t ___p, ___bias; \ \ /* determine MSB of b */ \ ___p = 1 << ilog2(___b); \ \ /* compute m = ((p << 64) + b - 1) / b */ \ ___m = (~0ULL / ___b) * ___p; \ ___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b; \ \ /* one less than the dividend with highest result */ \ ___x = ~0ULL / ___b * ___b - 1; \ \ /* test our ___m with res = m * x / (p << 64) */ \ ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \ ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \ ___res += (___x & 0xffffffff) * (___m >> 32); \ ___t = (___res < ___t) ? (1ULL << 32) : 0; \ ___res = (___res >> 32) + ___t; \ ___res += (___m >> 32) * (___x >> 32); \ ___res /= ___p; \ \ /* Now sanitize and optimize what we've got. */ \ if (~0ULL % (___b / (___b & -___b)) == 0) { \ /* special case, can be simplified to ... 
*/ \ ___n /= (___b & -___b); \ ___m = ~0ULL / (___b / (___b & -___b)); \ ___p = 1; \ ___bias = 1; \ } else if (___res != ___x / ___b) { \ /* \ * We can't get away without a bias to compensate \ * for bit truncation errors. To avoid it we'd need an \ * additional bit to represent m which would overflow \ * a 64-bit variable. \ * \ * Instead we do m = p / b and n / b = (n * m + m) / p. \ */ \ ___bias = 1; \ /* Compute m = (p << 64) / b */ \ ___m = (~0ULL / ___b) * ___p; \ ___m += ((~0ULL % ___b + 1) * ___p) / ___b; \ } else { \ /* \ * Reduce m / p, and try to clear bit 31 of m when \ * possible, otherwise that'll need extra overflow \ * handling later. \ */ \ uint32_t ___bits = -(___m & -___m); \ ___bits |= ___m >> 32; \ ___bits = (~___bits) << 1; \ /* \ * If ___bits == 0 then setting bit 31 is unavoidable. \ * Simply apply the maximum possible reduction in that \ * case. Otherwise the MSB of ___bits indicates the \ * best reduction we should apply. \ */ \ if (!___bits) { \ ___p /= (___m & -___m); \ ___m /= (___m & -___m); \ } else { \ ___p >>= ilog2(___bits); \ ___m >>= ilog2(___bits); \ } \ /* No bias needed. */ \ ___bias = 0; \ } \ \ /* \ * Now we have a combination of 2 conditions: \ * \ * 1) whether or not we need to apply a bias, and \ * \ * 2) whether or not there might be an overflow in the cross \ * product determined by (___m & ((1 << 63) | (1 << 31))). \ * \ * Select the best way to do (m_bias + m * n) / (1 << 64). \ * From now on there will be actual runtime code generated. \ */ \ ___res = __arch_xprod_64(___m, ___n, ___bias); \ \ ___res /= ___p; \ }) #ifndef __arch_xprod_64 /* * Default C implementation for __arch_xprod_64() * * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) * Semantic: retval = ((bias ? m : 0) + m * n) >> 64 * * The product is a 128-bit value, scaled down to 64 bits. * Assuming constant propagation to optimize away unused conditional code. * Architectures may provide their own optimized assembly implementation. 
*/ static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) { uint32_t m_lo = m; uint32_t m_hi = m >> 32; uint32_t n_lo = n; uint32_t n_hi = n >> 32; uint64_t res; uint32_t res_lo, res_hi, tmp; if (!bias) { res = ((uint64_t)m_lo * n_lo) >> 32; } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) { /* there can't be any overflow here */ res = (m + (uint64_t)m_lo * n_lo) >> 32; } else { res = m + (uint64_t)m_lo * n_lo; res_lo = res >> 32; res_hi = (res_lo < m_hi); res = res_lo | ((uint64_t)res_hi << 32); } if (!(m & ((1ULL << 63) | (1ULL << 31)))) { /* there can't be any overflow here */ res += (uint64_t)m_lo * n_hi; res += (uint64_t)m_hi * n_lo; res >>= 32; } else { res += (uint64_t)m_lo * n_hi; tmp = res >> 32; res += (uint64_t)m_hi * n_lo; res_lo = res >> 32; res_hi = (res_lo < tmp); res = res_lo | ((uint64_t)res_hi << 32); } res += (uint64_t)m_hi * n_hi; return res; } #endif #ifndef __div64_32 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); #endif /* The unnecessary pointer compare is there * to check for type safety (n must be 64bit) */ # define do_div(n,base) ({ \ uint32_t __base = (base); \ uint32_t __rem; \ (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ if (__builtin_constant_p(__base) && \ is_power_of_2(__base)) { \ __rem = (n) & (__base - 1); \ (n) >>= ilog2(__base); \ } else if (__div64_const32_is_OK && \ __builtin_constant_p(__base) && \ __base != 0) { \ uint32_t __res_lo, __n_lo = (n); \ (n) = __div64_const32(n, __base); \ /* the remainder can be computed with 32-bit regs */ \ __res_lo = (n); \ __rem = __n_lo - __res_lo * __base; \ } else if (likely(((n) >> 32) == 0)) { \ __rem = (uint32_t)(n) % __base; \ (n) = (uint32_t)(n) / __base; \ } else \ __rem = __div64_32(&(n), __base); \ __rem; \ }) #else /* BITS_PER_LONG == ?? */ # error do_div() does not yet support the C64 #endif /* BITS_PER_LONG */ #endif /* _ASM_GENERIC_DIV64_H */ iomap.h 0000644 00000007701 14722073052 0006031 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __GENERIC_IO_H #define __GENERIC_IO_H #include <linux/linkage.h> #include <asm/byteorder.h> /* * These are the "generic" interfaces for doing new-style * memory-mapped or PIO accesses. Architectures may do * their own arch-optimized versions, these just act as * wrappers around the old-style IO register access functions: * read[bwl]/write[bwl]/in[bwl]/out[bwl] * * Don't include this directly, include it from <asm/io.h>. */ /* * Read/write from/to an (offsettable) iomem cookie. It might be a PIO * access or a MMIO access, these functions don't care. The info is * encoded in the hardware mapping set up by the mapping functions * (or the cookie itself, depending on implementation and hw). * * The generic routines just encode the PIO/MMIO as part of the * cookie, and coldly assume that the MMIO IO mappings are not * in the low address range. Architectures for which this is not * true can't use this generic implementation. 
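 *
 * Typical PCI driver usage of these cookies, e.g. in a probe() routine
 * (sketch; the BAR number and register offset are made up):
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);	// 0 len = whole BAR 0
 *	u32 v;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	v = ioread32(regs + 0x10);	// works for MMIO and PIO alike
 *	iowrite32(v | 0x1, regs + 0x10);
 *	...
 *	pci_iounmap(pdev, regs);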
*/ extern unsigned int ioread8(void __iomem *); extern unsigned int ioread16(void __iomem *); extern unsigned int ioread16be(void __iomem *); extern unsigned int ioread32(void __iomem *); extern unsigned int ioread32be(void __iomem *); #ifdef CONFIG_64BIT extern u64 ioread64(void __iomem *); extern u64 ioread64be(void __iomem *); #endif #ifdef readq #define ioread64_lo_hi ioread64_lo_hi #define ioread64_hi_lo ioread64_hi_lo #define ioread64be_lo_hi ioread64be_lo_hi #define ioread64be_hi_lo ioread64be_hi_lo extern u64 ioread64_lo_hi(void __iomem *addr); extern u64 ioread64_hi_lo(void __iomem *addr); extern u64 ioread64be_lo_hi(void __iomem *addr); extern u64 ioread64be_hi_lo(void __iomem *addr); #endif extern void iowrite8(u8, void __iomem *); extern void iowrite16(u16, void __iomem *); extern void iowrite16be(u16, void __iomem *); extern void iowrite32(u32, void __iomem *); extern void iowrite32be(u32, void __iomem *); #ifdef CONFIG_64BIT extern void iowrite64(u64, void __iomem *); extern void iowrite64be(u64, void __iomem *); #endif #ifdef writeq #define iowrite64_lo_hi iowrite64_lo_hi #define iowrite64_hi_lo iowrite64_hi_lo #define iowrite64be_lo_hi iowrite64be_lo_hi #define iowrite64be_hi_lo iowrite64be_hi_lo extern void iowrite64_lo_hi(u64 val, void __iomem *addr); extern void iowrite64_hi_lo(u64 val, void __iomem *addr); extern void iowrite64be_lo_hi(u64 val, void __iomem *addr); extern void iowrite64be_hi_lo(u64 val, void __iomem *addr); #endif /* * "string" versions of the above. Note that they * use native byte ordering for the accesses (on * the assumption that IO and memory agree on a * byte order, and CPU byteorder is irrelevant). * * They do _not_ update the port address. If you * want MMIO that copies stuff laid out in MMIO * memory across multiple ports, use "memcpy_toio()" * and friends. */ extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count); extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count); extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count); extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); #ifdef CONFIG_HAS_IOPORT_MAP /* Create a virtual mapping cookie for an IO port range */ extern void __iomem *ioport_map(unsigned long port, unsigned int nr); extern void ioport_unmap(void __iomem *); #endif #ifndef ARCH_HAS_IOREMAP_WC #define ioremap_wc ioremap_nocache #endif #ifndef ARCH_HAS_IOREMAP_WT #define ioremap_wt ioremap_nocache #endif #ifdef CONFIG_PCI /* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ struct pci_dev; extern void pci_iounmap(struct pci_dev *dev, void __iomem *); #elif defined(CONFIG_GENERIC_IOMAP) struct pci_dev; static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) { } #endif #include <asm-generic/pci_iomap.h> #endif pgalloc.h 0000644 00000005425 14722073052 0006346 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PGALLOC_H #define __ASM_GENERIC_PGALLOC_H #ifdef CONFIG_MMU #define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) #define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) /** * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table * @mm: the mm_struct of the current context * * This function is intended for architectures that need * anything beyond simple page allocation. 
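 *
 * A hypothetical architecture override (with __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
 * defined) might build on it like this sketch, where
 * arch_sync_kernel_pgtable() is an assumed arch-private helper:
 *
 *	pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 *	{
 *		pte_t *pte = __pte_alloc_one_kernel(mm);
 *
 *		if (pte)
 *			arch_sync_kernel_pgtable(pte);
 *		return pte;
 *	}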
* * Return: pointer to the allocated memory or %NULL on error */ static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) { return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL); } #ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL /** * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table * @mm: the mm_struct of the current context * * Return: pointer to the allocated memory or %NULL on error */ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { return __pte_alloc_one_kernel(mm); } #endif /** * pte_free_kernel - free PTE-level kernel page table page * @mm: the mm_struct of the current context * @pte: pointer to the memory containing the page table */ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { free_page((unsigned long)pte); } /** * __pte_alloc_one - allocate a page for PTE-level user page table * @mm: the mm_struct of the current context * @gfp: GFP flags to use for the allocation * * Allocates a page and runs the pgtable_pte_page_ctor(). * * This function is intended for architectures that need * anything beyond simple page allocation or must have custom GFP flags. * * Return: `struct page` initialized as page table or %NULL on error */ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) { struct page *pte; pte = alloc_page(gfp); if (!pte) return NULL; if (!pgtable_pte_page_ctor(pte)) { __free_page(pte); return NULL; } return pte; } #ifndef __HAVE_ARCH_PTE_ALLOC_ONE /** * pte_alloc_one - allocate a page for PTE-level user page table * @mm: the mm_struct of the current context * * Allocates a page and runs the pgtable_pte_page_ctor(). * * Return: `struct page` initialized as page table or %NULL on error */ static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { return __pte_alloc_one(mm, GFP_PGTABLE_USER); } #endif /* * Should really implement gc for free page table pages. This could be * done with a reference count in struct page. */ /** * pte_free - free PTE-level user page table page * @mm: the mm_struct of the current context * @pte_page: the `struct page` representing the page table */ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) { pgtable_pte_page_dtor(pte_page); __free_page(pte_page); } #endif /* CONFIG_MMU */ #endif /* __ASM_GENERIC_PGALLOC_H */ mmu_context.h 0000644 00000001561 14722073052 0007264 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MMU_CONTEXT_H #define __ASM_GENERIC_MMU_CONTEXT_H /* * Generic hooks for NOMMU architectures, which do not need to do * anything special here. */ #include <asm-generic/mm_hooks.h> struct task_struct; struct mm_struct; static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { return 0; } static inline void destroy_context(struct mm_struct *mm) { } static inline void deactivate_mm(struct task_struct *task, struct mm_struct *mm) { } static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { } static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm) { } #endif /* __ASM_GENERIC_MMU_CONTEXT_H */ futex.h 0000644 00000005230 14722073052 0006052 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_FUTEX_H #define _ASM_GENERIC_FUTEX_H #include <linux/futex.h> #include <linux/uaccess.h> #include <asm/errno.h> #ifndef CONFIG_SMP /* * The following implementation only for uniprocessor machines. 
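 * (A typical call from the generic futex code looks like this sketch:
 *
 *	int oldval, ret;
 *	ret = arch_futex_atomic_op_inuser(FUTEX_OP_ADD, 1, &oldval, uaddr);
 *
 * atomically adding 1 to the user word at uaddr and reporting the old
 * value through oldval.)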
* It relies on preempt_disable() ensuring mutual exclusion. * */ /** * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant * argument and comparison of the previous * futex value with another constant. * * @encoded_op: encoded operation to execute * @uaddr: pointer to user space address * * Return: * 0 - On success * -EFAULT - User access resulted in a page fault * -EAGAIN - Atomic operation was unable to complete due to contention * -ENOSYS - Operation not supported */ static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) { int oldval, ret; u32 tmp; preempt_disable(); pagefault_disable(); ret = -EFAULT; if (unlikely(get_user(oldval, uaddr) != 0)) goto out_pagefault_enable; ret = 0; tmp = oldval; switch (op) { case FUTEX_OP_SET: tmp = oparg; break; case FUTEX_OP_ADD: tmp += oparg; break; case FUTEX_OP_OR: tmp |= oparg; break; case FUTEX_OP_ANDN: tmp &= ~oparg; break; case FUTEX_OP_XOR: tmp ^= oparg; break; default: ret = -ENOSYS; } if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0)) ret = -EFAULT; out_pagefault_enable: pagefault_enable(); preempt_enable(); if (ret == 0) *oval = oldval; return ret; } /** * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the * uaddr with newval if the current value is * oldval. * @uval: pointer to store content of @uaddr * @uaddr: pointer to user space address * @oldval: old value * @newval: new value to store to @uaddr * * Return: * 0 - On success * -EFAULT - User access resulted in a page fault * -EAGAIN - Atomic operation was unable to complete due to contention * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG) */ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { u32 val; preempt_disable(); if (unlikely(get_user(val, uaddr) != 0)) { preempt_enable(); return -EFAULT; } if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) { preempt_enable(); return -EFAULT; } *uval = val; preempt_enable(); return 0; } #else static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) { return -ENOSYS; } static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { return -ENOSYS; } #endif /* CONFIG_SMP */ #endif irq_work.h 0000644 00000000302 14722073052 0006547 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_IRQ_WORK_H #define __ASM_IRQ_WORK_H static inline bool arch_irq_work_has_interrupt(void) { return false; } #endif /* __ASM_IRQ_WORK_H */ audit_change_attr.h 0000644 00000000744 14722073052 0010371 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifdef __NR_chmod __NR_chmod, #endif __NR_fchmod, #ifdef __NR_chown __NR_chown, __NR_lchown, #endif #ifdef __NR_fchown __NR_fchown, #endif __NR_setxattr, __NR_lsetxattr, __NR_fsetxattr, __NR_removexattr, __NR_lremovexattr, __NR_fremovexattr, #ifdef __NR_fchownat __NR_fchownat, __NR_fchmodat, #endif #ifdef __NR_chown32 __NR_chown32, __NR_fchown32, __NR_lchown32, #endif #ifdef __NR_link __NR_link, #endif #ifdef __NR_linkat __NR_linkat, #endif mshyperv.h 0000644 00000012437 14722073052 0006603 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Linux-specific definitions for managing interactions with Microsoft's * Hyper-V hypervisor. The definitions in this file are architecture * independent. See arch/<arch>/include/asm/mshyperv.h for definitions * that are specific to architecture <arch>. 
* * Definitions that are specified in the Hyper-V Top Level Functional * Spec (TLFS) should not go in this file, but should instead go in * hyperv-tlfs.h. * * Copyright (C) 2019, Microsoft, Inc. * * Author : Michael Kelley <mikelley@microsoft.com> */ #ifndef _ASM_GENERIC_MSHYPERV_H #define _ASM_GENERIC_MSHYPERV_H #include <linux/types.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/cpumask.h> #include <asm/ptrace.h> #include <asm/hyperv-tlfs.h> struct ms_hyperv_info { u32 features; u32 misc_features; u32 hints; u32 nested_features; u32 max_vp_index; u32 max_lp_index; }; extern struct ms_hyperv_info ms_hyperv; extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr); extern u64 hv_do_fast_hypercall8(u16 control, u64 input8); /* Generate the guest OS identifier as described in the Hyper-V TLFS */ static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version, __u64 d_info2) { __u64 guest_id = 0; guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48); guest_id |= (d_info1 << 48); guest_id |= (kernel_version << 16); guest_id |= d_info2; return guest_id; } /* Free the message slot and signal end-of-message if required */ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) { /* * On crash we're reading some other CPU's message page and we need * to be careful: this other CPU may already have cleared the header * and the host may already have delivered some other message there. * If we blindly write msg->header.message_type we're going * to lose it. We can still lose a message of the same type but * we count on the fact that there can only be one * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages * on crash. */ if (cmpxchg(&msg->header.message_type, old_msg_type, HVMSG_NONE) != old_msg_type) return; /* * The cmpxchg() above does an implicit memory barrier to * ensure the write to MessageType (i.e. set to * HVMSG_NONE) happens before we read the * MessagePending and EOMing. Otherwise, the EOMing * will not deliver any more messages since there is * no empty slot. */ if (msg->header.message_flags.msg_pending) { /* * This will cause message queue rescan to * possibly deliver another msg from the * hypervisor */ hv_signal_eom(); } } void hv_setup_vmbus_irq(void (*handler)(void)); void hv_remove_vmbus_irq(void); void hv_enable_vmbus_irq(void); void hv_disable_vmbus_irq(void); void hv_setup_kexec_handler(void (*handler)(void)); void hv_remove_kexec_handler(void); void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); void hv_remove_crash_handler(void); #if IS_ENABLED(CONFIG_HYPERV) /* * Hypervisor's notion of virtual processor ID is different from * Linux' notion of CPU ID. This information can only be retrieved * in the context of the calling CPU. Set up a map for easy access * to this information. */ extern u32 *hv_vp_index; extern u32 hv_max_vp_index; /* Sentinel value for an uninitialized entry in hv_vp_index array */ #define VP_INVAL U32_MAX /** * hv_cpu_number_to_vp_number() - Map CPU to VP. * @cpu_number: CPU number in Linux terms * * This function returns the mapping between the Linux processor * number and the hypervisor's virtual processor number, useful * in making hypercalls and such that talk about specific * processors.
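 * For example, cpumask_to_vpset() below uses it once per CPU in the mask:
 *
 *	vcpu = hv_cpu_number_to_vp_number(cpu);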
* * Return: Virtual processor number in Hyper-V terms */ static inline int hv_cpu_number_to_vp_number(int cpu_number) { return hv_vp_index[cpu_number]; } static inline int cpumask_to_vpset(struct hv_vpset *vpset, const struct cpumask *cpus) { int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; /* valid_bank_mask can represent up to 64 banks */ if (hv_max_vp_index / 64 >= 64) return 0; /* * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex * structs are not cleared between calls, we risk flushing unneeded * vCPUs otherwise. */ for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) vpset->bank_contents[vcpu_bank] = 0; /* * Some banks may end up being empty but this is acceptable. */ for_each_cpu(cpu, cpus) { vcpu = hv_cpu_number_to_vp_number(cpu); if (vcpu == VP_INVAL) return -1; vcpu_bank = vcpu / 64; vcpu_offset = vcpu % 64; __set_bit(vcpu_offset, (unsigned long *) &vpset->bank_contents[vcpu_bank]); if (vcpu_bank >= nr_bank) nr_bank = vcpu_bank + 1; } vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); return nr_bank; } void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); void hyperv_cleanup(void); void hv_setup_sched_clock(void *sched_clock); #else /* CONFIG_HYPERV */ static inline bool hv_is_hyperv_initialized(void) { return false; } static inline void hyperv_cleanup(void) {} #endif /* CONFIG_HYPERV */ #if IS_ENABLED(CONFIG_HYPERV) extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void)); extern void hv_remove_stimer0_irq(int irq); #endif #endif percpu.h 0000644 00000031234 14722073052 0006220 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_PERCPU_H_ #define _ASM_GENERIC_PERCPU_H_ #include <linux/compiler.h> #include <linux/threads.h> #include <linux/percpu-defs.h> #ifdef CONFIG_SMP /* * per_cpu_offset() is the offset that has to be added to a * percpu variable to get to the instance for a certain processor. * * Most arches use the __per_cpu_offset array for those offsets but * some arches have their own ways of determining the offset (x86_64, s390). */ #ifndef __per_cpu_offset extern unsigned long __per_cpu_offset[NR_CPUS]; #define per_cpu_offset(x) (__per_cpu_offset[x]) #endif /* * Determine the offset for the currently active processor. * An arch may define __my_cpu_offset to provide a more effective * means of obtaining the offset to the per cpu variables of the * current processor. */ #ifndef __my_cpu_offset #define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) #endif #ifdef CONFIG_DEBUG_PREEMPT #define my_cpu_offset per_cpu_offset(smp_processor_id()) #else #define my_cpu_offset __my_cpu_offset #endif /* * Arch may define arch_raw_cpu_ptr() to provide more efficient address * translations for raw_cpu_ptr(). 
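 *
 * Conceptually the generic fallback below is nothing more than a byte
 * offset applied to the percpu variable's address, i.e. roughly:
 *
 *	arch_raw_cpu_ptr(&x) == (typeof(&x))((char *)&x + __my_cpu_offset)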
*/ #ifndef arch_raw_cpu_ptr #define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) #endif #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void setup_per_cpu_areas(void); #endif #endif /* SMP */ #ifndef PER_CPU_BASE_SECTION #ifdef CONFIG_SMP #define PER_CPU_BASE_SECTION ".data..percpu" #else #define PER_CPU_BASE_SECTION ".data" #endif #endif #ifndef PER_CPU_ATTRIBUTES #define PER_CPU_ATTRIBUTES #endif #define raw_cpu_generic_read(pcp) \ ({ \ *raw_cpu_ptr(&(pcp)); \ }) #define raw_cpu_generic_to_op(pcp, val, op) \ do { \ *raw_cpu_ptr(&(pcp)) op val; \ } while (0) #define raw_cpu_generic_add_return(pcp, val) \ ({ \ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ \ *__p += val; \ *__p; \ }) #define raw_cpu_generic_xchg(pcp, nval) \ ({ \ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ typeof(pcp) __ret; \ __ret = *__p; \ *__p = nval; \ __ret; \ }) #define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ typeof(pcp) __ret; \ __ret = *__p; \ if (__ret == (oval)) \ *__p = nval; \ __ret; \ }) #define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ ({ \ typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \ typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \ int __ret = 0; \ if (*__p1 == (oval1) && *__p2 == (oval2)) { \ *__p1 = nval1; \ *__p2 = nval2; \ __ret = 1; \ } \ (__ret); \ }) #define __this_cpu_generic_read_nopreempt(pcp) \ ({ \ typeof(pcp) __ret; \ preempt_disable_notrace(); \ __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \ preempt_enable_notrace(); \ __ret; \ }) #define __this_cpu_generic_read_noirq(pcp) \ ({ \ typeof(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_read(pcp); \ raw_local_irq_restore(__flags); \ __ret; \ }) #define this_cpu_generic_read(pcp) \ ({ \ typeof(pcp) __ret; \ if (__native_word(pcp)) \ __ret = __this_cpu_generic_read_nopreempt(pcp); \ else \ __ret = __this_cpu_generic_read_noirq(pcp); \ __ret; \ }) #define this_cpu_generic_to_op(pcp, val, op) \ do { \ unsigned long __flags; \ raw_local_irq_save(__flags); \ raw_cpu_generic_to_op(pcp, val, op); \ raw_local_irq_restore(__flags); \ } while (0) #define this_cpu_generic_add_return(pcp, val) \ ({ \ typeof(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_add_return(pcp, val); \ raw_local_irq_restore(__flags); \ __ret; \ }) #define this_cpu_generic_xchg(pcp, nval) \ ({ \ typeof(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_xchg(pcp, nval); \ raw_local_irq_restore(__flags); \ __ret; \ }) #define this_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ typeof(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ raw_local_irq_restore(__flags); \ __ret; \ }) #define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ ({ \ int __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ oval1, oval2, nval1, nval2); \ raw_local_irq_restore(__flags); \ __ret; \ }) #ifndef raw_cpu_read_1 #define raw_cpu_read_1(pcp) raw_cpu_generic_read(pcp) #endif #ifndef raw_cpu_read_2 #define raw_cpu_read_2(pcp) raw_cpu_generic_read(pcp) #endif #ifndef raw_cpu_read_4 #define raw_cpu_read_4(pcp) raw_cpu_generic_read(pcp) #endif #ifndef raw_cpu_read_8 #define raw_cpu_read_8(pcp) raw_cpu_generic_read(pcp) #endif #ifndef raw_cpu_write_1 #define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =) #endif #ifndef 
raw_cpu_write_2 #define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =) #endif #ifndef raw_cpu_write_4 #define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =) #endif #ifndef raw_cpu_write_8 #define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =) #endif #ifndef raw_cpu_add_1 #define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) #endif #ifndef raw_cpu_add_2 #define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) #endif #ifndef raw_cpu_add_4 #define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) #endif #ifndef raw_cpu_add_8 #define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) #endif #ifndef raw_cpu_and_1 #define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) #endif #ifndef raw_cpu_and_2 #define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) #endif #ifndef raw_cpu_and_4 #define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) #endif #ifndef raw_cpu_and_8 #define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) #endif #ifndef raw_cpu_or_1 #define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) #endif #ifndef raw_cpu_or_2 #define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) #endif #ifndef raw_cpu_or_4 #define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) #endif #ifndef raw_cpu_or_8 #define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) #endif #ifndef raw_cpu_add_return_1 #define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) #endif #ifndef raw_cpu_add_return_2 #define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) #endif #ifndef raw_cpu_add_return_4 #define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) #endif #ifndef raw_cpu_add_return_8 #define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val) #endif #ifndef raw_cpu_xchg_1 #define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) #endif #ifndef raw_cpu_xchg_2 #define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) #endif #ifndef raw_cpu_xchg_4 #define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) #endif #ifndef raw_cpu_xchg_8 #define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) #endif #ifndef raw_cpu_cmpxchg_1 #define raw_cpu_cmpxchg_1(pcp, oval, nval) \ raw_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef raw_cpu_cmpxchg_2 #define raw_cpu_cmpxchg_2(pcp, oval, nval) \ raw_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef raw_cpu_cmpxchg_4 #define raw_cpu_cmpxchg_4(pcp, oval, nval) \ raw_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef raw_cpu_cmpxchg_8 #define raw_cpu_cmpxchg_8(pcp, oval, nval) \ raw_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef raw_cpu_cmpxchg_double_1 #define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) #endif #ifndef raw_cpu_cmpxchg_double_2 #define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) #endif #ifndef raw_cpu_cmpxchg_double_4 #define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) #endif #ifndef raw_cpu_cmpxchg_double_8 #define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) #endif #ifndef this_cpu_read_1 #define this_cpu_read_1(pcp) 
this_cpu_generic_read(pcp) #endif #ifndef this_cpu_read_2 #define this_cpu_read_2(pcp) this_cpu_generic_read(pcp) #endif #ifndef this_cpu_read_4 #define this_cpu_read_4(pcp) this_cpu_generic_read(pcp) #endif #ifndef this_cpu_read_8 #define this_cpu_read_8(pcp) this_cpu_generic_read(pcp) #endif #ifndef this_cpu_write_1 #define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =) #endif #ifndef this_cpu_write_2 #define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =) #endif #ifndef this_cpu_write_4 #define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =) #endif #ifndef this_cpu_write_8 #define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =) #endif #ifndef this_cpu_add_1 #define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=) #endif #ifndef this_cpu_add_2 #define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=) #endif #ifndef this_cpu_add_4 #define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=) #endif #ifndef this_cpu_add_8 #define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=) #endif #ifndef this_cpu_and_1 #define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=) #endif #ifndef this_cpu_and_2 #define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=) #endif #ifndef this_cpu_and_4 #define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=) #endif #ifndef this_cpu_and_8 #define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=) #endif #ifndef this_cpu_or_1 #define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=) #endif #ifndef this_cpu_or_2 #define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=) #endif #ifndef this_cpu_or_4 #define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=) #endif #ifndef this_cpu_or_8 #define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=) #endif #ifndef this_cpu_add_return_1 #define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val) #endif #ifndef this_cpu_add_return_2 #define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val) #endif #ifndef this_cpu_add_return_4 #define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val) #endif #ifndef this_cpu_add_return_8 #define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val) #endif #ifndef this_cpu_xchg_1 #define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval) #endif #ifndef this_cpu_xchg_2 #define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval) #endif #ifndef this_cpu_xchg_4 #define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval) #endif #ifndef this_cpu_xchg_8 #define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval) #endif #ifndef this_cpu_cmpxchg_1 #define this_cpu_cmpxchg_1(pcp, oval, nval) \ this_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef this_cpu_cmpxchg_2 #define this_cpu_cmpxchg_2(pcp, oval, nval) \ this_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef this_cpu_cmpxchg_4 #define this_cpu_cmpxchg_4(pcp, oval, nval) \ this_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef this_cpu_cmpxchg_8 #define this_cpu_cmpxchg_8(pcp, oval, nval) \ this_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef this_cpu_cmpxchg_double_1 #define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) #endif #ifndef this_cpu_cmpxchg_double_2 #define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) #endif #ifndef this_cpu_cmpxchg_double_4 #define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) #endif #ifndef this_cpu_cmpxchg_double_8 #define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) #endif #endif /* _ASM_GENERIC_PERCPU_H_ */ ftrace.h 0000644 00000000472 14722073052 0006166 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/asm-generic/ftrace.h */ #ifndef __ASM_GENERIC_FTRACE_H__ #define __ASM_GENERIC_FTRACE_H__ /* * Not all architectures need their own ftrace.h, the most * common definitions are already in linux/ftrace.h. */ #endif /* __ASM_GENERIC_FTRACE_H__ */ vmlinux.lds.h 0000644 00000072621 14722073052 0007212 0 ustar 00 /* * Helper macros to support writing architecture specific * linker scripts. * * A minimal linker script has the following content: * [This is a sample, architectures may have special requirements] * * OUTPUT_FORMAT(...) * OUTPUT_ARCH(...) * ENTRY(...) * SECTIONS * { * . = START; * __init_begin = .; * HEAD_TEXT_SECTION * INIT_TEXT_SECTION(PAGE_SIZE) * INIT_DATA_SECTION(...) * PERCPU_SECTION(CACHELINE_SIZE) * __init_end = .; * * _stext = .; * TEXT_SECTION = 0 * _etext = .; * * _sdata = .; * RO_DATA_SECTION(PAGE_SIZE) * RW_DATA_SECTION(...) * _edata = .; * * EXCEPTION_TABLE(...) * NOTES * * BSS_SECTION(0, 0, 0) * _end = .; * * STABS_DEBUG * DWARF_DEBUG * * DISCARDS // must be the last * } * * [__init_begin, __init_end] is the init section that may be freed after init * // __init_begin and __init_end should be page aligned, so that we can * // free the whole .init memory * [_stext, _etext] is the text section * [_sdata, _edata] is the data section * * Some of the included output sections have their own set of constants. * Examples are: [__initramfs_start, __initramfs_end] for initramfs and * [__nosave_begin, __nosave_end] for the nosave data */ #ifndef LOAD_OFFSET #define LOAD_OFFSET 0 #endif /* Align . to an 8 byte boundary, which equals the maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) /* * The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which * generates .data.identifier sections, which need to be pulled in with * .data. We don't want to pull in .data..other sections, which Linux * has defined. Same for text and bss. * * RODATA_MAIN is not used because existing code already defines .rodata.x * sections to be brought in with rodata. */ #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else #define TEXT_MAIN .text #define DATA_MAIN .data #define SDATA_MAIN .sdata #define RODATA_MAIN .rodata #define BSS_MAIN .bss #define SBSS_MAIN .sbss #endif /* * Align to a 32 byte boundary equal to the * alignment gcc 4.5 uses for a struct */ #define STRUCT_ALIGNMENT 32 #define STRUCT_ALIGN() .
= ALIGN(STRUCT_ALIGNMENT) /* The actual configuration determine if the init/exit sections * are handled as text/data or they can be discarded (which * often happens at runtime) */ #ifdef CONFIG_HOTPLUG_CPU #define CPU_KEEP(sec) *(.cpu##sec) #define CPU_DISCARD(sec) #else #define CPU_KEEP(sec) #define CPU_DISCARD(sec) *(.cpu##sec) #endif #if defined(CONFIG_MEMORY_HOTPLUG) #define MEM_KEEP(sec) *(.mem##sec) #define MEM_DISCARD(sec) #else #define MEM_KEEP(sec) #define MEM_DISCARD(sec) *(.mem##sec) #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD /* * The ftrace call sites are logged to a section whose name depends on the * compiler option used. A given kernel image will only use one, AKA * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header * dependencies for FTRACE_CALLSITE_SECTION's definition. */ #define MCOUNT_REC() . = ALIGN(8); \ __start_mcount_loc = .; \ KEEP(*(__mcount_loc)) \ KEEP(*(__patchable_function_entries)) \ __stop_mcount_loc = .; #else #define MCOUNT_REC() #endif #ifdef CONFIG_TRACE_BRANCH_PROFILING #define LIKELY_PROFILE() __start_annotated_branch_profile = .; \ KEEP(*(_ftrace_annotated_branch)) \ __stop_annotated_branch_profile = .; #else #define LIKELY_PROFILE() #endif #ifdef CONFIG_PROFILE_ALL_BRANCHES #define BRANCH_PROFILE() __start_branch_profile = .; \ KEEP(*(_ftrace_branch)) \ __stop_branch_profile = .; #else #define BRANCH_PROFILE() #endif #ifdef CONFIG_KPROBES #define KPROBE_BLACKLIST() . = ALIGN(8); \ __start_kprobe_blacklist = .; \ KEEP(*(_kprobe_blacklist)) \ __stop_kprobe_blacklist = .; #else #define KPROBE_BLACKLIST() #endif #ifdef CONFIG_FUNCTION_ERROR_INJECTION #define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ __start_error_injection_whitelist = .; \ KEEP(*(_error_injection_whitelist)) \ __stop_error_injection_whitelist = .; #else #define ERROR_INJECT_WHITELIST() #endif #ifdef CONFIG_EVENT_TRACING #define FTRACE_EVENTS() . = ALIGN(8); \ __start_ftrace_events = .; \ KEEP(*(_ftrace_events)) \ __stop_ftrace_events = .; \ __start_ftrace_eval_maps = .; \ KEEP(*(_ftrace_eval_map)) \ __stop_ftrace_eval_maps = .; #else #define FTRACE_EVENTS() #endif #ifdef CONFIG_TRACING #define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \ __stop___trace_bprintk_fmt = .; #define TRACEPOINT_STR() __start___tracepoint_str = .; \ KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \ __stop___tracepoint_str = .; #else #define TRACE_PRINTKS() #define TRACEPOINT_STR() #endif #ifdef CONFIG_FTRACE_SYSCALLS #define TRACE_SYSCALLS() . = ALIGN(8); \ __start_syscalls_metadata = .; \ KEEP(*(__syscalls_metadata)) \ __stop_syscalls_metadata = .; #else #define TRACE_SYSCALLS() #endif #ifdef CONFIG_BPF_EVENTS #define BPF_RAW_TP() STRUCT_ALIGN(); \ __start__bpf_raw_tp = .; \ KEEP(*(__bpf_raw_tp_map)) \ __stop__bpf_raw_tp = .; #else #define BPF_RAW_TP() #endif #ifdef CONFIG_SERIAL_EARLYCON #define EARLYCON_TABLE() . = ALIGN(8); \ __earlycon_table = .; \ KEEP(*(__earlycon_table)) \ __earlycon_table_end = .; #else #define EARLYCON_TABLE() #endif #ifdef CONFIG_SECURITY #define LSM_TABLE() . = ALIGN(8); \ __start_lsm_info = .; \ KEEP(*(.lsm_info.init)) \ __end_lsm_info = .; #define EARLY_LSM_TABLE() . 
= ALIGN(8); \ __start_early_lsm_info = .; \ KEEP(*(.early_lsm_info.init)) \ __end_early_lsm_info = .; #else #define LSM_TABLE() #define EARLY_LSM_TABLE() #endif #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) #define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name) #define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name) #define _OF_TABLE_0(name) #define _OF_TABLE_1(name) \ . = ALIGN(8); \ __##name##_of_table = .; \ KEEP(*(__##name##_of_table)) \ KEEP(*(__##name##_of_table_end)) #define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer) #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) #ifdef CONFIG_ACPI #define ACPI_PROBE_TABLE(name) \ . = ALIGN(8); \ __##name##_acpi_probe_table = .; \ KEEP(*(__##name##_acpi_probe_table)) \ __##name##_acpi_probe_table_end = .; #else #define ACPI_PROBE_TABLE(name) #endif #ifdef CONFIG_THERMAL #define THERMAL_TABLE(name) \ . = ALIGN(8); \ __##name##_thermal_table = .; \ KEEP(*(__##name##_thermal_table)) \ __##name##_thermal_table_end = .; #else #define THERMAL_TABLE(name) #endif #define KERNEL_DTB() \ STRUCT_ALIGN(); \ __dtb_start = .; \ KEEP(*(.dtb.init.rodata)) \ __dtb_end = .; /* * .data section */ #define DATA_DATA \ *(.xiptext) \ *(DATA_MAIN) \ *(.data..decrypted) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data*) \ MEM_KEEP(exit.data*) \ *(.data.unlikely) \ __start_once = .; \ *(.data.once) \ __end_once = .; \ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ . = ALIGN(8); \ __start___verbose = .; \ KEEP(*(__verbose)) \ __stop___verbose = .; \ LIKELY_PROFILE() \ BRANCH_PROFILE() \ TRACE_PRINTKS() \ BPF_RAW_TP() \ TRACEPOINT_STR() /* * Data section helpers */ #define NOSAVE_DATA \ . = ALIGN(PAGE_SIZE); \ __nosave_begin = .; \ *(.data..nosave) \ . = ALIGN(PAGE_SIZE); \ __nosave_end = .; #define PAGE_ALIGNED_DATA(page_align) \ . = ALIGN(page_align); \ *(.data..page_aligned) \ . = ALIGN(page_align); #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ *(.data..read_mostly) \ . = ALIGN(align); #define CACHELINE_ALIGNED_DATA(align) \ . = ALIGN(align); \ *(.data..cacheline_aligned) #define INIT_TASK_DATA(align) \ . = ALIGN(align); \ __start_init_task = .; \ init_thread_union = .; \ init_stack = .; \ KEEP(*(.data..init_task)) \ KEEP(*(.data..init_thread_info)) \ . = __start_init_task + THREAD_SIZE; \ __end_init_task = .; #define JUMP_TABLE_DATA \ . = ALIGN(8); \ __start___jump_table = .; \ KEEP(*(__jump_table)) \ __stop___jump_table = .; /* * Allow architectures to handle ro_after_init data on their * own by defining an empty RO_AFTER_INIT_DATA. */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ . = ALIGN(8); \ __start_ro_after_init = .; \ *(.data..ro_after_init) \ JUMP_TABLE_DATA \ __end_ro_after_init = .; #endif /* * Read only Data */ #define RO_DATA_SECTION(align) \ . = ALIGN((align)); \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ *(.rodata) *(.rodata.*) \ RO_AFTER_INIT_DATA /* Read only after init */ \ . 
= ALIGN(8); \ __start___tracepoints_ptrs = .; \ KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \ __stop___tracepoints_ptrs = .; \ *(__tracepoints_strings)/* Tracepoints: strings */ \ } \ \ .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ *(.rodata1) \ } \ \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ __start_pci_fixups_early = .; \ KEEP(*(.pci_fixup_early)) \ __end_pci_fixups_early = .; \ __start_pci_fixups_header = .; \ KEEP(*(.pci_fixup_header)) \ __end_pci_fixups_header = .; \ __start_pci_fixups_final = .; \ KEEP(*(.pci_fixup_final)) \ __end_pci_fixups_final = .; \ __start_pci_fixups_enable = .; \ KEEP(*(.pci_fixup_enable)) \ __end_pci_fixups_enable = .; \ __start_pci_fixups_resume = .; \ KEEP(*(.pci_fixup_resume)) \ __end_pci_fixups_resume = .; \ __start_pci_fixups_resume_early = .; \ KEEP(*(.pci_fixup_resume_early)) \ __end_pci_fixups_resume_early = .; \ __start_pci_fixups_suspend = .; \ KEEP(*(.pci_fixup_suspend)) \ __end_pci_fixups_suspend = .; \ __start_pci_fixups_suspend_late = .; \ KEEP(*(.pci_fixup_suspend_late)) \ __end_pci_fixups_suspend_late = .; \ } \ \ /* Built-in firmware blobs */ \ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \ __start_builtin_fw = .; \ KEEP(*(.builtin_fw)) \ __end_builtin_fw = .; \ } \ \ TRACEDATA \ \ /* Kernel symbol table: Normal symbols */ \ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ __start___ksymtab = .; \ KEEP(*(SORT(___ksymtab+*))) \ __stop___ksymtab = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ __start___ksymtab_gpl = .; \ KEEP(*(SORT(___ksymtab_gpl+*))) \ __stop___ksymtab_gpl = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ __start___ksymtab_unused = .; \ KEEP(*(SORT(___ksymtab_unused+*))) \ __stop___ksymtab_unused = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ __start___ksymtab_unused_gpl = .; \ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ __stop___ksymtab_unused_gpl = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ __start___ksymtab_gpl_future = .; \ KEEP(*(SORT(___ksymtab_gpl_future+*))) \ __stop___ksymtab_gpl_future = .; \ } \ \ /* Kernel symbol table: Normal symbols */ \ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ __start___kcrctab = .; \ KEEP(*(SORT(___kcrctab+*))) \ __stop___kcrctab = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ __start___kcrctab_gpl = .; \ KEEP(*(SORT(___kcrctab_gpl+*))) \ __stop___kcrctab_gpl = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ __start___kcrctab_unused = .; \ KEEP(*(SORT(___kcrctab_unused+*))) \ __stop___kcrctab_unused = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ __start___kcrctab_unused_gpl = .; \ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ __stop___kcrctab_unused_gpl = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ __start___kcrctab_gpl_future = .; \ KEEP(*(SORT(___kcrctab_gpl_future+*))) \ __stop___kcrctab_gpl_future = .; \ } \ \ /* Kernel symbol table: strings */ \ __ksymtab_strings : 
AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ *(__ksymtab_strings) \ } \ \ /* __*init sections */ \ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ *(.ref.rodata) \ MEM_KEEP(init.rodata) \ MEM_KEEP(exit.rodata) \ } \ \ /* Built-in module parameters. */ \ __param : AT(ADDR(__param) - LOAD_OFFSET) { \ __start___param = .; \ KEEP(*(__param)) \ __stop___param = .; \ } \ \ /* Built-in module versions. */ \ __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ __start___modver = .; \ KEEP(*(__modver)) \ __stop___modver = .; \ } \ \ BTF \ \ . = ALIGN((align)); \ __end_rodata = .; /* RODATA & RO_DATA provided for backward compatibility. * All archs are supposed to use RO_DATA() */ #define RODATA RO_DATA_SECTION(4096) #define RO_DATA(align) RO_DATA_SECTION(align) /* * Non-instrumentable text section */ #define NOINSTR_TEXT \ ALIGN_FUNCTION(); \ __noinstr_text_start = .; \ *(.noinstr.text) \ __noinstr_text_end = .; /* * .text section. Map to function alignment to avoid address changes * during the second ld pass when generating System.map * * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead * code elimination is enabled, so these sections should be converted * to use ".." first. */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ *(.text.hot .text.hot.*) \ *(TEXT_MAIN .text.fixup) \ *(.text.unlikely .text.unlikely.*) \ *(.text.unknown .text.unknown.*) \ NOINSTR_TEXT \ *(.text..refcount) \ *(.ref.text) \ *(.text.asan.* .text.tsan.*) \ MEM_KEEP(init.text*) \ MEM_KEEP(exit.text*) \ /* sched.text is aligned to function alignment to ensure we have the same * address even at the second ld pass when generating System.map */ #define SCHED_TEXT \ ALIGN_FUNCTION(); \ __sched_text_start = .; \ *(.sched.text) \ __sched_text_end = .; /* spinlock.text is aligned to function alignment to ensure we have the same * address even at the second ld pass when generating System.map */ #define LOCK_TEXT \ ALIGN_FUNCTION(); \ __lock_text_start = .; \ *(.spinlock.text) \ __lock_text_end = .; #define CPUIDLE_TEXT \ ALIGN_FUNCTION(); \ __cpuidle_text_start = .; \ *(.cpuidle.text) \ __cpuidle_text_end = .; #define KPROBES_TEXT \ ALIGN_FUNCTION(); \ __kprobes_text_start = .; \ *(.kprobes.text) \ __kprobes_text_end = .; #define ENTRY_TEXT \ ALIGN_FUNCTION(); \ __entry_text_start = .; \ *(.entry.text) \ __entry_text_end = .; #define IRQENTRY_TEXT \ ALIGN_FUNCTION(); \ __irqentry_text_start = .; \ *(.irqentry.text) \ __irqentry_text_end = .; #define SOFTIRQENTRY_TEXT \ ALIGN_FUNCTION(); \ __softirqentry_text_start = .; \ *(.softirqentry.text) \ __softirqentry_text_end = .; /* Section used for early init (in .S files) */ #define HEAD_TEXT KEEP(*(.head.text)) #define HEAD_TEXT_SECTION \ .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \ HEAD_TEXT \ } /* * Exception table */ #define EXCEPTION_TABLE(align) \ . = ALIGN(align); \ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \ __start___ex_table = .; \ KEEP(*(__ex_table)) \ __stop___ex_table = .; \ } /* * .BTF */ #ifdef CONFIG_DEBUG_INFO_BTF #define BTF \ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ __start_BTF = .; \ KEEP(*(.BTF)) \ __stop_BTF = .; \ } #else #define BTF #endif /* * Init task */ #define INIT_TASK_DATA_SECTION(align) \ . = ALIGN(align); \ .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \ INIT_TASK_DATA(align) \ } #ifdef CONFIG_CONSTRUCTORS #define KERNEL_CTORS() .
= ALIGN(8); \ __ctors_start = .; \ KEEP(*(.ctors)) \ KEEP(*(SORT(.init_array.*))) \ KEEP(*(.init_array)) \ __ctors_end = .; #else #define KERNEL_CTORS() #endif /* init and exit section handling */ #define INIT_DATA \ KEEP(*(SORT(___kentry+*))) \ *(.init.data init.data.*) \ MEM_DISCARD(init.data*) \ KERNEL_CTORS() \ MCOUNT_REC() \ *(.init.rodata .init.rodata.*) \ FTRACE_EVENTS() \ TRACE_SYSCALLS() \ KPROBE_BLACKLIST() \ ERROR_INJECT_WHITELIST() \ MEM_DISCARD(init.rodata) \ CLK_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \ TIMER_OF_TABLES() \ CPU_METHOD_OF_TABLES() \ CPUIDLE_METHOD_OF_TABLES() \ KERNEL_DTB() \ IRQCHIP_OF_MATCH_TABLE() \ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(timer) \ THERMAL_TABLE(governor) \ EARLYCON_TABLE() \ LSM_TABLE() \ EARLY_LSM_TABLE() #define INIT_TEXT \ *(.init.text .init.text.*) \ *(.text.startup) \ MEM_DISCARD(init.text*) #define EXIT_DATA \ *(.exit.data .exit.data.*) \ *(.fini_array .fini_array.*) \ *(.dtors .dtors.*) \ MEM_DISCARD(exit.data*) \ MEM_DISCARD(exit.rodata*) #define EXIT_TEXT \ *(.exit.text) \ *(.text.exit) \ MEM_DISCARD(exit.text) #define EXIT_CALL \ *(.exitcall.exit) /* * bss (Block Started by Symbol) - uninitialized data * zeroed during startup */ #define SBSS(sbss_align) \ . = ALIGN(sbss_align); \ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ *(.dynsbss) \ *(SBSS_MAIN) \ *(.scommon) \ } /* * Allow archectures to redefine BSS_FIRST_SECTIONS to add extra * sections to the front of bss. */ #ifndef BSS_FIRST_SECTIONS #define BSS_FIRST_SECTIONS #endif #define BSS(bss_align) \ . = ALIGN(bss_align); \ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ BSS_FIRST_SECTIONS \ . = ALIGN(PAGE_SIZE); \ *(.bss..page_aligned) \ . = ALIGN(PAGE_SIZE); \ *(.dynbss) \ *(BSS_MAIN) \ *(COMMON) \ } /* * DWARF debug sections. * Symbols in the DWARF debugging sections are relative to * the beginning of the section so we begin them at 0. */ #define DWARF_DEBUG \ /* DWARF 1 */ \ .debug 0 : { *(.debug) } \ .line 0 : { *(.line) } \ /* GNU DWARF 1 extensions */ \ .debug_srcinfo 0 : { *(.debug_srcinfo) } \ .debug_sfnames 0 : { *(.debug_sfnames) } \ /* DWARF 1.1 and DWARF 2 */ \ .debug_aranges 0 : { *(.debug_aranges) } \ .debug_pubnames 0 : { *(.debug_pubnames) } \ /* DWARF 2 */ \ .debug_info 0 : { *(.debug_info \ .gnu.linkonce.wi.*) } \ .debug_abbrev 0 : { *(.debug_abbrev) } \ .debug_line 0 : { *(.debug_line) } \ .debug_frame 0 : { *(.debug_frame) } \ .debug_str 0 : { *(.debug_str) } \ .debug_loc 0 : { *(.debug_loc) } \ .debug_macinfo 0 : { *(.debug_macinfo) } \ .debug_pubtypes 0 : { *(.debug_pubtypes) } \ /* DWARF 3 */ \ .debug_ranges 0 : { *(.debug_ranges) } \ /* SGI/MIPS DWARF 2 extensions */ \ .debug_weaknames 0 : { *(.debug_weaknames) } \ .debug_funcnames 0 : { *(.debug_funcnames) } \ .debug_typenames 0 : { *(.debug_typenames) } \ .debug_varnames 0 : { *(.debug_varnames) } \ /* GNU DWARF 2 extensions */ \ .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \ .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \ /* DWARF 4 */ \ .debug_types 0 : { *(.debug_types) } \ /* DWARF 5 */ \ .debug_addr 0 : { *(.debug_addr) } \ .debug_line_str 0 : { *(.debug_line_str) } \ .debug_loclists 0 : { *(.debug_loclists) } \ .debug_macro 0 : { *(.debug_macro) } \ .debug_names 0 : { *(.debug_names) } \ .debug_rnglists 0 : { *(.debug_rnglists) } \ .debug_str_offsets 0 : { *(.debug_str_offsets) } /* Stabs debugging sections. 
*/ #define STABS_DEBUG \ .stab 0 : { *(.stab) } \ .stabstr 0 : { *(.stabstr) } \ .stab.excl 0 : { *(.stab.excl) } \ .stab.exclstr 0 : { *(.stab.exclstr) } \ .stab.index 0 : { *(.stab.index) } \ .stab.indexstr 0 : { *(.stab.indexstr) } \ .comment 0 : { *(.comment) } #ifdef CONFIG_GENERIC_BUG #define BUG_TABLE \ . = ALIGN(8); \ __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ __start___bug_table = .; \ KEEP(*(__bug_table)) \ __stop___bug_table = .; \ } #else #define BUG_TABLE #endif #ifdef CONFIG_UNWINDER_ORC #define ORC_UNWIND_TABLE \ . = ALIGN(4); \ .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ __start_orc_unwind_ip = .; \ KEEP(*(.orc_unwind_ip)) \ __stop_orc_unwind_ip = .; \ } \ . = ALIGN(2); \ .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ __start_orc_unwind = .; \ KEEP(*(.orc_unwind)) \ __stop_orc_unwind = .; \ } \ . = ALIGN(4); \ .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \ orc_lookup = .; \ . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \ LOOKUP_BLOCK_SIZE) + 1) * 4; \ orc_lookup_end = .; \ } #else #define ORC_UNWIND_TABLE #endif #ifdef CONFIG_PM_TRACE #define TRACEDATA \ . = ALIGN(4); \ .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ __tracedata_start = .; \ KEEP(*(.tracedata)) \ __tracedata_end = .; \ } #else #define TRACEDATA #endif /* * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler. * Otherwise, the type of .notes section would become PROGBITS instead of NOTES. */ #define NOTES \ /DISCARD/ : { *(.note.GNU-stack) } \ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ __start_notes = .; \ KEEP(*(.note.*)) \ __stop_notes = .; \ } #define INIT_SETUP(initsetup_align) \ . = ALIGN(initsetup_align); \ __setup_start = .; \ KEEP(*(.init.setup)) \ __setup_end = .; #define INIT_CALLS_LEVEL(level) \ __initcall##level##_start = .; \ KEEP(*(.initcall##level##.init)) \ KEEP(*(.initcall##level##s.init)) \ #define INIT_CALLS \ __initcall_start = .; \ KEEP(*(.initcallearly.init)) \ INIT_CALLS_LEVEL(0) \ INIT_CALLS_LEVEL(1) \ INIT_CALLS_LEVEL(2) \ INIT_CALLS_LEVEL(3) \ INIT_CALLS_LEVEL(4) \ INIT_CALLS_LEVEL(5) \ INIT_CALLS_LEVEL(rootfs) \ INIT_CALLS_LEVEL(6) \ INIT_CALLS_LEVEL(7) \ __initcall_end = .; #define CON_INITCALL \ __con_initcall_start = .; \ KEEP(*(.con_initcall.init)) \ __con_initcall_end = .; #ifdef CONFIG_BLK_DEV_INITRD #define INIT_RAM_FS \ . = ALIGN(4); \ __initramfs_start = .; \ KEEP(*(.init.ramfs)) \ . = ALIGN(8); \ KEEP(*(.init.ramfs.info)) #else #define INIT_RAM_FS #endif /* * Memory encryption operates on a page basis. Since we need to clear * the memory encryption mask for this section, it needs to be aligned * on a page boundary and be a page-size multiple in length. * * Note: We use a separate section so that only this section gets * decrypted to avoid exposing more than we wish. */ #ifdef CONFIG_AMD_MEM_ENCRYPT #define PERCPU_DECRYPTED_SECTION \ . = ALIGN(PAGE_SIZE); \ *(.data..percpu..decrypted) \ . = ALIGN(PAGE_SIZE); #else #define PERCPU_DECRYPTED_SECTION #endif /* * Default discarded sections. * * Some archs want to discard exit text/data at runtime rather than * link time due to cross-section references such as alt instructions, * bug table, eh_frame, etc. DISCARDS must be the last of output * section definitions so that such archs put those in earlier section * definitions. 
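 *
 * For example (sketch), such an architecture can define
 * RUNTIME_DISCARD_EXIT and keep the exit sections in its own script:
 *
 *	.exit.text : { EXIT_TEXT }	// kept in the image, freed at runtime
 *	...
 *	DISCARDS			// still the last output section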
*/ #ifdef RUNTIME_DISCARD_EXIT #define EXIT_DISCARDS #else #define EXIT_DISCARDS \ EXIT_TEXT \ EXIT_DATA #endif #define DISCARDS \ /DISCARD/ : { \ EXIT_DISCARDS \ EXIT_CALL \ *(.discard) \ *(.discard.*) \ *(.modinfo) \ } /** * PERCPU_INPUT - the percpu input sections * @cacheline: cacheline size * * The core percpu section names and core symbols which do not rely * directly upon load addresses. * * @cacheline is used to align subsections to avoid false cacheline * sharing between subsections for different purposes. */ #define PERCPU_INPUT(cacheline) \ __per_cpu_start = .; \ *(.data..percpu..first) \ . = ALIGN(PAGE_SIZE); \ *(.data..percpu..page_aligned) \ . = ALIGN(cacheline); \ *(.data..percpu..read_mostly) \ . = ALIGN(cacheline); \ *(.data..percpu) \ *(.data..percpu..shared_aligned) \ PERCPU_DECRYPTED_SECTION \ __per_cpu_end = .; /** * PERCPU_VADDR - define output section for percpu area * @cacheline: cacheline size * @vaddr: explicit base address (optional) * @phdr: destination PHDR (optional) * * Macro which expands to output section for percpu area. * * @cacheline is used to align subsections to avoid false cacheline * sharing between subsections for different purposes. * * If @vaddr is not blank, it specifies explicit base address and all * percpu symbols will be offset from the given address. If blank, * @vaddr always equals @laddr + LOAD_OFFSET. * * @phdr defines the output PHDR to use if not blank. Be warned that * output PHDR is sticky. If @phdr is specified, the next output * section in the linker script will go there too. @phdr should have * a leading colon. * * Note that this macros defines __per_cpu_load as an absolute symbol. * If there is no need to put the percpu section at a predetermined * address, use PERCPU_SECTION. */ #define PERCPU_VADDR(cacheline, vaddr, phdr) \ __per_cpu_load = .; \ .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \ PERCPU_INPUT(cacheline) \ } phdr \ . = __per_cpu_load + SIZEOF(.data..percpu); /** * PERCPU_SECTION - define output section for percpu area, simple version * @cacheline: cacheline size * * Align to PAGE_SIZE and outputs output section for percpu area. This * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and * __per_cpu_start will be identical. * * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,) * except that __per_cpu_load is defined as a relative symbol against * .data..percpu which is required for relocatable x86_32 configuration. */ #define PERCPU_SECTION(cacheline) \ . = ALIGN(PAGE_SIZE); \ .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ __per_cpu_load = .; \ PERCPU_INPUT(cacheline) \ } /* * Definition of the high level *_SECTION macros * They will fit only a subset of the architectures */ /* * Writeable data. * All sections are combined in a single .data section. * The sections following CONSTRUCTORS are arranged so their * typical alignment matches. * A cacheline is typical/always less than a PAGE_SIZE so * the sections that has this restriction (or similar) * is located before the ones requiring PAGE_SIZE alignment. * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which * matches the requirement of PAGE_ALIGNED_DATA. * * use 0 as page_align if page_aligned data is not used */ #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ . 
= ALIGN(PAGE_SIZE); \ .data : AT(ADDR(.data) - LOAD_OFFSET) { \ INIT_TASK_DATA(inittask) \ NOSAVE_DATA \ PAGE_ALIGNED_DATA(pagealigned) \ CACHELINE_ALIGNED_DATA(cacheline) \ READ_MOSTLY_DATA(cacheline) \ DATA_DATA \ CONSTRUCTORS \ } \ BUG_TABLE \ #define INIT_TEXT_SECTION(inittext_align) \ . = ALIGN(inittext_align); \ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \ _sinittext = .; \ INIT_TEXT \ _einittext = .; \ } #define INIT_DATA_SECTION(initsetup_align) \ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \ INIT_DATA \ INIT_SETUP(initsetup_align) \ INIT_CALLS \ CON_INITCALL \ INIT_RAM_FS \ } #define BSS_SECTION(sbss_align, bss_align, stop_align) \ . = ALIGN(sbss_align); \ __bss_start = .; \ SBSS(sbss_align) \ BSS(bss_align) \ . = ALIGN(stop_align); \ __bss_stop = .; hardirq.h 0000644 00000001024 14722073052 0006346 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_HARDIRQ_H #define __ASM_GENERIC_HARDIRQ_H #include <linux/cache.h> #include <linux/threads.h> typedef struct { unsigned int __softirq_pending; } ____cacheline_aligned irq_cpustat_t; #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ #include <linux/irq.h> #ifndef ack_bad_irq static inline void ack_bad_irq(unsigned int irq) { printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); } #endif #endif /* __ASM_GENERIC_HARDIRQ_H */ resource.h 0000644 00000002114 14722073052 0006544 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_RESOURCE_H #define _ASM_GENERIC_RESOURCE_H #include <uapi/asm-generic/resource.h> /* * boot-time rlimit defaults for the init task: */ #define INIT_RLIMITS \ { \ [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \ [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_NPROC] = { 0, 0 }, \ [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \ [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \ [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_SIGPENDING] = { 0, 0 }, \ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ [RLIMIT_NICE] = { 0, 0 }, \ [RLIMIT_RTPRIO] = { 0, 0 }, \ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ } #endif qspinlock_types.h 0000644 00000004510 14722073052 0010146 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Queued spinlock * * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. * * Authors: Waiman Long <waiman.long@hp.com> */ #ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H #define __ASM_GENERIC_QSPINLOCK_TYPES_H /* * Including atomic.h with PARAVIRT on will cause compilation errors because * of recursive header file incluson via paravirt_types.h. So don't include * it if PARAVIRT is on. */ #ifndef CONFIG_PARAVIRT #include <linux/types.h> #include <linux/atomic.h> #endif typedef struct qspinlock { union { atomic_t val; /* * By using the whole 2nd least significant byte for the * pending bit, we can allow better optimization of the lock * acquisition for the pending bit holder. 
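 *
 * With a whole byte available, the slowpath can for instance hand over
 * the lock with a single halfword store covering both fields (sketch,
 * mirroring what the qspinlock slowpath does when _Q_PENDING_BITS == 8):
 *
 *	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);	// pending = 0, locked = 1
 */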
*/ #ifdef __LITTLE_ENDIAN struct { u8 locked; u8 pending; }; struct { u16 locked_pending; u16 tail; }; #else struct { u16 tail; u16 locked_pending; }; struct { u8 reserved[2]; u8 pending; u8 locked; }; #endif }; } arch_spinlock_t; /* * Initializer */ #define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } } /* * Bitfields in the atomic value: * * When NR_CPUS < 16K * 0- 7: locked byte * 8: pending * 9-15: not used * 16-17: tail index * 18-31: tail cpu (+1) * * When NR_CPUS >= 16K * 0- 7: locked byte * 8: pending * 9-10: tail index * 11-31: tail cpu (+1) */ #define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\ << _Q_ ## type ## _OFFSET) #define _Q_LOCKED_OFFSET 0 #define _Q_LOCKED_BITS 8 #define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) #define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) #if CONFIG_NR_CPUS < (1U << 14) #define _Q_PENDING_BITS 8 #else #define _Q_PENDING_BITS 1 #endif #define _Q_PENDING_MASK _Q_SET_MASK(PENDING) #define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS) #define _Q_TAIL_IDX_BITS 2 #define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) #define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS) #define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) #define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) #define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET #define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) #endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ pgtable-nopmd.h 0000644 00000003630 14722073052 0007452 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #ifndef __ASSEMBLY__ #include <asm-generic/pgtable-nopud.h> struct mm_struct; #define __PAGETABLE_PMD_FOLDED 1 /* * Having the pmd type consist of a pud gets the size right, and allows * us to conceptually access the pud entry that this pmd is folded into * without casting. */ typedef struct { pud_t pud; } pmd_t; #define PMD_SHIFT PUD_SHIFT #define PTRS_PER_PMD 1 #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) /* * The "pud_xxx()" functions here are trivial for a folded two-level * setup: the pmd is never bad, and a pmd always exists (as it's folded * into the pud entry) */ static inline int pud_none(pud_t pud) { return 0; } static inline int pud_bad(pud_t pud) { return 0; } static inline int pud_present(pud_t pud) { return 1; } static inline void pud_clear(pud_t *pud) { } #define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) #define pud_populate(mm, pmd, pte) do { } while (0) /* * (pmds are folded into puds so this doesn't actually get called, * but the define is needed for a generic inline function.) */ #define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval }) static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) { return (pmd_t *)pud; } #define pmd_val(x) (pud_val((x).pud)) #define __pmd(x) ((pmd_t) { __pud(x) } ) #define pud_page(pud) (pmd_page((pmd_t){ pud })) #define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) /* * allocating and freeing a pmd is trivial: the 1-entry pmd is * inside the pud, so has no extra memory associated with it.
*/ #define pmd_alloc_one(mm, address) NULL static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { } #define __pmd_free_tlb(tlb, x, a) do { } while (0) #undef pmd_addr_end #define pmd_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ #endif /* _PGTABLE_NOPMD_H */ dma-mapping.h 0000644 00000000370 14722073052 0007111 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_DMA_MAPPING_H #define _ASM_GENERIC_DMA_MAPPING_H static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return NULL; } #endif /* _ASM_GENERIC_DMA_MAPPING_H */ module.h 0000644 00000002136 14722073052 0006206 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MODULE_H #define __ASM_GENERIC_MODULE_H /* * Many architectures just need a simple module * loader without arch specific data. */ #ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC struct mod_arch_specific { }; #endif #ifdef CONFIG_64BIT #define Elf_Shdr Elf64_Shdr #define Elf_Phdr Elf64_Phdr #define Elf_Sym Elf64_Sym #define Elf_Dyn Elf64_Dyn #define Elf_Ehdr Elf64_Ehdr #define Elf_Addr Elf64_Addr #ifdef CONFIG_MODULES_USE_ELF_REL #define Elf_Rel Elf64_Rel #endif #ifdef CONFIG_MODULES_USE_ELF_RELA #define Elf_Rela Elf64_Rela #endif #define ELF_R_TYPE(X) ELF64_R_TYPE(X) #define ELF_R_SYM(X) ELF64_R_SYM(X) #else /* CONFIG_64BIT */ #define Elf_Shdr Elf32_Shdr #define Elf_Phdr Elf32_Phdr #define Elf_Sym Elf32_Sym #define Elf_Dyn Elf32_Dyn #define Elf_Ehdr Elf32_Ehdr #define Elf_Addr Elf32_Addr #ifdef CONFIG_MODULES_USE_ELF_REL #define Elf_Rel Elf32_Rel #endif #ifdef CONFIG_MODULES_USE_ELF_RELA #define Elf_Rela Elf32_Rela #endif #define ELF_R_TYPE(X) ELF32_R_TYPE(X) #define ELF_R_SYM(X) ELF32_R_SYM(X) #endif #endif /* __ASM_GENERIC_MODULE_H */ qrwlock_types.h 0000644 00000001252 14722073052 0007625 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_QRWLOCK_TYPES_H #define __ASM_GENERIC_QRWLOCK_TYPES_H #include <linux/types.h> #include <asm/byteorder.h> #include <asm/spinlock_types.h> /* * The queue read/write lock data structure */ typedef struct qrwlock { union { atomic_t cnts; struct { #ifdef __LITTLE_ENDIAN u8 wlocked; /* Locked for write? */ u8 __lstate[3]; #else u8 __lstate[3]; u8 wlocked; /* Locked for write? */ #endif }; }; arch_spinlock_t wait_lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { \ { .cnts = ATOMIC_INIT(0), }, \ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ } #endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */ current.h 0000644 00000000400 14722073052 0006373 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_CURRENT_H #define __ASM_GENERIC_CURRENT_H #include <linux/thread_info.h> #define get_current() (current_thread_info()->task) #define current get_current() #endif /* __ASM_GENERIC_CURRENT_H */ delay.h 0000644 00000002205 14722073052 0006014 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_DELAY_H #define __ASM_GENERIC_DELAY_H /* Undefined functions to get compile-time errors */ extern void __bad_udelay(void); extern void __bad_ndelay(void); extern void __udelay(unsigned long usecs); extern void __ndelay(unsigned long nsecs); extern void __const_udelay(unsigned long xloops); extern void __delay(unsigned long loops); /* * The weird n/20000 thing suppresses a "comparison is always false due to * limited range of data type" warning with non-const 8-bit arguments. 
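 *
 * Note that the same constant check also rejects long busy-waits: a
 * compile-time-constant argument of 20000 or more (20 ms for udelay(),
 * 20 us for ndelay()) fails at link time via __bad_udelay() or
 * __bad_ndelay(), steering long constant delays toward udelay()/mdelay()
 * instead.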
*/ /* 0x10c7 is 2**32 / 1000000 (rounded up) */ #define udelay(n) \ ({ \ if (__builtin_constant_p(n)) { \ if ((n) / 20000 >= 1) \ __bad_udelay(); \ else \ __const_udelay((n) * 0x10c7ul); \ } else { \ __udelay(n); \ } \ }) /* 0x5 is 2**32 / 1000000000 (rounded up) */ #define ndelay(n) \ ({ \ if (__builtin_constant_p(n)) { \ if ((n) / 20000 >= 1) \ __bad_ndelay(); \ else \ __const_udelay((n) * 5ul); \ } else { \ __ndelay(n); \ } \ }) #endif /* __ASM_GENERIC_DELAY_H */ switch_to.h 0000644 00000001363 14722073052 0006725 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Generic task switch macro wrapper. * * It should be possible to use these on really simple architectures, * but it serves more as a starting point for new ports. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef __ASM_GENERIC_SWITCH_TO_H #define __ASM_GENERIC_SWITCH_TO_H #include <linux/thread_info.h> /* * Context switching is now performed out-of-line in switch_to.S */ extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); #define switch_to(prev, next, last) \ do { \ ((last) = __switch_to((prev), (next))); \ } while (0) #endif /* __ASM_GENERIC_SWITCH_TO_H */ dma.h 0000644 00000001051 14722073052 0005455 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_DMA_H #define __ASM_GENERIC_DMA_H /* * This file traditionally describes the i8237 PC style DMA controller. * Most architectures don't have these any more and can get the minimal * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS. * * Some code relies on seeing MAX_DMA_ADDRESS though. */ #define MAX_DMA_ADDRESS PAGE_OFFSET extern int request_dma(unsigned int dmanr, const char *device_id); extern void free_dma(unsigned int dmanr); #endif /* __ASM_GENERIC_DMA_H */ pgtable-nopud.h 0000644 00000004027 14722073052 0007463 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #ifndef __ASSEMBLY__ #ifdef __ARCH_USE_5LEVEL_HACK #include <asm-generic/pgtable-nop4d-hack.h> #else #include <asm-generic/pgtable-nop4d.h> #define __PAGETABLE_PUD_FOLDED 1 /* * Having the pud type consist of a p4d gets the size right, and allows * us to conceptually access the p4d entry that this pud is folded into * without casting. */ typedef struct { p4d_t p4d; } pud_t; #define PUD_SHIFT P4D_SHIFT #define PTRS_PER_PUD 1 #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) /* * The "p4d_xxx()" functions here are trivial for a folded two-level * setup: the pud is never bad, and a pud always exists (as it's folded * into the p4d entry) */ static inline int p4d_none(p4d_t p4d) { return 0; } static inline int p4d_bad(p4d_t p4d) { return 0; } static inline int p4d_present(p4d_t p4d) { return 1; } static inline void p4d_clear(p4d_t *p4d) { } #define pud_ERROR(pud) (p4d_ERROR((pud).p4d)) #define p4d_populate(mm, p4d, pud) do { } while (0) #define p4d_populate_safe(mm, p4d, pud) do { } while (0) /* * (puds are folded into p4ds so this doesn't get actually called, * but the define is needed for a generic inline function.) 
*/ #define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval }) static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) { return (pud_t *)p4d; } #define pud_val(x) (p4d_val((x).p4d)) #define __pud(x) ((pud_t) { __p4d(x) }) #define p4d_page(p4d) (pud_page((pud_t){ p4d })) #define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) /* * allocating and freeing a pud is trivial: the 1-entry pud is * inside the p4d, so has no extra memory associated with it. */ #define pud_alloc_one(mm, address) NULL #define pud_free(mm, x) do { } while (0) #define __pud_free_tlb(tlb, x, a) do { } while (0) #undef pud_addr_end #define pud_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ #endif /* !__ARCH_USE_5LEVEL_HACK */ #endif /* _PGTABLE_NOPUD_H */ kdebug.h 0000644 00000000266 14722073052 0006164 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_KDEBUG_H #define _ASM_GENERIC_KDEBUG_H enum die_val { DIE_UNUSED, DIE_OOPS = 1, }; #endif /* _ASM_GENERIC_KDEBUG_H */ audit_signal.h 0000644 00000000044 14722073052 0007360 0 ustar 00 __NR_kill, __NR_tgkill, __NR_tkill, fb.h 0000644 00000000417 14722073052 0005310 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_FB_H_ #define __ASM_GENERIC_FB_H_ #include <linux/fb.h> #define fb_pgprotect(...) do {} while (0) static inline int fb_is_primary_device(struct fb_info *info) { return 0; } #endif /* __ASM_GENERIC_FB_H_ */ statfs.h 0000644 00000000251 14722073052 0006221 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _GENERIC_STATFS_H #define _GENERIC_STATFS_H #include <uapi/asm-generic/statfs.h> typedef __kernel_fsid_t fsid_t; #endif io.h 0000644 00000054575 14722073052 0005346 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Generic I/O port emulation. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef __ASM_GENERIC_IO_H #define __ASM_GENERIC_IO_H #include <asm/page.h> /* I/O is all done through memory accesses */ #include <linux/string.h> /* for memset() and memcpy() */ #include <linux/types.h> #ifdef CONFIG_GENERIC_IOMAP #include <asm-generic/iomap.h> #endif #include <asm/mmiowb.h> #include <asm-generic/pci_iomap.h> #ifndef __io_br #define __io_br() barrier() #endif /* prevent prefetching of coherent DMA data ahead of a dma-complete */ #ifndef __io_ar #ifdef rmb #define __io_ar(v) rmb() #else #define __io_ar(v) barrier() #endif #endif /* flush writes to coherent DMA data before possibly triggering a DMA read */ #ifndef __io_bw #ifdef wmb #define __io_bw() wmb() #else #define __io_bw() barrier() #endif #endif /* serialize device access against a spin_unlock, usually handled there. */ #ifndef __io_aw #define __io_aw() mmiowb_set_pending() #endif #ifndef __io_pbw #define __io_pbw() __io_bw() #endif #ifndef __io_paw #define __io_paw() __io_aw() #endif #ifndef __io_pbr #define __io_pbr() __io_br() #endif #ifndef __io_par #define __io_par(v) __io_ar(v) #endif /* * __raw_{read,write}{b,w,l,q}() access memory in native endianness. * * On some architectures memory mapped IO needs to be accessed differently. * On the simple architectures, we just read/write the memory location * directly. 
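 *
 * The __raw_*() accessors below do no byte swapping and imply no
 * ordering barriers; portable driver code should normally prefer the
 * readX()/writeX() helpers further down, which add both.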
*/ #ifndef __raw_readb #define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { return *(const volatile u8 __force *)addr; } #endif #ifndef __raw_readw #define __raw_readw __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { return *(const volatile u16 __force *)addr; } #endif #ifndef __raw_readl #define __raw_readl __raw_readl static inline u32 __raw_readl(const volatile void __iomem *addr) { return *(const volatile u32 __force *)addr; } #endif #ifdef CONFIG_64BIT #ifndef __raw_readq #define __raw_readq __raw_readq static inline u64 __raw_readq(const volatile void __iomem *addr) { return *(const volatile u64 __force *)addr; } #endif #endif /* CONFIG_64BIT */ #ifndef __raw_writeb #define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 value, volatile void __iomem *addr) { *(volatile u8 __force *)addr = value; } #endif #ifndef __raw_writew #define __raw_writew __raw_writew static inline void __raw_writew(u16 value, volatile void __iomem *addr) { *(volatile u16 __force *)addr = value; } #endif #ifndef __raw_writel #define __raw_writel __raw_writel static inline void __raw_writel(u32 value, volatile void __iomem *addr) { *(volatile u32 __force *)addr = value; } #endif #ifdef CONFIG_64BIT #ifndef __raw_writeq #define __raw_writeq __raw_writeq static inline void __raw_writeq(u64 value, volatile void __iomem *addr) { *(volatile u64 __force *)addr = value; } #endif #endif /* CONFIG_64BIT */ /* * {read,write}{b,w,l,q}() access little endian memory and return result in * native endianness. */ #ifndef readb #define readb readb static inline u8 readb(const volatile void __iomem *addr) { u8 val; __io_br(); val = __raw_readb(addr); __io_ar(val); return val; } #endif #ifndef readw #define readw readw static inline u16 readw(const volatile void __iomem *addr) { u16 val; __io_br(); val = __le16_to_cpu(__raw_readw(addr)); __io_ar(val); return val; } #endif #ifndef readl #define readl readl static inline u32 readl(const volatile void __iomem *addr) { u32 val; __io_br(); val = __le32_to_cpu(__raw_readl(addr)); __io_ar(val); return val; } #endif #ifdef CONFIG_64BIT #ifndef readq #define readq readq static inline u64 readq(const volatile void __iomem *addr) { u64 val; __io_br(); val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); __io_ar(val); return val; } #endif #endif /* CONFIG_64BIT */ #ifndef writeb #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { __io_bw(); __raw_writeb(value, addr); __io_aw(); } #endif #ifndef writew #define writew writew static inline void writew(u16 value, volatile void __iomem *addr) { __io_bw(); __raw_writew(cpu_to_le16(value), addr); __io_aw(); } #endif #ifndef writel #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { __io_bw(); __raw_writel(__cpu_to_le32(value), addr); __io_aw(); } #endif #ifdef CONFIG_64BIT #ifndef writeq #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { __io_bw(); __raw_writeq((u64 __force)__cpu_to_le64(value), addr); __io_aw(); } #endif #endif /* CONFIG_64BIT */ /* * {read,write}{b,w,l,q}_relaxed() are like the regular version, but * are not guaranteed to provide ordering against spinlocks or memory * accesses. 
*/ #ifndef readb_relaxed #define readb_relaxed readb_relaxed static inline u8 readb_relaxed(const volatile void __iomem *addr) { return __raw_readb(addr); } #endif #ifndef readw_relaxed #define readw_relaxed readw_relaxed static inline u16 readw_relaxed(const volatile void __iomem *addr) { return __le16_to_cpu(__raw_readw(addr)); } #endif #ifndef readl_relaxed #define readl_relaxed readl_relaxed static inline u32 readl_relaxed(const volatile void __iomem *addr) { return __le32_to_cpu(__raw_readl(addr)); } #endif #if defined(readq) && !defined(readq_relaxed) #define readq_relaxed readq_relaxed static inline u64 readq_relaxed(const volatile void __iomem *addr) { return __le64_to_cpu(__raw_readq(addr)); } #endif #ifndef writeb_relaxed #define writeb_relaxed writeb_relaxed static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) { __raw_writeb(value, addr); } #endif #ifndef writew_relaxed #define writew_relaxed writew_relaxed static inline void writew_relaxed(u16 value, volatile void __iomem *addr) { __raw_writew(cpu_to_le16(value), addr); } #endif #ifndef writel_relaxed #define writel_relaxed writel_relaxed static inline void writel_relaxed(u32 value, volatile void __iomem *addr) { __raw_writel(__cpu_to_le32(value), addr); } #endif #if defined(writeq) && !defined(writeq_relaxed) #define writeq_relaxed writeq_relaxed static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) { __raw_writeq(__cpu_to_le64(value), addr); } #endif /* * {read,write}s{b,w,l,q}() repeatedly access the same memory address in * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). */ #ifndef readsb #define readsb readsb static inline void readsb(const volatile void __iomem *addr, void *buffer, unsigned int count) { if (count) { u8 *buf = buffer; do { u8 x = __raw_readb(addr); *buf++ = x; } while (--count); } } #endif #ifndef readsw #define readsw readsw static inline void readsw(const volatile void __iomem *addr, void *buffer, unsigned int count) { if (count) { u16 *buf = buffer; do { u16 x = __raw_readw(addr); *buf++ = x; } while (--count); } } #endif #ifndef readsl #define readsl readsl static inline void readsl(const volatile void __iomem *addr, void *buffer, unsigned int count) { if (count) { u32 *buf = buffer; do { u32 x = __raw_readl(addr); *buf++ = x; } while (--count); } } #endif #ifdef CONFIG_64BIT #ifndef readsq #define readsq readsq static inline void readsq(const volatile void __iomem *addr, void *buffer, unsigned int count) { if (count) { u64 *buf = buffer; do { u64 x = __raw_readq(addr); *buf++ = x; } while (--count); } } #endif #endif /* CONFIG_64BIT */ #ifndef writesb #define writesb writesb static inline void writesb(volatile void __iomem *addr, const void *buffer, unsigned int count) { if (count) { const u8 *buf = buffer; do { __raw_writeb(*buf++, addr); } while (--count); } } #endif #ifndef writesw #define writesw writesw static inline void writesw(volatile void __iomem *addr, const void *buffer, unsigned int count) { if (count) { const u16 *buf = buffer; do { __raw_writew(*buf++, addr); } while (--count); } } #endif #ifndef writesl #define writesl writesl static inline void writesl(volatile void __iomem *addr, const void *buffer, unsigned int count) { if (count) { const u32 *buf = buffer; do { __raw_writel(*buf++, addr); } while (--count); } } #endif #ifdef CONFIG_64BIT #ifndef writesq #define writesq writesq static inline void writesq(volatile void __iomem *addr, const void *buffer, unsigned int count) { if (count) { const u64 *buf = buffer; do { 
__raw_writeq(*buf++, addr); } while (--count); } } #endif #endif /* CONFIG_64BIT */ #ifndef PCI_IOBASE #define PCI_IOBASE ((void __iomem *)0) #endif #ifndef IO_SPACE_LIMIT #define IO_SPACE_LIMIT 0xffff #endif #include <linux/logic_pio.h> /* * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be * implemented on hardware that needs an additional delay for I/O accesses to * take effect. */ #ifndef inb #define inb inb static inline u8 inb(unsigned long addr) { u8 val; __io_pbr(); val = __raw_readb(PCI_IOBASE + addr); __io_par(val); return val; } #endif #ifndef inw #define inw inw static inline u16 inw(unsigned long addr) { u16 val; __io_pbr(); val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr)); __io_par(val); return val; } #endif #ifndef inl #define inl inl static inline u32 inl(unsigned long addr) { u32 val; __io_pbr(); val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr)); __io_par(val); return val; } #endif #ifndef outb #define outb outb static inline void outb(u8 value, unsigned long addr) { __io_pbw(); __raw_writeb(value, PCI_IOBASE + addr); __io_paw(); } #endif #ifndef outw #define outw outw static inline void outw(u16 value, unsigned long addr) { __io_pbw(); __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr); __io_paw(); } #endif #ifndef outl #define outl outl static inline void outl(u32 value, unsigned long addr) { __io_pbw(); __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); __io_paw(); } #endif #ifndef inb_p #define inb_p inb_p static inline u8 inb_p(unsigned long addr) { return inb(addr); } #endif #ifndef inw_p #define inw_p inw_p static inline u16 inw_p(unsigned long addr) { return inw(addr); } #endif #ifndef inl_p #define inl_p inl_p static inline u32 inl_p(unsigned long addr) { return inl(addr); } #endif #ifndef outb_p #define outb_p outb_p static inline void outb_p(u8 value, unsigned long addr) { outb(value, addr); } #endif #ifndef outw_p #define outw_p outw_p static inline void outw_p(u16 value, unsigned long addr) { outw(value, addr); } #endif #ifndef outl_p #define outl_p outl_p static inline void outl_p(u32 value, unsigned long addr) { outl(value, addr); } #endif /* * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a * single I/O port multiple times. 
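 *
 * For example, an old IDE-style device moves a 512-byte sector through
 * its 16-bit data port with insw(data_port, buf, 256); data_port and
 * buf here are hypothetical driver-supplied names.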
*/ #ifndef insb #define insb insb static inline void insb(unsigned long addr, void *buffer, unsigned int count) { readsb(PCI_IOBASE + addr, buffer, count); } #endif #ifndef insw #define insw insw static inline void insw(unsigned long addr, void *buffer, unsigned int count) { readsw(PCI_IOBASE + addr, buffer, count); } #endif #ifndef insl #define insl insl static inline void insl(unsigned long addr, void *buffer, unsigned int count) { readsl(PCI_IOBASE + addr, buffer, count); } #endif #ifndef outsb #define outsb outsb static inline void outsb(unsigned long addr, const void *buffer, unsigned int count) { writesb(PCI_IOBASE + addr, buffer, count); } #endif #ifndef outsw #define outsw outsw static inline void outsw(unsigned long addr, const void *buffer, unsigned int count) { writesw(PCI_IOBASE + addr, buffer, count); } #endif #ifndef outsl #define outsl outsl static inline void outsl(unsigned long addr, const void *buffer, unsigned int count) { writesl(PCI_IOBASE + addr, buffer, count); } #endif #ifndef insb_p #define insb_p insb_p static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) { insb(addr, buffer, count); } #endif #ifndef insw_p #define insw_p insw_p static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) { insw(addr, buffer, count); } #endif #ifndef insl_p #define insl_p insl_p static inline void insl_p(unsigned long addr, void *buffer, unsigned int count) { insl(addr, buffer, count); } #endif #ifndef outsb_p #define outsb_p outsb_p static inline void outsb_p(unsigned long addr, const void *buffer, unsigned int count) { outsb(addr, buffer, count); } #endif #ifndef outsw_p #define outsw_p outsw_p static inline void outsw_p(unsigned long addr, const void *buffer, unsigned int count) { outsw(addr, buffer, count); } #endif #ifndef outsl_p #define outsl_p outsl_p static inline void outsl_p(unsigned long addr, const void *buffer, unsigned int count) { outsl(addr, buffer, count); } #endif #ifndef CONFIG_GENERIC_IOMAP #ifndef ioread8 #define ioread8 ioread8 static inline u8 ioread8(const volatile void __iomem *addr) { return readb(addr); } #endif #ifndef ioread16 #define ioread16 ioread16 static inline u16 ioread16(const volatile void __iomem *addr) { return readw(addr); } #endif #ifndef ioread32 #define ioread32 ioread32 static inline u32 ioread32(const volatile void __iomem *addr) { return readl(addr); } #endif #ifdef CONFIG_64BIT #ifndef ioread64 #define ioread64 ioread64 static inline u64 ioread64(const volatile void __iomem *addr) { return readq(addr); } #endif #endif /* CONFIG_64BIT */ #ifndef iowrite8 #define iowrite8 iowrite8 static inline void iowrite8(u8 value, volatile void __iomem *addr) { writeb(value, addr); } #endif #ifndef iowrite16 #define iowrite16 iowrite16 static inline void iowrite16(u16 value, volatile void __iomem *addr) { writew(value, addr); } #endif #ifndef iowrite32 #define iowrite32 iowrite32 static inline void iowrite32(u32 value, volatile void __iomem *addr) { writel(value, addr); } #endif #ifdef CONFIG_64BIT #ifndef iowrite64 #define iowrite64 iowrite64 static inline void iowrite64(u64 value, volatile void __iomem *addr) { writeq(value, addr); } #endif #endif /* CONFIG_64BIT */ #ifndef ioread16be #define ioread16be ioread16be static inline u16 ioread16be(const volatile void __iomem *addr) { return swab16(readw(addr)); } #endif #ifndef ioread32be #define ioread32be ioread32be static inline u32 ioread32be(const volatile void __iomem *addr) { return swab32(readl(addr)); } #endif #ifdef CONFIG_64BIT #ifndef 
ioread64be #define ioread64be ioread64be static inline u64 ioread64be(const volatile void __iomem *addr) { return swab64(readq(addr)); } #endif #endif /* CONFIG_64BIT */ #ifndef iowrite16be #define iowrite16be iowrite16be static inline void iowrite16be(u16 value, void volatile __iomem *addr) { writew(swab16(value), addr); } #endif #ifndef iowrite32be #define iowrite32be iowrite32be static inline void iowrite32be(u32 value, volatile void __iomem *addr) { writel(swab32(value), addr); } #endif #ifdef CONFIG_64BIT #ifndef iowrite64be #define iowrite64be iowrite64be static inline void iowrite64be(u64 value, volatile void __iomem *addr) { writeq(swab64(value), addr); } #endif #endif /* CONFIG_64BIT */ #ifndef ioread8_rep #define ioread8_rep ioread8_rep static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, unsigned int count) { readsb(addr, buffer, count); } #endif #ifndef ioread16_rep #define ioread16_rep ioread16_rep static inline void ioread16_rep(const volatile void __iomem *addr, void *buffer, unsigned int count) { readsw(addr, buffer, count); } #endif #ifndef ioread32_rep #define ioread32_rep ioread32_rep static inline void ioread32_rep(const volatile void __iomem *addr, void *buffer, unsigned int count) { readsl(addr, buffer, count); } #endif #ifdef CONFIG_64BIT #ifndef ioread64_rep #define ioread64_rep ioread64_rep static inline void ioread64_rep(const volatile void __iomem *addr, void *buffer, unsigned int count) { readsq(addr, buffer, count); } #endif #endif /* CONFIG_64BIT */ #ifndef iowrite8_rep #define iowrite8_rep iowrite8_rep static inline void iowrite8_rep(volatile void __iomem *addr, const void *buffer, unsigned int count) { writesb(addr, buffer, count); } #endif #ifndef iowrite16_rep #define iowrite16_rep iowrite16_rep static inline void iowrite16_rep(volatile void __iomem *addr, const void *buffer, unsigned int count) { writesw(addr, buffer, count); } #endif #ifndef iowrite32_rep #define iowrite32_rep iowrite32_rep static inline void iowrite32_rep(volatile void __iomem *addr, const void *buffer, unsigned int count) { writesl(addr, buffer, count); } #endif #ifdef CONFIG_64BIT #ifndef iowrite64_rep #define iowrite64_rep iowrite64_rep static inline void iowrite64_rep(volatile void __iomem *addr, const void *buffer, unsigned int count) { writesq(addr, buffer, count); } #endif #endif /* CONFIG_64BIT */ #endif /* CONFIG_GENERIC_IOMAP */ #ifdef __KERNEL__ #include <linux/vmalloc.h> #define __io_virt(x) ((void __force *)(x)) #ifndef CONFIG_GENERIC_IOMAP struct pci_dev; extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); #ifndef pci_iounmap #define pci_iounmap pci_iounmap static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) { } #endif #endif /* CONFIG_GENERIC_IOMAP */ /* * Change virtual addresses to physical addresses and vv. * These are pretty trivial */ #ifndef virt_to_phys #define virt_to_phys virt_to_phys static inline unsigned long virt_to_phys(volatile void *address) { return __pa((unsigned long)address); } #endif #ifndef phys_to_virt #define phys_to_virt phys_to_virt static inline void *phys_to_virt(unsigned long address) { return __va(address); } #endif /** * DOC: ioremap() and ioremap_*() variants * * If you have an IOMMU your architecture is expected to have both ioremap() * and iounmap() implemented otherwise the asm-generic helpers will provide a * direct mapping. 
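 *
 * An illustrative probe-time pattern (REG_STATUS, REG_CTRL and the error
 * handling are hypothetical, not part of this header):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	val = readl(regs + REG_STATUS);
 *	writel(val | BIT(0), regs + REG_CTRL);
 *	iounmap(regs);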
* * There are ioremap_*() call variants; if you have no IOMMU, they all * naturally default to direct mapping, and you can override these defaults. * If you have an IOMMU you are highly encouraged to provide your own * ioremap variant implementation, as there currently is no safe * architecture-agnostic default. To avoid possible improper behaviour, the * default asm-generic ioremap_*() variants all return NULL when an IOMMU is * available. If you've defined your own ioremap_*() variant, you must also * define the corresponding macro to itself to avoid this default NULL * return. */ #ifdef CONFIG_MMU #ifndef ioremap_uc #define ioremap_uc ioremap_uc static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) { return NULL; } #endif #else /* !CONFIG_MMU */ /* * Direct mapping of physical addresses to I/O pointers. * * This implementation is for the no-MMU case only... if you have an MMU * you'll need to provide your own definitions. */ #ifndef ioremap #define ioremap ioremap static inline void __iomem *ioremap(phys_addr_t offset, size_t size) { return (void __iomem *)(unsigned long)offset; } #endif #ifndef iounmap #define iounmap iounmap static inline void iounmap(void __iomem *addr) { } #endif #endif /* CONFIG_MMU */ #ifndef ioremap_nocache void __iomem *ioremap(phys_addr_t phys_addr, size_t size); #define ioremap_nocache ioremap_nocache static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) { return ioremap(offset, size); } #endif #ifndef ioremap_uc #define ioremap_uc ioremap_uc static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) { return ioremap_nocache(offset, size); } #endif #ifndef ioremap_wc #define ioremap_wc ioremap_wc static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) { return ioremap_nocache(offset, size); } #endif #ifndef ioremap_wt #define ioremap_wt ioremap_wt static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) { return ioremap_nocache(offset, size); } #endif #ifdef CONFIG_HAS_IOPORT_MAP #ifndef CONFIG_GENERIC_IOMAP #ifndef ioport_map #define ioport_map ioport_map static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { port &= IO_SPACE_LIMIT; return (port > MMIO_UPPER_LIMIT) ?
NULL : PCI_IOBASE + port; } #endif #ifndef ioport_unmap #define ioport_unmap ioport_unmap static inline void ioport_unmap(void __iomem *p) { } #endif #else /* CONFIG_GENERIC_IOMAP */ extern void __iomem *ioport_map(unsigned long port, unsigned int nr); extern void ioport_unmap(void __iomem *p); #endif /* CONFIG_GENERIC_IOMAP */ #endif /* CONFIG_HAS_IOPORT_MAP */ /* * Convert a virtual cached pointer to an uncached pointer */ #ifndef xlate_dev_kmem_ptr #define xlate_dev_kmem_ptr xlate_dev_kmem_ptr static inline void *xlate_dev_kmem_ptr(void *addr) { return addr; } #endif #ifndef xlate_dev_mem_ptr #define xlate_dev_mem_ptr xlate_dev_mem_ptr static inline void *xlate_dev_mem_ptr(phys_addr_t addr) { return __va(addr); } #endif #ifndef unxlate_dev_mem_ptr #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) { } #endif #ifdef CONFIG_VIRT_TO_BUS #ifndef virt_to_bus static inline unsigned long virt_to_bus(void *address) { return (unsigned long)address; } static inline void *bus_to_virt(unsigned long address) { return (void *)address; } #endif #endif #ifndef memset_io #define memset_io memset_io /** * memset_io - Set a range of I/O memory to a constant value * @addr: The beginning of the I/O-memory range to set * @value: The value to set the memory to * @size: The number of bytes to set * * Set a range of I/O memory to a given value. */ static inline void memset_io(volatile void __iomem *addr, int value, size_t size) { memset(__io_virt(addr), value, size); } #endif #ifndef memcpy_fromio #define memcpy_fromio memcpy_fromio /** * memcpy_fromio - Copy a block of data from I/O memory * @buffer: The (RAM) destination for the copy * @addr: The (I/O memory) source for the data * @size: The number of bytes to copy * * Copy a block of data from I/O memory. */ static inline void memcpy_fromio(void *buffer, const volatile void __iomem *addr, size_t size) { memcpy(buffer, __io_virt(addr), size); } #endif #ifndef memcpy_toio #define memcpy_toio memcpy_toio /** * memcpy_toio - Copy a block of data into I/O memory * @addr: The (I/O memory) destination for the copy * @buffer: The (RAM) source for the data * @size: The number of bytes to copy * * Copy a block of data to I/O memory.
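 *
 * Example (fifo_base, tx_buf and len are hypothetical driver state):
 *
 *	memcpy_toio(fifo_base, tx_buf, len);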
*/ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size) { memcpy(__io_virt(addr), buffer, size); } #endif #endif /* __KERNEL__ */ #endif /* __ASM_GENERIC_IO_H */ atomic-long.h 0000644 00000045412 14722073052 0007136 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 // Generated by scripts/atomic/gen-atomic-long.sh // DO NOT MODIFY THIS FILE DIRECTLY #ifndef _ASM_GENERIC_ATOMIC_LONG_H #define _ASM_GENERIC_ATOMIC_LONG_H #include <asm/types.h> #ifdef CONFIG_64BIT typedef atomic64_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) #define atomic_long_cond_read_acquire atomic64_cond_read_acquire #define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed #else typedef atomic_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) #define atomic_long_cond_read_acquire atomic_cond_read_acquire #define atomic_long_cond_read_relaxed atomic_cond_read_relaxed #endif #ifdef CONFIG_64BIT static inline long atomic_long_read(const atomic_long_t *v) { return atomic64_read(v); } static inline long atomic_long_read_acquire(const atomic_long_t *v) { return atomic64_read_acquire(v); } static inline void atomic_long_set(atomic_long_t *v, long i) { atomic64_set(v, i); } static inline void atomic_long_set_release(atomic_long_t *v, long i) { atomic64_set_release(v, i); } static inline void atomic_long_add(long i, atomic_long_t *v) { atomic64_add(i, v); } static inline long atomic_long_add_return(long i, atomic_long_t *v) { return atomic64_add_return(i, v); } static inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { return atomic64_add_return_acquire(i, v); } static inline long atomic_long_add_return_release(long i, atomic_long_t *v) { return atomic64_add_return_release(i, v); } static inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { return atomic64_add_return_relaxed(i, v); } static inline long atomic_long_fetch_add(long i, atomic_long_t *v) { return atomic64_fetch_add(i, v); } static inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { return atomic64_fetch_add_acquire(i, v); } static inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { return atomic64_fetch_add_release(i, v); } static inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_add_relaxed(i, v); } static inline void atomic_long_sub(long i, atomic_long_t *v) { atomic64_sub(i, v); } static inline long atomic_long_sub_return(long i, atomic_long_t *v) { return atomic64_sub_return(i, v); } static inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { return atomic64_sub_return_acquire(i, v); } static inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { return atomic64_sub_return_release(i, v); } static inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { return atomic64_sub_return_relaxed(i, v); } static inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { return atomic64_fetch_sub(i, v); } static inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { return atomic64_fetch_sub_acquire(i, v); } static inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { return atomic64_fetch_sub_release(i, v); } static inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_sub_relaxed(i, v); } static inline void atomic_long_inc(atomic_long_t *v) { atomic64_inc(v); } static inline long atomic_long_inc_return(atomic_long_t *v) { return atomic64_inc_return(v); } static inline long 
atomic_long_inc_return_acquire(atomic_long_t *v) { return atomic64_inc_return_acquire(v); } static inline long atomic_long_inc_return_release(atomic_long_t *v) { return atomic64_inc_return_release(v); } static inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { return atomic64_inc_return_relaxed(v); } static inline long atomic_long_fetch_inc(atomic_long_t *v) { return atomic64_fetch_inc(v); } static inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { return atomic64_fetch_inc_acquire(v); } static inline long atomic_long_fetch_inc_release(atomic_long_t *v) { return atomic64_fetch_inc_release(v); } static inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { return atomic64_fetch_inc_relaxed(v); } static inline void atomic_long_dec(atomic_long_t *v) { atomic64_dec(v); } static inline long atomic_long_dec_return(atomic_long_t *v) { return atomic64_dec_return(v); } static inline long atomic_long_dec_return_acquire(atomic_long_t *v) { return atomic64_dec_return_acquire(v); } static inline long atomic_long_dec_return_release(atomic_long_t *v) { return atomic64_dec_return_release(v); } static inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { return atomic64_dec_return_relaxed(v); } static inline long atomic_long_fetch_dec(atomic_long_t *v) { return atomic64_fetch_dec(v); } static inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { return atomic64_fetch_dec_acquire(v); } static inline long atomic_long_fetch_dec_release(atomic_long_t *v) { return atomic64_fetch_dec_release(v); } static inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { return atomic64_fetch_dec_relaxed(v); } static inline void atomic_long_and(long i, atomic_long_t *v) { atomic64_and(i, v); } static inline long atomic_long_fetch_and(long i, atomic_long_t *v) { return atomic64_fetch_and(i, v); } static inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { return atomic64_fetch_and_acquire(i, v); } static inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { return atomic64_fetch_and_release(i, v); } static inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_and_relaxed(i, v); } static inline void atomic_long_andnot(long i, atomic_long_t *v) { atomic64_andnot(i, v); } static inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { return atomic64_fetch_andnot(i, v); } static inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { return atomic64_fetch_andnot_acquire(i, v); } static inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { return atomic64_fetch_andnot_release(i, v); } static inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_andnot_relaxed(i, v); } static inline void atomic_long_or(long i, atomic_long_t *v) { atomic64_or(i, v); } static inline long atomic_long_fetch_or(long i, atomic_long_t *v) { return atomic64_fetch_or(i, v); } static inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { return atomic64_fetch_or_acquire(i, v); } static inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { return atomic64_fetch_or_release(i, v); } static inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_or_relaxed(i, v); } static inline void atomic_long_xor(long i, atomic_long_t *v) { atomic64_xor(i, v); } static inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { return atomic64_fetch_xor(i, v); } static inline long 
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { return atomic64_fetch_xor_acquire(i, v); } static inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { return atomic64_fetch_xor_release(i, v); } static inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_xor_relaxed(i, v); } static inline long atomic_long_xchg(atomic_long_t *v, long i) { return atomic64_xchg(v, i); } static inline long atomic_long_xchg_acquire(atomic_long_t *v, long i) { return atomic64_xchg_acquire(v, i); } static inline long atomic_long_xchg_release(atomic_long_t *v, long i) { return atomic64_xchg_release(v, i); } static inline long atomic_long_xchg_relaxed(atomic_long_t *v, long i) { return atomic64_xchg_relaxed(v, i); } static inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg(v, old, new); } static inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_acquire(v, old, new); } static inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_release(v, old, new); } static inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_relaxed(v, old, new); } static inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg(v, (s64 *)old, new); } static inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); } static inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_release(v, (s64 *)old, new); } static inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); } static inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { return atomic64_sub_and_test(i, v); } static inline bool atomic_long_dec_and_test(atomic_long_t *v) { return atomic64_dec_and_test(v); } static inline bool atomic_long_inc_and_test(atomic_long_t *v) { return atomic64_inc_and_test(v); } static inline bool atomic_long_add_negative(long i, atomic_long_t *v) { return atomic64_add_negative(i, v); } static inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { return atomic64_fetch_add_unless(v, a, u); } static inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { return atomic64_add_unless(v, a, u); } static inline bool atomic_long_inc_not_zero(atomic_long_t *v) { return atomic64_inc_not_zero(v); } static inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { return atomic64_inc_unless_negative(v); } static inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { return atomic64_dec_unless_positive(v); } static inline long atomic_long_dec_if_positive(atomic_long_t *v) { return atomic64_dec_if_positive(v); } #else /* CONFIG_64BIT */ static inline long atomic_long_read(const atomic_long_t *v) { return atomic_read(v); } static inline long atomic_long_read_acquire(const atomic_long_t *v) { return atomic_read_acquire(v); } static inline void atomic_long_set(atomic_long_t *v, long i) { atomic_set(v, i); } static inline void atomic_long_set_release(atomic_long_t *v, long i) { atomic_set_release(v, i); } static inline void atomic_long_add(long i, atomic_long_t *v) { atomic_add(i, v); } static inline long atomic_long_add_return(long i, atomic_long_t *v) { return atomic_add_return(i, v); } 
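/*
 * Usage sketch (illustrative only, not part of the generated API): an
 * atomic_long_t is a pointer-sized counter that is spelled the same way
 * on 32-bit and 64-bit kernels, e.g.:
 *
 *	static atomic_long_t nr_requests = ATOMIC_LONG_INIT(0);
 *
 *	atomic_long_inc(&nr_requests);
 *	if (atomic_long_read(&nr_requests) > max_requests)
 *		throttle();
 *
 * where max_requests and throttle() are hypothetical.
 */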
static inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { return atomic_add_return_acquire(i, v); } static inline long atomic_long_add_return_release(long i, atomic_long_t *v) { return atomic_add_return_release(i, v); } static inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { return atomic_add_return_relaxed(i, v); } static inline long atomic_long_fetch_add(long i, atomic_long_t *v) { return atomic_fetch_add(i, v); } static inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { return atomic_fetch_add_acquire(i, v); } static inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { return atomic_fetch_add_release(i, v); } static inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { return atomic_fetch_add_relaxed(i, v); } static inline void atomic_long_sub(long i, atomic_long_t *v) { atomic_sub(i, v); } static inline long atomic_long_sub_return(long i, atomic_long_t *v) { return atomic_sub_return(i, v); } static inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { return atomic_sub_return_acquire(i, v); } static inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { return atomic_sub_return_release(i, v); } static inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { return atomic_sub_return_relaxed(i, v); } static inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { return atomic_fetch_sub(i, v); } static inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { return atomic_fetch_sub_acquire(i, v); } static inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { return atomic_fetch_sub_release(i, v); } static inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { return atomic_fetch_sub_relaxed(i, v); } static inline void atomic_long_inc(atomic_long_t *v) { atomic_inc(v); } static inline long atomic_long_inc_return(atomic_long_t *v) { return atomic_inc_return(v); } static inline long atomic_long_inc_return_acquire(atomic_long_t *v) { return atomic_inc_return_acquire(v); } static inline long atomic_long_inc_return_release(atomic_long_t *v) { return atomic_inc_return_release(v); } static inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { return atomic_inc_return_relaxed(v); } static inline long atomic_long_fetch_inc(atomic_long_t *v) { return atomic_fetch_inc(v); } static inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { return atomic_fetch_inc_acquire(v); } static inline long atomic_long_fetch_inc_release(atomic_long_t *v) { return atomic_fetch_inc_release(v); } static inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { return atomic_fetch_inc_relaxed(v); } static inline void atomic_long_dec(atomic_long_t *v) { atomic_dec(v); } static inline long atomic_long_dec_return(atomic_long_t *v) { return atomic_dec_return(v); } static inline long atomic_long_dec_return_acquire(atomic_long_t *v) { return atomic_dec_return_acquire(v); } static inline long atomic_long_dec_return_release(atomic_long_t *v) { return atomic_dec_return_release(v); } static inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { return atomic_dec_return_relaxed(v); } static inline long atomic_long_fetch_dec(atomic_long_t *v) { return atomic_fetch_dec(v); } static inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { return atomic_fetch_dec_acquire(v); } static inline long atomic_long_fetch_dec_release(atomic_long_t *v) { return atomic_fetch_dec_release(v); } static inline long 
atomic_long_fetch_dec_relaxed(atomic_long_t *v) { return atomic_fetch_dec_relaxed(v); } static inline void atomic_long_and(long i, atomic_long_t *v) { atomic_and(i, v); } static inline long atomic_long_fetch_and(long i, atomic_long_t *v) { return atomic_fetch_and(i, v); } static inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { return atomic_fetch_and_acquire(i, v); } static inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { return atomic_fetch_and_release(i, v); } static inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { return atomic_fetch_and_relaxed(i, v); } static inline void atomic_long_andnot(long i, atomic_long_t *v) { atomic_andnot(i, v); } static inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { return atomic_fetch_andnot(i, v); } static inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { return atomic_fetch_andnot_acquire(i, v); } static inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { return atomic_fetch_andnot_release(i, v); } static inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { return atomic_fetch_andnot_relaxed(i, v); } static inline void atomic_long_or(long i, atomic_long_t *v) { atomic_or(i, v); } static inline long atomic_long_fetch_or(long i, atomic_long_t *v) { return atomic_fetch_or(i, v); } static inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { return atomic_fetch_or_acquire(i, v); } static inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { return atomic_fetch_or_release(i, v); } static inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { return atomic_fetch_or_relaxed(i, v); } static inline void atomic_long_xor(long i, atomic_long_t *v) { atomic_xor(i, v); } static inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { return atomic_fetch_xor(i, v); } static inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { return atomic_fetch_xor_acquire(i, v); } static inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { return atomic_fetch_xor_release(i, v); } static inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { return atomic_fetch_xor_relaxed(i, v); } static inline long atomic_long_xchg(atomic_long_t *v, long i) { return atomic_xchg(v, i); } static inline long atomic_long_xchg_acquire(atomic_long_t *v, long i) { return atomic_xchg_acquire(v, i); } static inline long atomic_long_xchg_release(atomic_long_t *v, long i) { return atomic_xchg_release(v, i); } static inline long atomic_long_xchg_relaxed(atomic_long_t *v, long i) { return atomic_xchg_relaxed(v, i); } static inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { return atomic_cmpxchg(v, old, new); } static inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_acquire(v, old, new); } static inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_release(v, old, new); } static inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_relaxed(v, old, new); } static inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg(v, (int *)old, new); } static inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_acquire(v, (int *)old, new); } static inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long 
*old, long new) { return atomic_try_cmpxchg_release(v, (int *)old, new); } static inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_relaxed(v, (int *)old, new); } static inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { return atomic_sub_and_test(i, v); } static inline bool atomic_long_dec_and_test(atomic_long_t *v) { return atomic_dec_and_test(v); } static inline bool atomic_long_inc_and_test(atomic_long_t *v) { return atomic_inc_and_test(v); } static inline bool atomic_long_add_negative(long i, atomic_long_t *v) { return atomic_add_negative(i, v); } static inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { return atomic_fetch_add_unless(v, a, u); } static inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { return atomic_add_unless(v, a, u); } static inline bool atomic_long_inc_not_zero(atomic_long_t *v) { return atomic_inc_not_zero(v); } static inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { return atomic_inc_unless_negative(v); } static inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { return atomic_dec_unless_positive(v); } static inline long atomic_long_dec_if_positive(atomic_long_t *v) { return atomic_dec_if_positive(v); } #endif /* CONFIG_64BIT */ #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ // 77558968132ce4f911ad53f6f52ce423006f6268 shmparam.h 0000644 00000000317 14722073052 0006530 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SHMPARAM_H #define __ASM_GENERIC_SHMPARAM_H #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ #endif /* _ASM_GENERIC_SHMPARAM_H */ syscalls.h 0000644 00000001343 14722073052 0006555 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SYSCALLS_H #define __ASM_GENERIC_SYSCALLS_H #include <linux/compiler.h> #include <linux/linkage.h> /* * Calling conventions for these system calls can differ, so * it's possible to override them. */ #ifndef sys_mmap2 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); #endif #ifndef sys_mmap asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t pgoff); #endif #ifndef sys_rt_sigreturn asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); #endif #endif /* __ASM_GENERIC_SYSCALLS_H */ vtime.h 0000644 00000000064 14722073052 0006043 0 ustar 00 /* no content, but patch(1) dislikes empty files */ sections.h 0000644 00000013704 14722073052 0006553 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_SECTIONS_H_ #define _ASM_GENERIC_SECTIONS_H_ /* References to section boundaries */ #include <linux/compiler.h> #include <linux/types.h> /* * Usage guidelines: * _text, _data: architecture specific, don't use them in arch-independent code * [_stext, _etext]: contains .text.* sections, may also contain .rodata.* * and/or .init.* sections * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* * and/or .init.* sections. * [__start_rodata, __end_rodata]: contains .rodata.* sections * [__start_ro_after_init, __end_ro_after_init]: * contains .data..ro_after_init section * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* * may be out of this range on some architectures. 
* [_sinittext, _einittext]: contains .init.text.* sections * [__bss_start, __bss_stop]: contains BSS sections * * Following global variables are optional and may be unavailable on some * architectures and/or kernel configurations. * _text, _data * __kprobes_text_start, __kprobes_text_end * __entry_text_start, __entry_text_end * __ctors_start, __ctors_end * __irqentry_text_start, __irqentry_text_end * __softirqentry_text_start, __softirqentry_text_end * __start_opd, __end_opd */ extern char _text[], _stext[], _etext[]; extern char _data[], _sdata[], _edata[]; extern char __bss_start[], __bss_stop[]; extern char __init_begin[], __init_end[]; extern char _sinittext[], _einittext[]; extern char __start_ro_after_init[], __end_ro_after_init[]; extern char _end[]; extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; extern char __kprobes_text_start[], __kprobes_text_end[]; extern char __entry_text_start[], __entry_text_end[]; extern char __start_rodata[], __end_rodata[]; extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __softirqentry_text_start[], __softirqentry_text_end[]; extern char __start_once[], __end_once[]; /* Start and end of .ctors section - used for constructor calls. */ extern char __ctors_start[], __ctors_end[]; /* Start and end of .opd section - used for function descriptors. */ extern char __start_opd[], __end_opd[]; /* Start and end of instrumentation protected text section */ extern char __noinstr_text_start[], __noinstr_text_end[]; extern __visible const void __nosave_begin, __nosave_end; /* Function descriptor handling (if any). Override in asm/sections.h */ #ifndef dereference_function_descriptor #define dereference_function_descriptor(p) (p) #define dereference_kernel_function_descriptor(p) (p) #endif /* random extra sections (if any). Override * in asm/sections.h */ #ifndef arch_is_kernel_text static inline int arch_is_kernel_text(unsigned long addr) { return 0; } #endif #ifndef arch_is_kernel_data static inline int arch_is_kernel_data(unsigned long addr) { return 0; } #endif /* * Check if an address is part of freed initmem. This is needed on architectures * with virt == phys kernel mapping, for code that wants to check if an address * is part of a static object within [_stext, _end]. After initmem is freed, * memory can be allocated from it, and such allocations would then have * addresses within the range [_stext, _end]. */ #ifndef arch_is_kernel_initmem_freed static inline int arch_is_kernel_initmem_freed(unsigned long addr) { return 0; } #endif /** * memory_contains - checks if an object is contained within a memory region * @begin: virtual address of the beginning of the memory region * @end: virtual address of the end of the memory region * @virt: virtual address of the memory object * @size: size of the memory object * * Returns: true if the object specified by @virt and @size is entirely * contained within the memory region defined by @begin and @end, false * otherwise. 
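 *
 * Example (illustrative): check that @virt sits wholly inside .rodata:
 *
 *	memory_contains(__start_rodata, __end_rodata, virt, size)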
*/ static inline bool memory_contains(void *begin, void *end, void *virt, size_t size) { return virt >= begin && virt + size <= end; } /** * memory_intersects - checks if the region occupied by an object intersects * with another memory region * @begin: virtual address of the beginning of the memory region * @end: virtual address of the end of the memory region * @virt: virtual address of the memory object * @size: size of the memory object * * Returns: true if an object's memory region, specified by @virt and @size, * intersects with the region specified by @begin and @end, false otherwise. */ static inline bool memory_intersects(void *begin, void *end, void *virt, size_t size) { void *vend = virt + size; if (virt < end && vend > begin) return true; return false; } /** * init_section_contains - checks if an object is contained within the init * section * @virt: virtual address of the memory object * @size: size of the memory object * * Returns: true if the object specified by @virt and @size is entirely * contained within the init section, false otherwise. */ static inline bool init_section_contains(void *virt, size_t size) { return memory_contains(__init_begin, __init_end, virt, size); } /** * init_section_intersects - checks if the region occupied by an object * intersects with the init section * @virt: virtual address of the memory object * @size: size of the memory object * * Returns: true if an object's memory region, specified by @virt and @size, * intersects with the init section, false otherwise. */ static inline bool init_section_intersects(void *virt, size_t size) { return memory_intersects(__init_begin, __init_end, virt, size); } /** * is_kernel_rodata - checks if the pointer address is located in the * .rodata section * * @addr: address to check * * Returns: true if the address is located in .rodata, false otherwise. */ static inline bool is_kernel_rodata(unsigned long addr) { return addr >= (unsigned long)__start_rodata && addr < (unsigned long)__end_rodata; } #endif /* _ASM_GENERIC_SECTIONS_H_ */ flat.h 0000644 00000001142 14722073052 0005643 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_FLAT_H #define _ASM_GENERIC_FLAT_H #include <linux/uaccess.h> static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags, u32 *addr) { #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS return copy_from_user(addr, rp, 4) ? -EFAULT : 0; #else return get_user(*addr, rp); #endif } static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel) { #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS return copy_to_user(rp, &addr, 4) ? 
-EFAULT : 0; #else return put_user(addr, rp); #endif } #endif /* _ASM_GENERIC_FLAT_H */ audit_dir_write.h 0000644 00000001020 14722073052 0010066 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifdef __NR_rename __NR_rename, #endif #ifdef __NR_mkdir __NR_mkdir, #endif #ifdef __NR_rmdir __NR_rmdir, #endif #ifdef __NR_creat __NR_creat, #endif #ifdef __NR_link __NR_link, #endif #ifdef __NR_unlink __NR_unlink, #endif #ifdef __NR_symlink __NR_symlink, #endif #ifdef __NR_mknod __NR_mknod, #endif #ifdef __NR_mkdirat __NR_mkdirat, __NR_mknodat, __NR_unlinkat, #ifdef __NR_renameat __NR_renameat, #endif __NR_linkat, __NR_symlinkat, #endif #ifdef __NR_renameat2 __NR_renameat2, #endif bitsperlong.h 0000644 00000001120 14722073052 0007241 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_BITS_PER_LONG #define __ASM_GENERIC_BITS_PER_LONG #include <uapi/asm-generic/bitsperlong.h> #ifdef CONFIG_64BIT #define BITS_PER_LONG 64 #else #define BITS_PER_LONG 32 #endif /* CONFIG_64BIT */ /* * FIXME: The check currently breaks x86-64 build, so it's * temporarily disabled. Please fix x86-64 and reenable */ #if 0 && BITS_PER_LONG != __BITS_PER_LONG #error Inconsistent word size. Check asm/bitsperlong.h #endif #ifndef BITS_PER_LONG_LONG #define BITS_PER_LONG_LONG 64 #endif #endif /* __ASM_GENERIC_BITS_PER_LONG */ getorder.h 0000644 00000002345 14722073052 0006536 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_GETORDER_H #define __ASM_GENERIC_GETORDER_H #ifndef __ASSEMBLY__ #include <linux/compiler.h> #include <linux/log2.h> /** * get_order - Determine the allocation order of a memory size * @size: The size for which to get the order * * Determine the allocation order of a particular sized block of memory. This * is on a logarithmic scale, where: * * 0 -> 2^0 * PAGE_SIZE and below * 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1 * 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1 * 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1 * 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1 * ... * * The order returned is used to find the smallest allocation granule required * to hold an object of the specified size. * * The result is undefined if the size is 0. */ static inline __attribute_const__ int get_order(unsigned long size) { if (__builtin_constant_p(size)) { if (!size) return BITS_PER_LONG - PAGE_SHIFT; if (size < (1UL << PAGE_SHIFT)) return 0; return ilog2((size) - 1) - PAGE_SHIFT + 1; } size--; size >>= PAGE_SHIFT; #if BITS_PER_LONG == 32 return fls(size); #else return fls64(size); #endif } #endif /* __ASSEMBLY__ */ #endif /* __ASM_GENERIC_GETORDER_H */ tlb.h 0000644 00000045337 14722073052 0005514 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* include/asm-generic/tlb.h * * Generic TLB shootdown code * * Copyright 2001 Red Hat, Inc. * Based on code from mm/memory.c Copyright Linus Torvalds and others. * * Copyright 2011 Red Hat, Inc., Peter Zijlstra */ #ifndef _ASM_GENERIC__TLB_H #define _ASM_GENERIC__TLB_H #include <linux/mmu_notifier.h> #include <linux/swap.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> /* * Blindly accessing user memory from NMI context can be dangerous * if we're in the middle of switching the current user task or switching * the loaded mm. */ #ifndef nmi_uaccess_okay # define nmi_uaccess_okay() true #endif #ifdef CONFIG_MMU /* * Generic MMU-gather implementation. * * The mmu_gather data structure is used by the mm code to implement the * correct and efficient ordering of freeing pages and TLB invalidations. 
* * This correct ordering is: * * 1) unhook page * 2) TLB invalidate page * 3) free page * * That is, we must never free a page before we have ensured there are no live * translations left to it. Otherwise it might be possible to observe (or * worse, change) the page content after it has been reused. * * The mmu_gather API consists of: * * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather * * Finish in particular will issue a (final) TLB invalidate and free * all (remaining) queued pages. * * - tlb_start_vma() / tlb_end_vma(); mark the start / end of a VMA * * Defaults to flushing at tlb_end_vma() to reset the range; helps when * there are large holes between the VMAs. * * - tlb_remove_page() / __tlb_remove_page() * - tlb_remove_page_size() / __tlb_remove_page_size() * * __tlb_remove_page_size() is the basic primitive that queues a page for * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a * boolean indicating if the queue is (now) full and a call to * tlb_flush_mmu() is required. * * tlb_remove_page() and tlb_remove_page_size() imply the call to * tlb_flush_mmu() when required and have no return value. * * - tlb_change_page_size() * * call before __tlb_remove_page*() to set the current page-size; implies a * possible tlb_flush_mmu() call. * * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() * * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets * related state, like the range) * * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees * whatever pages are still batched. * * - mmu_gather::fullmm * * A flag set by tlb_gather_mmu() to indicate we're going to free * the entire mm; this allows a number of optimizations. * * - We can ignore tlb_{start,end}_vma(); because we don't * care about ranges. Everything will be shot down. * * - (RISC) architectures that use ASIDs can cycle to a new ASID * and delay the invalidation until ASID space runs out. * * - mmu_gather::need_flush_all * * A flag that can be set by the arch code if it wants to force * flush the entire TLB irrespective of the range. For instance * x86-PAE needs this when changing top-level entries. * * And allows the architecture to provide and implement tlb_flush(): * * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make * use of: * * - mmu_gather::start / mmu_gather::end * * which provides the range that needs to be flushed to cover the pages to * be freed. * * - mmu_gather::freed_tables * * set when we freed page table pages * * - tlb_get_unmap_shift() / tlb_get_unmap_size() * * returns the smallest TLB entry size unmapped in this range. * * If an architecture does not provide tlb_flush() a default implementation * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is * specified, in which case we'll default to flush_tlb_mm(). * * Additionally there are a few opt-in features: * * HAVE_MMU_GATHER_PAGE_SIZE * * This ensures we call tlb_flush() every time tlb_change_page_size() actually * changes the size and provides mmu_gather::page_size to tlb_flush(). * * HAVE_RCU_TABLE_FREE * * This provides tlb_remove_table(), to be used instead of tlb_remove_page() * for page directories (__p*_free_tlb()). This provides separate freeing of * the page-table pages themselves in a semi-RCU fashion (see comment below). * Useful if your architecture doesn't use IPIs for remote TLB invalidates * and therefore doesn't naturally serialize with software page-table walkers.
* * When used, an architecture is expected to provide __tlb_remove_table() * which does the actual freeing of these pages. * * MMU_GATHER_NO_RANGE * * Use this if your architecture lacks an efficient flush_tlb_range(). */ #ifdef CONFIG_HAVE_RCU_TABLE_FREE /* * Semi RCU freeing of the page directories. * * This is needed by some architectures to implement software pagetable walkers. * * gup_fast() and other software pagetable walkers do a lockless page-table * walk and therefore need some synchronization with the freeing of the page * directories. The chosen means to accomplish that is by disabling IRQs over * the walk. * * Architectures that use IPIs to flush TLBs will then automagically DTRT, * since we unlink the page, flush TLBs, free the page. Since the disabling of * IRQs delays the completion of the TLB flush we can never observe an already * freed page. * * Architectures that do not have this (PPC) need to delay the freeing by some * other means; this is that means. * * What we do is batch the freed directory pages (tables) and RCU free them. * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling * holds off grace periods. * * However, in order to batch these pages we need to allocate storage; this * allocation is deep inside the MM code and can thus easily fail on memory * pressure. To guarantee progress we fall back to single table freeing, see * the implementation of tlb_remove_table_one(). * */ struct mmu_table_batch { struct rcu_head rcu; unsigned int nr; void *tables[0]; }; #define MAX_TABLE_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) extern void tlb_remove_table(struct mmu_gather *tlb, void *table); /* * This allows an architecture that does not use the linux page-tables for * hardware to skip the TLBI when freeing page tables. */ #ifndef tlb_needs_table_invalidate #define tlb_needs_table_invalidate() (true) #endif void tlb_remove_table_sync_one(void); #else #ifdef tlb_needs_table_invalidate #error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE #endif static inline void tlb_remove_table_sync_one(void) { } #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER /* * If we can't allocate a page to make a big batch of page pointers * to work on, then just handle a few from the on-stack structure. */ #define MMU_GATHER_BUNDLE 8 struct mmu_gather_batch { struct mmu_gather_batch *next; unsigned int nr; unsigned int max; struct page *pages[0]; }; #define MAX_GATHER_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *)) /* * Limit the maximum number of mmu_gather batches to reduce a risk of soft * lockups for non-preemptible kernels on huge machines when a lot of memory * is zapped during unmapping. * 10K pages freed at once should be safe even without a preemption point. */ #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size); #endif /* * struct mmu_gather is an opaque type used by the mm code for passing around * any data needed by arch specific code for tlb_remove_page.
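 *
 * As a rough sketch of the lifecycle described above (illustrative only;
 * tlb_gather_mmu()/tlb_finish_mmu() and the actual call sites live in the
 * core mm code, not in this header):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	/* note the cleared PTE */
 *	tlb_remove_page(&tlb, page);		/* queue page, may flush */
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);	/* final invalidate + free */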
*/ struct mmu_gather { struct mm_struct *mm; #ifdef CONFIG_HAVE_RCU_TABLE_FREE struct mmu_table_batch *batch; #endif unsigned long start; unsigned long end; /* * we are in the middle of an operation to clear * a full mm and can make some optimizations */ unsigned int fullmm : 1; /* * we have performed an operation which * requires a complete flush of the tlb */ unsigned int need_flush_all : 1; /* * we have removed page directories */ unsigned int freed_tables : 1; /* * at which levels have we cleared entries? */ unsigned int cleared_ptes : 1; unsigned int cleared_pmds : 1; unsigned int cleared_puds : 1; unsigned int cleared_p4ds : 1; /* * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma */ unsigned int vma_exec : 1; unsigned int vma_huge : 1; unsigned int batch_count; #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[MMU_GATHER_BUNDLE]; #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE unsigned int page_size; #endif #endif }; void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); void tlb_flush_mmu(struct mmu_gather *tlb); void arch_tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end, bool force); static inline void __tlb_adjust_range(struct mmu_gather *tlb, unsigned long address, unsigned int range_size) { tlb->start = min(tlb->start, address); tlb->end = max(tlb->end, address + range_size); } static inline void __tlb_reset_range(struct mmu_gather *tlb) { if (tlb->fullmm) { tlb->start = tlb->end = ~0; } else { tlb->start = TASK_SIZE; tlb->end = 0; } tlb->freed_tables = 0; tlb->cleared_ptes = 0; tlb->cleared_pmds = 0; tlb->cleared_puds = 0; tlb->cleared_p4ds = 0; /* * Do not reset mmu_gather::vma_* fields here, we do not * call into tlb_start_vma() again to set them if there is an * intermediate flush. */ } #ifdef CONFIG_MMU_GATHER_NO_RANGE #if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma) #error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma() #endif /* * When an architecture does not have efficient means of range flushing TLBs * there is no point in doing intermediate flushes on tlb_end_vma() to keep the * range small. We equally don't have to worry about page granularity or other * things. * * All we need to do is issue a full flush for any !0 range. */ static inline void tlb_flush(struct mmu_gather *tlb) { if (tlb->end) flush_tlb_mm(tlb->mm); } static inline void tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } #define tlb_end_vma tlb_end_vma static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } #else /* CONFIG_MMU_GATHER_NO_RANGE */ #ifndef tlb_flush #if defined(tlb_start_vma) || defined(tlb_end_vma) #error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma() #endif /* * When an architecture does not provide its own tlb_flush() implementation * but does have a reasonably efficient flush_vma_range() implementation * use that. */ static inline void tlb_flush(struct mmu_gather *tlb) { if (tlb->fullmm || tlb->need_flush_all) { flush_tlb_mm(tlb->mm); } else if (tlb->end) { struct vm_area_struct vma = { .vm_mm = tlb->mm, .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) | (tlb->vma_huge ? 
VM_HUGETLB : 0), }; flush_tlb_range(&vma, tlb->start, tlb->end); } } static inline void tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { /* * flush_tlb_range() implementations that look at VM_HUGETLB (tile, * mips-4k) flush only large pages. * * flush_tlb_range() implementations that flush I-TLB also flush D-TLB * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing * range. * * We rely on tlb_end_vma() to issue a flush, such that when we reset * these values the batch is empty. */ tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB); tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); } #else static inline void tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } #endif #endif /* CONFIG_MMU_GATHER_NO_RANGE */ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { if (!tlb->end) return; tlb_flush(tlb); mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); __tlb_reset_range(tlb); } static inline void tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { if (__tlb_remove_page_size(tlb, page, page_size)) tlb_flush_mmu(tlb); } static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { return __tlb_remove_page_size(tlb, page, PAGE_SIZE); } /* tlb_remove_page * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when * required. */ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) { return tlb_remove_page_size(tlb, page, PAGE_SIZE); } static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size) { #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE if (tlb->page_size && tlb->page_size != page_size) { if (!tlb->fullmm) tlb_flush_mmu(tlb); } tlb->page_size = page_size; #endif } static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb) { if (tlb->cleared_ptes) return PAGE_SHIFT; if (tlb->cleared_pmds) return PMD_SHIFT; if (tlb->cleared_puds) return PUD_SHIFT; if (tlb->cleared_p4ds) return P4D_SHIFT; return PAGE_SHIFT; } static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) { return 1UL << tlb_get_unmap_shift(tlb); } /* * In the case of tlb vma handling, we can optimise these away in the * case where we're doing a full MM flush. When we're doing a munmap, * the vmas are adjusted to only cover the region to be torn down. */ #ifndef tlb_start_vma static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (tlb->fullmm) return; tlb_update_vma_flags(tlb, vma); flush_cache_range(vma, vma->vm_start, vma->vm_end); } #endif #ifndef tlb_end_vma static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (tlb->fullmm) return; /* * Do a TLB flush and reset the range at VMA boundaries; this avoids * the ranges growing with the unused space between consecutive VMAs, * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on * this. */ tlb_flush_mmu_tlbonly(tlb); } #endif /* * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end, * and set corresponding cleared_*. 
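 *
 * For instance (illustrative numbers, starting from a freshly reset
 * mmu_gather), after
 *
 *	tlb_flush_pte_range(tlb, 0x1000, PAGE_SIZE);
 *	tlb_flush_pmd_range(tlb, 0x200000, PMD_SIZE);
 *
 * tlb->start is 0x1000, tlb->end is 0x200000 + PMD_SIZE, both cleared_ptes
 * and cleared_pmds are set, and tlb_get_unmap_shift() returns PAGE_SHIFT,
 * the smallest granule seen.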
*/ static inline void tlb_flush_pte_range(struct mmu_gather *tlb, unsigned long address, unsigned long size) { __tlb_adjust_range(tlb, address, size); tlb->cleared_ptes = 1; } static inline void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, unsigned long size) { __tlb_adjust_range(tlb, address, size); tlb->cleared_pmds = 1; } static inline void tlb_flush_pud_range(struct mmu_gather *tlb, unsigned long address, unsigned long size) { __tlb_adjust_range(tlb, address, size); tlb->cleared_puds = 1; } static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, unsigned long address, unsigned long size) { __tlb_adjust_range(tlb, address, size); tlb->cleared_p4ds = 1; } #ifndef __tlb_remove_tlb_entry #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) #endif /** * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. * * Record the fact that pte's were really unmapped by updating the range, * so we can later optimise away the tlb invalidate. This helps when * userspace is unmapping already-unmapped pages, which happens quite a lot. */ #define tlb_remove_tlb_entry(tlb, ptep, address) \ do { \ tlb_flush_pte_range(tlb, address, PAGE_SIZE); \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ do { \ unsigned long _sz = huge_page_size(h); \ if (_sz >= P4D_SIZE) \ tlb_flush_p4d_range(tlb, address, _sz); \ else if (_sz >= PUD_SIZE) \ tlb_flush_pud_range(tlb, address, _sz); \ else if (_sz >= PMD_SIZE) \ tlb_flush_pmd_range(tlb, address, _sz); \ else \ tlb_flush_pte_range(tlb, address, _sz); \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) /** * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation * This is a nop so far, because only x86 needs it. */ #ifndef __tlb_remove_pmd_tlb_entry #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0) #endif #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ do { \ tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ } while (0) /** * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb * invalidation. This is a nop so far, because only x86 needs it. */ #ifndef __tlb_remove_pud_tlb_entry #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) #endif #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ do { \ tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \ __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ } while (0) /* * For things like page tables caches (ie caching addresses "inside" the * page tables, like x86 does), for legacy reasons, flushing an * individual page had better flush the page table caches behind it. This * is definitely how x86 works, for example. And if you have an * architected non-legacy page table cache (which I'm not aware of * anybody actually doing), you're going to have some architecturally * explicit flushing for that, likely *separate* from a regular TLB entry * flush, and thus you'd need more than just some range expansion.. 
* * So if we ever find an architecture * that would want something that odd, I think it is up to that * architecture to do its own odd thing, not cause pain for others * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com * * For now w.r.t page table cache, mark the range_size as PAGE_SIZE */ #ifndef pte_free_tlb #define pte_free_tlb(tlb, ptep, address) \ do { \ tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \ tlb->freed_tables = 1; \ __pte_free_tlb(tlb, ptep, address); \ } while (0) #endif #ifndef pmd_free_tlb #define pmd_free_tlb(tlb, pmdp, address) \ do { \ tlb_flush_pud_range(tlb, address, PAGE_SIZE); \ tlb->freed_tables = 1; \ __pmd_free_tlb(tlb, pmdp, address); \ } while (0) #endif #ifndef __ARCH_HAS_4LEVEL_HACK #ifndef pud_free_tlb #define pud_free_tlb(tlb, pudp, address) \ do { \ tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \ tlb->freed_tables = 1; \ __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif #endif #ifndef __ARCH_HAS_5LEVEL_HACK #ifndef p4d_free_tlb #define p4d_free_tlb(tlb, pudp, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ tlb->freed_tables = 1; \ __p4d_free_tlb(tlb, pudp, address); \ } while (0) #endif #endif #endif /* CONFIG_MMU */ #endif /* _ASM_GENERIC__TLB_H */ irq_regs.h 0000644 00000001405 14722073052 0006532 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Fallback per-CPU frame pointer holder * * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _ASM_GENERIC_IRQ_REGS_H #define _ASM_GENERIC_IRQ_REGS_H #include <linux/percpu.h> /* * Per-cpu current frame pointer - the location of the last exception frame on * the stack */ DECLARE_PER_CPU(struct pt_regs *, __irq_regs); static inline struct pt_regs *get_irq_regs(void) { return __this_cpu_read(__irq_regs); } static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) { struct pt_regs *old_regs; old_regs = __this_cpu_read(__irq_regs); __this_cpu_write(__irq_regs, new_regs); return old_regs; } #endif /* _ASM_GENERIC_IRQ_REGS_H */ parport.h 0000644 00000001134 14722073052 0006405 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PARPORT_H #define __ASM_GENERIC_PARPORT_H /* * An ISA bus may have i8255 parallel ports at well-known * locations in the I/O space, which are scanned by * parport_pc_find_isa_ports. * * Without ISA support, the driver will only attach * to devices on the PCI bus. */ static int parport_pc_find_isa_ports(int autoirq, int autodma); static int parport_pc_find_nonpci_ports(int autoirq, int autodma) { #ifdef CONFIG_ISA return parport_pc_find_isa_ports(autoirq, autodma); #else return 0; #endif } #endif /* __ASM_GENERIC_PARPORT_H */ pci.h 0000644 00000000621 14722073052 0005471 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/asm-generic/pci.h * * Copyright (C) 2003 Russell King */ #ifndef _ASM_GENERIC_PCI_H #define _ASM_GENERIC_PCI_H #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { return channel ? 
15 : 14; } #endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ #endif /* _ASM_GENERIC_PCI_H */ param.h 0000644 00000000557 14722073052 0006026 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PARAM_H #define __ASM_GENERIC_PARAM_H #include <uapi/asm-generic/param.h> # undef HZ # define HZ CONFIG_HZ /* Internal kernel timer frequency */ # define USER_HZ 100 /* some user interfaces are */ # define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ #endif /* __ASM_GENERIC_PARAM_H */ Kbuild 0000644 00000000351 14722073052 0005702 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 # # asm headers that all architectures except um should have # (This file is not included when SRCARCH=um since UML borrows several # asm headers from the host architecture.) mandatory-y += simd.h cmpxchg-local.h 0000644 00000002670 14722073052 0007445 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H #define __ASM_GENERIC_CMPXCHG_LOCAL_H #include <linux/types.h> #include <linux/irqflags.h> extern unsigned long wrong_size_cmpxchg(volatile void *ptr) __noreturn; /* * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned * long parameter, supporting various types of architectures. */ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long flags, prev; /* * Sanity checking, compile-time. */ if (size == 8 && sizeof(unsigned long) != 8) wrong_size_cmpxchg(ptr); raw_local_irq_save(flags); switch (size) { case 1: prev = *(u8 *)ptr; if (prev == old) *(u8 *)ptr = (u8)new; break; case 2: prev = *(u16 *)ptr; if (prev == old) *(u16 *)ptr = (u16)new; break; case 4: prev = *(u32 *)ptr; if (prev == old) *(u32 *)ptr = (u32)new; break; case 8: prev = *(u64 *)ptr; if (prev == old) *(u64 *)ptr = (u64)new; break; default: wrong_size_cmpxchg(ptr); } raw_local_irq_restore(flags); return prev; } /* * Generic version of __cmpxchg64_local. Takes a u64 parameter. */ static inline u64 __cmpxchg64_local_generic(volatile void *ptr, u64 old, u64 new) { u64 prev; unsigned long flags; raw_local_irq_save(flags); prev = *(u64 *)ptr; if (prev == old) *(u64 *)ptr = new; raw_local_irq_restore(flags); return prev; } #endif local64.h 0000644 00000007457 14722073052 0006200 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_LOCAL64_H #define _ASM_GENERIC_LOCAL64_H #include <linux/percpu.h> #include <asm/types.h> /* * A signed long type for operations which are atomic for a single CPU. * Usually used in combination with per-cpu variables. * * This is the default implementation, which uses atomic64_t. Which is * rather pointless. The whole point behind local64_t is that some processors * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs * running on this CPU. local64_t allows exploitation of such capabilities. */ /* Implement in terms of atomics.
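 *
 * A typical (illustrative) use is a per-CPU event counter; the names
 * below are made up:
 *
 *	static DEFINE_PER_CPU(local64_t, nr_events);
 *
 *	local64_inc(this_cpu_ptr(&nr_events));		/* fast, IRQ-safe */
 *	sum += local64_read(per_cpu_ptr(&nr_events, cpu));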
*/ #if BITS_PER_LONG == 64 #include <asm/local.h> typedef struct { local_t a; } local64_t; #define LOCAL64_INIT(i) { LOCAL_INIT(i) } #define local64_read(l) local_read(&(l)->a) #define local64_set(l,i) local_set((&(l)->a),(i)) #define local64_inc(l) local_inc(&(l)->a) #define local64_dec(l) local_dec(&(l)->a) #define local64_add(i,l) local_add((i),(&(l)->a)) #define local64_sub(i,l) local_sub((i),(&(l)->a)) #define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a)) #define local64_dec_and_test(l) local_dec_and_test(&(l)->a) #define local64_inc_and_test(l) local_inc_and_test(&(l)->a) #define local64_add_negative(i, l) local_add_negative((i), (&(l)->a)) #define local64_add_return(i, l) local_add_return((i), (&(l)->a)) #define local64_sub_return(i, l) local_sub_return((i), (&(l)->a)) #define local64_inc_return(l) local_inc_return(&(l)->a) #define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) #define local64_xchg(l, n) local_xchg((&(l)->a), (n)) #define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u)) #define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a) /* Non-atomic variants, ie. preemption disabled and won't be touched * in interrupt, etc. Some archs can optimize this case well. */ #define __local64_inc(l) local64_set((l), local64_read(l) + 1) #define __local64_dec(l) local64_set((l), local64_read(l) - 1) #define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) #define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) #else /* BITS_PER_LONG != 64 */ #include <linux/atomic.h> /* Don't use typedef: don't want them to be mixed with atomic_t's. */ typedef struct { atomic64_t a; } local64_t; #define LOCAL64_INIT(i) { ATOMIC_LONG_INIT(i) } #define local64_read(l) atomic64_read(&(l)->a) #define local64_set(l,i) atomic64_set((&(l)->a),(i)) #define local64_inc(l) atomic64_inc(&(l)->a) #define local64_dec(l) atomic64_dec(&(l)->a) #define local64_add(i,l) atomic64_add((i),(&(l)->a)) #define local64_sub(i,l) atomic64_sub((i),(&(l)->a)) #define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a)) #define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a) #define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a) #define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a)) #define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a)) #define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a)) #define local64_inc_return(l) atomic64_inc_return(&(l)->a) #define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) #define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) #define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u)) #define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a) /* Non-atomic variants, ie. preemption disabled and won't be touched * in interrupt, etc. Some archs can optimize this case well. */ #define __local64_inc(l) local64_set((l), local64_read(l) + 1) #define __local64_dec(l) local64_set((l), local64_read(l) - 1) #define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) #define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) #endif /* BITS_PER_LONG != 64 */ #endif /* _ASM_GENERIC_LOCAL64_H */ qrwlock.h 0000644 00000006543 14722073052 0006411 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Queue read/write lock * * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. 
* * Authors: Waiman Long <waiman.long@hp.com> */ #ifndef __ASM_GENERIC_QRWLOCK_H #define __ASM_GENERIC_QRWLOCK_H #include <linux/atomic.h> #include <asm/barrier.h> #include <asm/processor.h> #include <asm-generic/qrwlock_types.h> /* * Writer states & reader shift and bias. */ #define _QW_WAITING 0x100 /* A writer is waiting */ #define _QW_LOCKED 0x0ff /* A writer holds the lock */ #define _QW_WMASK 0x1ff /* Writer mask */ #define _QR_SHIFT 9 /* Reader count shift */ #define _QR_BIAS (1U << _QR_SHIFT) /* * External function declarations */ extern void queued_read_lock_slowpath(struct qrwlock *lock); extern void queued_write_lock_slowpath(struct qrwlock *lock); /** * queued_read_trylock - try to acquire read lock of a queue rwlock * @lock : Pointer to queue rwlock structure * Return: 1 if lock acquired, 0 if failed */ static inline int queued_read_trylock(struct qrwlock *lock) { u32 cnts; cnts = atomic_read(&lock->cnts); if (likely(!(cnts & _QW_WMASK))) { cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); if (likely(!(cnts & _QW_WMASK))) return 1; atomic_sub(_QR_BIAS, &lock->cnts); } return 0; } /** * queued_write_trylock - try to acquire write lock of a queue rwlock * @lock : Pointer to queue rwlock structure * Return: 1 if lock acquired, 0 if failed */ static inline int queued_write_trylock(struct qrwlock *lock) { u32 cnts; cnts = atomic_read(&lock->cnts); if (unlikely(cnts)) return 0; return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)); } /** * queued_read_lock - acquire read lock of a queue rwlock * @lock: Pointer to queue rwlock structure */ static inline void queued_read_lock(struct qrwlock *lock) { u32 cnts; cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts); if (likely(!(cnts & _QW_WMASK))) return; /* The slowpath will decrement the reader count, if necessary. */ queued_read_lock_slowpath(lock); } /** * queued_write_lock - acquire write lock of a queue rwlock * @lock : Pointer to queue rwlock structure */ static inline void queued_write_lock(struct qrwlock *lock) { u32 cnts = 0; /* Optimize for the unfair lock case where the fair flag is 0. */ if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))) return; queued_write_lock_slowpath(lock); } /** * queued_read_unlock - release read lock of a queue rwlock * @lock : Pointer to queue rwlock structure */ static inline void queued_read_unlock(struct qrwlock *lock) { /* * Atomically decrement the reader count */ (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts); } /** * queued_write_unlock - release write lock of a queue rwlock * @lock : Pointer to queue rwlock structure */ static inline void queued_write_unlock(struct qrwlock *lock) { smp_store_release(&lock->wlocked, 0); } /* * Remapping rwlock architecture specific functions to the corresponding * queue rwlock functions. */ #define arch_read_lock(l) queued_read_lock(l) #define arch_write_lock(l) queued_write_lock(l) #define arch_read_trylock(l) queued_read_trylock(l) #define arch_write_trylock(l) queued_write_trylock(l) #define arch_read_unlock(l) queued_read_unlock(l) #define arch_write_unlock(l) queued_write_unlock(l) #endif /* __ASM_GENERIC_QRWLOCK_H */ atomic-instrumented.h 0000644 00000130231 14722073052 0010712 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 // Generated by scripts/atomic/gen-atomic-instrumented.sh // DO NOT MODIFY THIS FILE DIRECTLY /* * This file provides wrappers with KASAN instrumentation for atomic operations. 
* To use this functionality an arch's atomic.h file needs to define all * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include * this file at the end. This file provides atomic_read() that forwards to * arch_atomic_read() for actual atomic operation. * Note: if an arch atomic operation is implemented by means of other atomic * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid * double instrumentation. */ #ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H #define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H #include <linux/build_bug.h> #include <linux/kasan-checks.h> static inline int atomic_read(const atomic_t *v) { kasan_check_read(v, sizeof(*v)); return arch_atomic_read(v); } #define atomic_read atomic_read #if defined(arch_atomic_read_acquire) static inline int atomic_read_acquire(const atomic_t *v) { kasan_check_read(v, sizeof(*v)); return arch_atomic_read_acquire(v); } #define atomic_read_acquire atomic_read_acquire #endif static inline void atomic_set(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); arch_atomic_set(v, i); } #define atomic_set atomic_set #if defined(arch_atomic_set_release) static inline void atomic_set_release(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); arch_atomic_set_release(v, i); } #define atomic_set_release atomic_set_release #endif static inline void atomic_add(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_add(i, v); } #define atomic_add atomic_add #if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) static inline int atomic_add_return(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_add_return(i, v); } #define atomic_add_return atomic_add_return #endif #if defined(arch_atomic_add_return_acquire) static inline int atomic_add_return_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_add_return_acquire(i, v); } #define atomic_add_return_acquire atomic_add_return_acquire #endif #if defined(arch_atomic_add_return_release) static inline int atomic_add_return_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } #define atomic_add_return_release atomic_add_return_release #endif #if defined(arch_atomic_add_return_relaxed) static inline int atomic_add_return_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } #define atomic_add_return_relaxed atomic_add_return_relaxed #endif #if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) static inline int atomic_fetch_add(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } #define atomic_fetch_add atomic_fetch_add #endif #if defined(arch_atomic_fetch_add_acquire) static inline int atomic_fetch_add_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } #define atomic_fetch_add_acquire atomic_fetch_add_acquire #endif #if defined(arch_atomic_fetch_add_release) static inline int atomic_fetch_add_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } #define atomic_fetch_add_release atomic_fetch_add_release #endif #if defined(arch_atomic_fetch_add_relaxed) static inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } #define 
atomic_fetch_add_relaxed atomic_fetch_add_relaxed #endif static inline void atomic_sub(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_sub(i, v); } #define atomic_sub atomic_sub #if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) static inline int atomic_sub_return(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } #define atomic_sub_return atomic_sub_return #endif #if defined(arch_atomic_sub_return_acquire) static inline int atomic_sub_return_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } #define atomic_sub_return_acquire atomic_sub_return_acquire #endif #if defined(arch_atomic_sub_return_release) static inline int atomic_sub_return_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } #define atomic_sub_return_release atomic_sub_return_release #endif #if defined(arch_atomic_sub_return_relaxed) static inline int atomic_sub_return_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } #define atomic_sub_return_relaxed atomic_sub_return_relaxed #endif #if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) static inline int atomic_fetch_sub(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub(i, v); } #define atomic_fetch_sub atomic_fetch_sub #endif #if defined(arch_atomic_fetch_sub_acquire) static inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire #endif #if defined(arch_atomic_fetch_sub_release) static inline int atomic_fetch_sub_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } #define atomic_fetch_sub_release atomic_fetch_sub_release #endif #if defined(arch_atomic_fetch_sub_relaxed) static inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed #endif #if defined(arch_atomic_inc) static inline void atomic_inc(atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_inc(v); } #define atomic_inc atomic_inc #endif #if defined(arch_atomic_inc_return) static inline int atomic_inc_return(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } #define atomic_inc_return atomic_inc_return #endif #if defined(arch_atomic_inc_return_acquire) static inline int atomic_inc_return_acquire(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } #define atomic_inc_return_acquire atomic_inc_return_acquire #endif #if defined(arch_atomic_inc_return_release) static inline int atomic_inc_return_release(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } #define atomic_inc_return_release atomic_inc_return_release #endif #if defined(arch_atomic_inc_return_relaxed) static inline int atomic_inc_return_relaxed(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } #define atomic_inc_return_relaxed atomic_inc_return_relaxed #endif #if defined(arch_atomic_fetch_inc) static inline int atomic_fetch_inc(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } #define atomic_fetch_inc 
atomic_fetch_inc #endif #if defined(arch_atomic_fetch_inc_acquire) static inline int atomic_fetch_inc_acquire(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire #endif #if defined(arch_atomic_fetch_inc_release) static inline int atomic_fetch_inc_release(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } #define atomic_fetch_inc_release atomic_fetch_inc_release #endif #if defined(arch_atomic_fetch_inc_relaxed) static inline int atomic_fetch_inc_relaxed(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed #endif #if defined(arch_atomic_dec) static inline void atomic_dec(atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_dec(v); } #define atomic_dec atomic_dec #endif #if defined(arch_atomic_dec_return) static inline int atomic_dec_return(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } #define atomic_dec_return atomic_dec_return #endif #if defined(arch_atomic_dec_return_acquire) static inline int atomic_dec_return_acquire(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } #define atomic_dec_return_acquire atomic_dec_return_acquire #endif #if defined(arch_atomic_dec_return_release) static inline int atomic_dec_return_release(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } #define atomic_dec_return_release atomic_dec_return_release #endif #if defined(arch_atomic_dec_return_relaxed) static inline int atomic_dec_return_relaxed(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_return_relaxed(v); } #define atomic_dec_return_relaxed atomic_dec_return_relaxed #endif #if defined(arch_atomic_fetch_dec) static inline int atomic_fetch_dec(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } #define atomic_fetch_dec atomic_fetch_dec #endif #if defined(arch_atomic_fetch_dec_acquire) static inline int atomic_fetch_dec_acquire(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_acquire(v); } #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire #endif #if defined(arch_atomic_fetch_dec_release) static inline int atomic_fetch_dec_release(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } #define atomic_fetch_dec_release atomic_fetch_dec_release #endif #if defined(arch_atomic_fetch_dec_relaxed) static inline int atomic_fetch_dec_relaxed(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed #endif static inline void atomic_and(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_and(i, v); } #define atomic_and atomic_and #if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) static inline int atomic_fetch_and(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } #define atomic_fetch_and atomic_fetch_and #endif #if defined(arch_atomic_fetch_and_acquire) static inline int atomic_fetch_and_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } #define atomic_fetch_and_acquire atomic_fetch_and_acquire #endif #if defined(arch_atomic_fetch_and_release) static inline int atomic_fetch_and_release(int i, 
atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } #define atomic_fetch_and_release atomic_fetch_and_release #endif #if defined(arch_atomic_fetch_and_relaxed) static inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed #endif #if defined(arch_atomic_andnot) static inline void atomic_andnot(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } #define atomic_andnot atomic_andnot #endif #if defined(arch_atomic_fetch_andnot) static inline int atomic_fetch_andnot(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } #define atomic_fetch_andnot atomic_fetch_andnot #endif #if defined(arch_atomic_fetch_andnot_acquire) static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_acquire(i, v); } #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire #endif #if defined(arch_atomic_fetch_andnot_release) static inline int atomic_fetch_andnot_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } #define atomic_fetch_andnot_release atomic_fetch_andnot_release #endif #if defined(arch_atomic_fetch_andnot_relaxed) static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed #endif static inline void atomic_or(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_or(i, v); } #define atomic_or atomic_or #if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) static inline int atomic_fetch_or(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } #define atomic_fetch_or atomic_fetch_or #endif #if defined(arch_atomic_fetch_or_acquire) static inline int atomic_fetch_or_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } #define atomic_fetch_or_acquire atomic_fetch_or_acquire #endif #if defined(arch_atomic_fetch_or_release) static inline int atomic_fetch_or_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } #define atomic_fetch_or_release atomic_fetch_or_release #endif #if defined(arch_atomic_fetch_or_relaxed) static inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #endif static inline void atomic_xor(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_xor(i, v); } #define atomic_xor atomic_xor #if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) static inline int atomic_fetch_xor(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } #define atomic_fetch_xor atomic_fetch_xor #endif #if defined(arch_atomic_fetch_xor_acquire) static inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire #endif #if defined(arch_atomic_fetch_xor_release) static inline int atomic_fetch_xor_release(int i, atomic_t *v) { kasan_check_write(v, 
sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } #define atomic_fetch_xor_release atomic_fetch_xor_release #endif #if defined(arch_atomic_fetch_xor_relaxed) static inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor_relaxed(i, v); } #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #endif #if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) static inline int atomic_xchg(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } #define atomic_xchg atomic_xchg #endif #if defined(arch_atomic_xchg_acquire) static inline int atomic_xchg_acquire(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } #define atomic_xchg_acquire atomic_xchg_acquire #endif #if defined(arch_atomic_xchg_release) static inline int atomic_xchg_release(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } #define atomic_xchg_release atomic_xchg_release #endif #if defined(arch_atomic_xchg_relaxed) static inline int atomic_xchg_relaxed(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } #define atomic_xchg_relaxed atomic_xchg_relaxed #endif #if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { kasan_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } #define atomic_cmpxchg atomic_cmpxchg #endif #if defined(arch_atomic_cmpxchg_acquire) static inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { kasan_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire #endif #if defined(arch_atomic_cmpxchg_release) static inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { kasan_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } #define atomic_cmpxchg_release atomic_cmpxchg_release #endif #if defined(arch_atomic_cmpxchg_relaxed) static inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { kasan_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed #endif #if defined(arch_atomic_try_cmpxchg) static inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { kasan_check_write(v, sizeof(*v)); kasan_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } #define atomic_try_cmpxchg atomic_try_cmpxchg #endif #if defined(arch_atomic_try_cmpxchg_acquire) static inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { kasan_check_write(v, sizeof(*v)); kasan_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire #endif #if defined(arch_atomic_try_cmpxchg_release) static inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { kasan_check_write(v, sizeof(*v)); kasan_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_release(v, old, new); } #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release #endif #if defined(arch_atomic_try_cmpxchg_relaxed) static inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { kasan_check_write(v, sizeof(*v)); kasan_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } #define atomic_try_cmpxchg_relaxed 
atomic_try_cmpxchg_relaxed #endif #if defined(arch_atomic_sub_and_test) static inline bool atomic_sub_and_test(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } #define atomic_sub_and_test atomic_sub_and_test #endif #if defined(arch_atomic_dec_and_test) static inline bool atomic_dec_and_test(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } #define atomic_dec_and_test atomic_dec_and_test #endif #if defined(arch_atomic_inc_and_test) static inline bool atomic_inc_and_test(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } #define atomic_inc_and_test atomic_inc_and_test #endif #if defined(arch_atomic_add_negative) static inline bool atomic_add_negative(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } #define atomic_add_negative atomic_add_negative #endif #if defined(arch_atomic_fetch_add_unless) static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { kasan_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } #define atomic_fetch_add_unless atomic_fetch_add_unless #endif #if defined(arch_atomic_add_unless) static inline bool atomic_add_unless(atomic_t *v, int a, int u) { kasan_check_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } #define atomic_add_unless atomic_add_unless #endif #if defined(arch_atomic_inc_not_zero) static inline bool atomic_inc_not_zero(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } #define atomic_inc_not_zero atomic_inc_not_zero #endif #if defined(arch_atomic_inc_unless_negative) static inline bool atomic_inc_unless_negative(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } #define atomic_inc_unless_negative atomic_inc_unless_negative #endif #if defined(arch_atomic_dec_unless_positive) static inline bool atomic_dec_unless_positive(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } #define atomic_dec_unless_positive atomic_dec_unless_positive #endif #if defined(arch_atomic_dec_if_positive) static inline int atomic_dec_if_positive(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } #define atomic_dec_if_positive atomic_dec_if_positive #endif static inline s64 atomic64_read(const atomic64_t *v) { kasan_check_read(v, sizeof(*v)); return arch_atomic64_read(v); } #define atomic64_read atomic64_read #if defined(arch_atomic64_read_acquire) static inline s64 atomic64_read_acquire(const atomic64_t *v) { kasan_check_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } #define atomic64_read_acquire atomic64_read_acquire #endif static inline void atomic64_set(atomic64_t *v, s64 i) { kasan_check_write(v, sizeof(*v)); arch_atomic64_set(v, i); } #define atomic64_set atomic64_set #if defined(arch_atomic64_set_release) static inline void atomic64_set_release(atomic64_t *v, s64 i) { kasan_check_write(v, sizeof(*v)); arch_atomic64_set_release(v, i); } #define atomic64_set_release atomic64_set_release #endif static inline void atomic64_add(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic64_add(i, v); } #define atomic64_add atomic64_add #if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) static inline s64 atomic64_add_return(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } #define atomic64_add_return 
atomic64_add_return #endif #if defined(arch_atomic64_add_return_acquire) static inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } #define atomic64_add_return_acquire atomic64_add_return_acquire #endif #if defined(arch_atomic64_add_return_release) static inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } #define atomic64_add_return_release atomic64_add_return_release #endif #if defined(arch_atomic64_add_return_relaxed) static inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } #define atomic64_add_return_relaxed atomic64_add_return_relaxed #endif #if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } #define atomic64_fetch_add atomic64_fetch_add #endif #if defined(arch_atomic64_fetch_add_acquire) static inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire #endif #if defined(arch_atomic64_fetch_add_release) static inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } #define atomic64_fetch_add_release atomic64_fetch_add_release #endif #if defined(arch_atomic64_fetch_add_relaxed) static inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #endif static inline void atomic64_sub(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } #define atomic64_sub atomic64_sub #if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) static inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_sub_return(i, v); } #define atomic64_sub_return atomic64_sub_return #endif #if defined(arch_atomic64_sub_return_acquire) static inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } #define atomic64_sub_return_acquire atomic64_sub_return_acquire #endif #if defined(arch_atomic64_sub_return_release) static inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } #define atomic64_sub_return_release atomic64_sub_return_release #endif #if defined(arch_atomic64_sub_return_relaxed) static inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #endif #if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) static inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } #define atomic64_fetch_sub atomic64_fetch_sub #endif #if defined(arch_atomic64_fetch_sub_acquire) static inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, 
#if defined(arch_atomic64_fetch_sub_acquire)
static inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_sub_acquire(i, v);
}
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
#endif

#if defined(arch_atomic64_fetch_sub_release)
static inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_sub_release(i, v);
}
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
#endif

#if defined(arch_atomic64_fetch_sub_relaxed)
static inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_sub_relaxed(i, v);
}
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#endif

#if defined(arch_atomic64_inc)
static inline void atomic64_inc(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_inc(v);
}
#define atomic64_inc atomic64_inc
#endif

#if defined(arch_atomic64_inc_return)
static inline s64 atomic64_inc_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_return(v);
}
#define atomic64_inc_return atomic64_inc_return
#endif

#if defined(arch_atomic64_inc_return_acquire)
static inline s64 atomic64_inc_return_acquire(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_return_acquire(v);
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
#endif

#if defined(arch_atomic64_inc_return_release)
static inline s64 atomic64_inc_return_release(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_return_release(v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
#endif

#if defined(arch_atomic64_inc_return_relaxed)
static inline s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_return_relaxed(v);
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#endif

#if defined(arch_atomic64_fetch_inc)
static inline s64 atomic64_fetch_inc(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_inc(v);
}
#define atomic64_fetch_inc atomic64_fetch_inc
#endif

#if defined(arch_atomic64_fetch_inc_acquire)
static inline s64 atomic64_fetch_inc_acquire(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_inc_acquire(v);
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
#endif

#if defined(arch_atomic64_fetch_inc_release)
static inline s64 atomic64_fetch_inc_release(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_inc_release(v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
#endif

#if defined(arch_atomic64_fetch_inc_relaxed)
static inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_inc_relaxed(v);
}
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
#endif

#if defined(arch_atomic64_dec)
static inline void atomic64_dec(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_dec(v);
}
#define atomic64_dec atomic64_dec
#endif

#if defined(arch_atomic64_dec_return)
static inline s64 atomic64_dec_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_return(v);
}
#define atomic64_dec_return atomic64_dec_return
#endif

#if defined(arch_atomic64_dec_return_acquire)
static inline s64 atomic64_dec_return_acquire(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_return_acquire(v);
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
#endif
#if defined(arch_atomic64_dec_return_release)
static inline s64 atomic64_dec_return_release(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_return_release(v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
#endif

#if defined(arch_atomic64_dec_return_relaxed)
static inline s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_return_relaxed(v);
}
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
#endif

#if defined(arch_atomic64_fetch_dec)
static inline s64 atomic64_fetch_dec(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_dec(v);
}
#define atomic64_fetch_dec atomic64_fetch_dec
#endif

#if defined(arch_atomic64_fetch_dec_acquire)
static inline s64 atomic64_fetch_dec_acquire(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_dec_acquire(v);
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
#endif

#if defined(arch_atomic64_fetch_dec_release)
static inline s64 atomic64_fetch_dec_release(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_dec_release(v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
#endif

#if defined(arch_atomic64_fetch_dec_relaxed)
static inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_dec_relaxed(v);
}
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
#endif

static inline void atomic64_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_and(i, v);
}
#define atomic64_and atomic64_and

#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
static inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_and(i, v);
}
#define atomic64_fetch_and atomic64_fetch_and
#endif

#if defined(arch_atomic64_fetch_and_acquire)
static inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_and_acquire(i, v);
}
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
#endif

#if defined(arch_atomic64_fetch_and_release)
static inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_and_release(i, v);
}
#define atomic64_fetch_and_release atomic64_fetch_and_release
#endif

#if defined(arch_atomic64_fetch_and_relaxed)
static inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_and_relaxed(i, v);
}
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#endif

#if defined(arch_atomic64_andnot)
static inline void atomic64_andnot(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_andnot(i, v);
}
#define atomic64_andnot atomic64_andnot
#endif

#if defined(arch_atomic64_fetch_andnot)
static inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_andnot(i, v);
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
#endif

#if defined(arch_atomic64_fetch_andnot_acquire)
static inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_andnot_acquire(i, v);
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
#endif
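/*
 * Editor's note: a small illustrative sketch, not in the original header.
 * atomic64_fetch_andnot(mask, v) atomically clears the bits in mask and
 * returns the previous value, so a caller can tell whether it was the one
 * that actually cleared a flag. The flag name is hypothetical.
 */
#define MY_FLAG_PENDING	(1ULL << 0)

static bool clear_pending(atomic64_t *flags)
{
	/* true only for the caller that observed the bit still set */
	return atomic64_fetch_andnot(MY_FLAG_PENDING, flags) & MY_FLAG_PENDING;
}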
#if defined(arch_atomic64_fetch_andnot_release)
static inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_andnot_release(i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
#endif

#if defined(arch_atomic64_fetch_andnot_relaxed)
static inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_andnot_relaxed(i, v);
}
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#endif

static inline void atomic64_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_or(i, v);
}
#define atomic64_or atomic64_or

#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
static inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_or(i, v);
}
#define atomic64_fetch_or atomic64_fetch_or
#endif

#if defined(arch_atomic64_fetch_or_acquire)
static inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_or_acquire(i, v);
}
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
#endif

#if defined(arch_atomic64_fetch_or_release)
static inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_or_release(i, v);
}
#define atomic64_fetch_or_release atomic64_fetch_or_release
#endif

#if defined(arch_atomic64_fetch_or_relaxed)
static inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_or_relaxed(i, v);
}
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#endif

static inline void atomic64_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_xor(i, v);
}
#define atomic64_xor atomic64_xor

#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
static inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_xor(i, v);
}
#define atomic64_fetch_xor atomic64_fetch_xor
#endif

#if defined(arch_atomic64_fetch_xor_acquire)
static inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_xor_acquire(i, v);
}
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
#endif

#if defined(arch_atomic64_fetch_xor_release)
static inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_xor_release(i, v);
}
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
#endif

#if defined(arch_atomic64_fetch_xor_relaxed)
static inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_xor_relaxed(i, v);
}
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#endif

#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
static inline s64 atomic64_xchg(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_xchg(v, i);
}
#define atomic64_xchg atomic64_xchg
#endif

#if defined(arch_atomic64_xchg_acquire)
static inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_xchg_acquire(v, i);
}
#define atomic64_xchg_acquire atomic64_xchg_acquire
#endif

#if defined(arch_atomic64_xchg_release)
static inline s64 atomic64_xchg_release(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_xchg_release(v, i);
}
#define atomic64_xchg_release atomic64_xchg_release
#endif

#if defined(arch_atomic64_xchg_relaxed)
static inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_xchg_relaxed(v, i);
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
#endif
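/*
 * Editor's note: illustrative only, not part of the original header.
 * xchg-style operations return the old value, which is enough to build a
 * crude test-and-set lock; a real kernel would use a proper spinlock, and
 * the names here are hypothetical.
 */
static atomic64_t tas_lock = ATOMIC64_INIT(0);

static void crude_lock(void)
{
	/* Spin until we observe the old value 0, i.e. we took the lock. */
	while (atomic64_xchg(&tas_lock, 1))
		cpu_relax();
}

static void crude_unlock(void)
{
	atomic64_set(&tas_lock, 0);
}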
#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_cmpxchg(v, old, new);
}
#define atomic64_cmpxchg atomic64_cmpxchg
#endif

#if defined(arch_atomic64_cmpxchg_acquire)
static inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_cmpxchg_acquire(v, old, new);
}
#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
#endif

#if defined(arch_atomic64_cmpxchg_release)
static inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_cmpxchg_release(v, old, new);
}
#define atomic64_cmpxchg_release atomic64_cmpxchg_release
#endif

#if defined(arch_atomic64_cmpxchg_relaxed)
static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
#endif

#if defined(arch_atomic64_try_cmpxchg)
static inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_write(old, sizeof(*old));
	return arch_atomic64_try_cmpxchg(v, old, new);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
#endif

#if defined(arch_atomic64_try_cmpxchg_acquire)
static inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_write(old, sizeof(*old));
	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
#endif

#if defined(arch_atomic64_try_cmpxchg_release)
static inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_write(old, sizeof(*old));
	return arch_atomic64_try_cmpxchg_release(v, old, new);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
#endif

#if defined(arch_atomic64_try_cmpxchg_relaxed)
static inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_write(old, sizeof(*old));
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
#endif

#if defined(arch_atomic64_sub_and_test)
static inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_and_test(i, v);
}
#define atomic64_sub_and_test atomic64_sub_and_test
#endif

#if defined(arch_atomic64_dec_and_test)
static inline bool atomic64_dec_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_and_test(v);
}
#define atomic64_dec_and_test atomic64_dec_and_test
#endif

#if defined(arch_atomic64_inc_and_test)
static inline bool atomic64_inc_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_and_test(v);
}
#define atomic64_inc_and_test atomic64_inc_and_test
#endif

#if defined(arch_atomic64_add_negative)
static inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_negative(i, v);
}
#define atomic64_add_negative atomic64_add_negative
#endif
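/*
 * Editor's note: a minimal sketch, not in the original header, of the
 * usual atomic64_try_cmpxchg() update loop. On failure, *old is refreshed
 * with the current value, so the loop re-reads for free. The function and
 * its saturation policy are hypothetical.
 */
static void saturating_add(atomic64_t *v, s64 delta, s64 limit)
{
	s64 old = atomic64_read(v);
	s64 new;

	do {
		new = old + delta;
		if (new > limit)	/* clamp instead of overflowing */
			new = limit;
	} while (!atomic64_try_cmpxchg(v, &old, new));
}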
#if defined(arch_atomic64_fetch_add_unless)
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_add_unless(v, a, u);
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif

#if defined(arch_atomic64_add_unless)
static inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_unless(v, a, u);
}
#define atomic64_add_unless atomic64_add_unless
#endif

#if defined(arch_atomic64_inc_not_zero)
static inline bool atomic64_inc_not_zero(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_not_zero(v);
}
#define atomic64_inc_not_zero atomic64_inc_not_zero
#endif

#if defined(arch_atomic64_inc_unless_negative)
static inline bool atomic64_inc_unless_negative(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_unless_negative(v);
}
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
#endif

#if defined(arch_atomic64_dec_unless_positive)
static inline bool atomic64_dec_unless_positive(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_unless_positive(v);
}
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
#endif

#if defined(arch_atomic64_dec_if_positive)
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_if_positive(v);
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif

#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
#define xchg(ptr, ...)						\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_xchg(__ai_ptr, __VA_ARGS__);			\
})
#endif

#if defined(arch_xchg_acquire)
#define xchg_acquire(ptr, ...)					\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_xchg_acquire(__ai_ptr, __VA_ARGS__);		\
})
#endif

#if defined(arch_xchg_release)
#define xchg_release(ptr, ...)					\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_xchg_release(__ai_ptr, __VA_ARGS__);		\
})
#endif

#if defined(arch_xchg_relaxed)
#define xchg_relaxed(ptr, ...)					\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_xchg_relaxed(__ai_ptr, __VA_ARGS__);		\
})
#endif

#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
#define cmpxchg(ptr, ...)					\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg(__ai_ptr, __VA_ARGS__);			\
})
#endif

#if defined(arch_cmpxchg_acquire)
#define cmpxchg_acquire(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__);		\
})
#endif

#if defined(arch_cmpxchg_release)
#define cmpxchg_release(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg_release(__ai_ptr, __VA_ARGS__);		\
})
#endif

#if defined(arch_cmpxchg_relaxed)
#define cmpxchg_relaxed(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__);		\
})
#endif

#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
#define cmpxchg64(ptr, ...)					\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg64(__ai_ptr, __VA_ARGS__);			\
})
#endif
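/*
 * Editor's note: illustrative sketch, not part of the original header.
 * The macros above copy ptr into __ai_ptr once so that an argument with
 * side effects is evaluated exactly once, even though the macro body then
 * uses the pointer twice (for the KASAN check and for the arch_ call).
 * The demo function is hypothetical.
 */
static int demo(int *slots, int *idx)
{
	/* &slots[(*idx)++] is evaluated a single time. */
	return xchg(&slots[(*idx)++], 0);
}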
#if defined(arch_cmpxchg64_acquire)
#define cmpxchg64_acquire(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__);		\
})
#endif

#if defined(arch_cmpxchg64_release)
#define cmpxchg64_release(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__);		\
})
#endif

#if defined(arch_cmpxchg64_relaxed)
#define cmpxchg64_relaxed(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__);		\
})
#endif

#define cmpxchg_local(ptr, ...)					\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg_local(__ai_ptr, __VA_ARGS__);		\
})

#define cmpxchg64_local(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__);		\
})

#define sync_cmpxchg(ptr, ...)					\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__);		\
})

#define cmpxchg_double(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr));	\
	arch_cmpxchg_double(__ai_ptr, __VA_ARGS__);		\
})

#define cmpxchg_double_local(ptr, ...)				\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr));	\
	arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__);	\
})

#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
// b29b625d5de9280f680e42c7be859b55b15e5f6a
cmpxchg.h 0000644 00000004356 14722073052 0006350 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Generic UP xchg and cmpxchg using interrupt disablement. Does not
 * support SMP.
 */
#ifndef __ASM_GENERIC_CMPXCHG_H
#define __ASM_GENERIC_CMPXCHG_H

#ifdef CONFIG_SMP
#error "Cannot use generic cmpxchg on SMP"
#endif

#include <linux/types.h>
#include <linux/irqflags.h>

#ifndef xchg

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalidly-sized xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, flags;

	switch (size) {
	case 1:
#ifdef __xchg_u8
		return __xchg_u8(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u8 *)ptr;
		*(volatile u8 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u8 */

	case 2:
#ifdef __xchg_u16
		return __xchg_u16(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u16 *)ptr;
		*(volatile u16 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u16 */

	case 4:
#ifdef __xchg_u32
		return __xchg_u32(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u32 *)ptr;
		*(volatile u32 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u32 */

#ifdef CONFIG_64BIT
	case 8:
#ifdef __xchg_u64
		return __xchg_u64(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u64 *)ptr;
		*(volatile u64 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u64 */
#endif /* CONFIG_64BIT */

	default:
		__xchg_called_with_bad_pointer();
		return x;
	}
}

#define xchg(ptr, x) ({ \
	((__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})

#endif /* xchg */
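/*
 * Editor's note: illustrative sketch, not part of the original header.
 * On a uniprocessor kernel the only source of concurrency is an interrupt,
 * so disabling IRQs around the load/store pair above is enough to make
 * xchg() atomic. Typical use, e.g. consuming a pending-work flag:
 */
static volatile unsigned long pending_work;

static void irq_handler_sets_work(void)
{
	pending_work = 1;	/* set from interrupt context */
}

static void process_work_once(void)
{
	/* Atomically fetch and clear; safe against the IRQ handler on UP. */
	if (xchg(&pending_work, 0))
		do_the_work();	/* hypothetical helper */
}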
/*
 * Atomic compare and exchange.
 */
#include <asm-generic/cmpxchg-local.h>

#ifndef cmpxchg_local
#define cmpxchg_local(ptr, o, n) ({					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))));		       \
})
#endif

#ifndef cmpxchg64_local
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#define cmpxchg(ptr, o, n)	cmpxchg_local((ptr), (o), (n))
#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))

#endif /* __ASM_GENERIC_CMPXCHG_H */
extable.h 0000644 00000001442 14722073052 0006344 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_EXTABLE_H
#define __ASM_GENERIC_EXTABLE_H

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);

#endif
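/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * A typical fixup_exception() implementation looks up the faulting
 * instruction address in the (sorted) exception table and, on a hit,
 * redirects execution to the fixup. The function name here is
 * hypothetical; search_exception_tables() and the instruction_pointer
 * helpers are real kernel APIs, though details vary per architecture.
 */
static int fixup_exception_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	/* Binary-search the sorted table by faulting address. */
	e = search_exception_tables(instruction_pointer(regs));
	if (!e)
		return 0;	/* genuine fault, let the caller oops */

	instruction_pointer_set(regs, e->fixup);	/* resume at fixup */
	return 1;
}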
export.h 0000644 00000004213 14722073052 0006240 0 ustar 00
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H

#ifndef KSYM_FUNC
#define KSYM_FUNC(x) x
#endif
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
#define KSYM_ALIGN 4
#elif defined(CONFIG_64BIT)
#define KSYM_ALIGN 8
#else
#define KSYM_ALIGN 4
#endif
#ifndef KCRC_ALIGN
#define KCRC_ALIGN 4
#endif

.macro __put, val, name
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	.long	\val - ., \name - ., 0
#elif defined(CONFIG_64BIT)
	.quad	\val, \name, 0
#else
	.long	\val, \name, 0
#endif
.endm

/*
 * note on .section use: @progbits vs %progbits nastiness doesn't matter,
 * since we immediately emit into those sections anyway.
 */
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
	.globl	__ksymtab_\name
	.section ___ksymtab\sec+\name,"a"
	.balign	KSYM_ALIGN
__ksymtab_\name:
	__put \val, __kstrtab_\name
	.previous
	.section __ksymtab_strings,"a"
__kstrtab_\name:
	.asciz "\name"
	.previous
#ifdef CONFIG_MODVERSIONS
	.section ___kcrctab\sec+\name,"a"
	.balign	KCRC_ALIGN
__kcrctab_\name:
#if defined(CONFIG_MODULE_REL_CRCS)
	.long	__crc_\name - .
#else
	.long	__crc_\name
#endif
	.weak	__crc_\name
	.previous
#endif
#endif
.endm

#if defined(CONFIG_TRIM_UNUSED_KSYMS)

#include <linux/kconfig.h>
#include <generated/autoksyms.h>

.macro __ksym_marker sym
	.section ".discard.ksym","a"
__ksym_marker_\sym:
	.previous
.endm

#define __EXPORT_SYMBOL(sym, val, sec)				\
	__ksym_marker sym;					\
	__cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
#define __cond_export_sym(sym, val, sec, conf)			\
	___cond_export_sym(sym, val, sec, conf)
#define ___cond_export_sym(sym, val, sec, enabled)		\
	__cond_export_sym_##enabled(sym, val, sec)
#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
#define __cond_export_sym_0(sym, val, sec) /* nothing */

#else
#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
#endif

#define EXPORT_SYMBOL(name)					\
	__EXPORT_SYMBOL(name, KSYM_FUNC(name),)
#define EXPORT_SYMBOL_GPL(name)					\
	__EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
#define EXPORT_DATA_SYMBOL(name)				\
	__EXPORT_SYMBOL(name, name,)
#define EXPORT_DATA_SYMBOL_GPL(name)				\
	__EXPORT_SYMBOL(name, name,_gpl)

#endif
mm_hooks.h 0000644 00000001526 14722073052 0006537 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
 * and arch_unmap to be included in asm-FOO/mmu_context.h for any
 * arch FOO which doesn't need to hook these.
 */
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
#endif	/* _ASM_GENERIC_MM_HOOKS_H */
simd.h 0000644 00000000664 14722073052 0005661 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/hardirq.h>

/*
 * may_use_simd - whether it is allowable at this time to issue SIMD
 *                instructions or access the SIMD register file
 *
 * As architectures typically don't preserve the SIMD register file when
 * taking an interrupt, !in_interrupt() should be a reasonable default.
 */
static __must_check inline bool may_use_simd(void)
{
	return !in_interrupt();
}
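/*
 * Editor's note: illustrative sketch, not from the original header. Code
 * that wants to use SIMD registers in the kernel asks may_use_simd() first
 * and keeps a scalar fallback; the begin/end helpers differ per
 * architecture (kernel_fpu_begin/end shown here is the x86 pair), and the
 * function name is hypothetical.
 */
static bool try_simd_copy(void *dst, const void *src, size_t n)
{
	if (!may_use_simd())
		return false;	/* caller falls back to a plain copy */

	kernel_fpu_begin();	/* x86 naming; other arches differ */
	memcpy(dst, src, n);	/* stand-in for a hand-vectorized copy */
	kernel_fpu_end();
	return true;
}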
termios.h 0000644 00000005475 14722073052 0006410 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_TERMIOS_H
#define _ASM_GENERIC_TERMIOS_H

#include <linux/uaccess.h>
#include <uapi/asm-generic/termios.h>

/*	intr=^C		quit=^\		erase=del	kill=^U
	eof=^D		vtime=\0	vmin=\1		sxtc=\0
	start=^Q	stop=^S		susp=^Z		eol=\0
	reprint=^R	discard=^U	werase=^W	lnext=^V
	eol2=\0
*/
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"

/*
 * Translate a "termio" structure into a "termios". Ugh.
 */
static inline int user_termio_to_kernel_termios(struct ktermios *termios,
						const struct termio __user *termio)
{
	unsigned short tmp;

	if (get_user(tmp, &termio->c_iflag) < 0)
		goto fault;
	termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;

	if (get_user(tmp, &termio->c_oflag) < 0)
		goto fault;
	termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;

	if (get_user(tmp, &termio->c_cflag) < 0)
		goto fault;
	termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;

	if (get_user(tmp, &termio->c_lflag) < 0)
		goto fault;
	termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;

	if (get_user(termios->c_line, &termio->c_line) < 0)
		goto fault;

	if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
		goto fault;

	return 0;

 fault:
	return -EFAULT;
}

/*
 * Translate a "termios" structure into a "termio". Ugh.
 */
static inline int kernel_termios_to_user_termio(struct termio __user *termio,
						struct ktermios *termios)
{
	if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
	    put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
	    put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
	    put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
	    put_user(termios->c_line, &termio->c_line) < 0 ||
	    copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
		return -EFAULT;

	return 0;
}

#ifdef TCGETS2
static inline int user_termios_to_kernel_termios(struct ktermios *k,
						 struct termios2 __user *u)
{
	return copy_from_user(k, u, sizeof(struct termios2));
}

static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
						 struct ktermios *k)
{
	return copy_to_user(u, k, sizeof(struct termios2));
}

static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
						   struct termios __user *u)
{
	return copy_from_user(k, u, sizeof(struct termios));
}

static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
						   struct ktermios *k)
{
	return copy_to_user(u, k, sizeof(struct termios));
}
#else /* TCGETS2 */
static inline int user_termios_to_kernel_termios(struct ktermios *k,
						 struct termios __user *u)
{
	return copy_from_user(k, u, sizeof(struct termios));
}

static inline int kernel_termios_to_user_termios(struct termios __user *u,
						 struct ktermios *k)
{
	return copy_to_user(u, k, sizeof(struct termios));
}
#endif /* TCGETS2 */

#endif /* _ASM_GENERIC_TERMIOS_H */
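/*
 * Editor's note: a small illustrative helper, not part of the original
 * header. struct termio carries 16-bit flag words while struct termios
 * uses 32 bits, which is why the conversion above masks with 0xffff0000:
 * the kernel-only upper half is preserved and only the user-supplied low
 * 16 bits are adopted. The function name is hypothetical.
 */
static unsigned int merge_termio_flag(unsigned int kernel32, unsigned short user16)
{
	/* keep the kernel-only upper half, adopt the user's lower half */
	return (kernel32 & 0xffff0000) | user16;
}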
termios-base.h 0000644 00000004164 14722073052 0007316 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
/* termios.h: generic termios/termio user copying/translation
 */

#ifndef _ASM_GENERIC_TERMIOS_BASE_H
#define _ASM_GENERIC_TERMIOS_BASE_H

#include <linux/uaccess.h>

#ifndef __ARCH_TERMIO_GETPUT

/*
 * Translate a "termio" structure into a "termios". Ugh.
 */
static inline int user_termio_to_kernel_termios(struct ktermios *termios,
						struct termio __user *termio)
{
	unsigned short tmp;

	if (get_user(tmp, &termio->c_iflag) < 0)
		goto fault;
	termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;

	if (get_user(tmp, &termio->c_oflag) < 0)
		goto fault;
	termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;

	if (get_user(tmp, &termio->c_cflag) < 0)
		goto fault;
	termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;

	if (get_user(tmp, &termio->c_lflag) < 0)
		goto fault;
	termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;

	if (get_user(termios->c_line, &termio->c_line) < 0)
		goto fault;

	if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
		goto fault;

	return 0;

 fault:
	return -EFAULT;
}

/*
 * Translate a "termios" structure into a "termio". Ugh.
 */
static inline int kernel_termios_to_user_termio(struct termio __user *termio,
						struct ktermios *termios)
{
	if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
	    put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
	    put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
	    put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
	    put_user(termios->c_line, &termio->c_line) < 0 ||
	    copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
		return -EFAULT;

	return 0;
}

#ifndef user_termios_to_kernel_termios
#define user_termios_to_kernel_termios(k, u) \
	copy_from_user(k, u, sizeof(struct termios))
#endif

#ifndef kernel_termios_to_user_termios
#define kernel_termios_to_user_termios(u, k) \
	copy_to_user(u, k, sizeof(struct termios))
#endif

#define user_termios_to_kernel_termios_1(k, u) \
	copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios_1(u, k) \
	copy_to_user(u, k, sizeof(struct termios))

#endif	/* __ARCH_TERMIO_GETPUT */

#endif	/* _ASM_GENERIC_TERMIOS_BASE_H */
checksum.h 0000644 00000004431 14722073052 0006523 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);

/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
					int len, __wsum sum, int *csum_err);

#ifndef csum_partial_copy_nocheck
#define csum_partial_copy_nocheck(src, dst, len, sum)	\
	csum_partial_copy((src), (dst), (len), (sum))
#endif

#ifndef ip_fast_csum
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
#endif

#ifndef csum_fold
/*
 * Fold a partial checksum
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;
}
#endif

#ifndef csum_tcpudp_nofold
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
extern __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
		   __u8 proto, __wsum sum);
#endif

#ifndef csum_tcpudp_magic
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
		  __u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
#endif

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
extern __sum16 ip_compute_csum(const void *buff, int len);

#endif /* __ASM_GENERIC_CHECKSUM_H */
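/*
 * Editor's note: an illustrative usage sketch, not part of the original
 * header, showing how the pieces compose: csum_partial() accumulates a
 * 32-bit folded-carry sum and csum_fold() turns it into the final 16-bit
 * complemented checksum. This is essentially what ip_compute_csum() does;
 * the wrapper name is hypothetical.
 */
static __sum16 checksum_buffer(const void *buf, int len)
{
	return csum_fold(csum_partial(buf, len, 0));
}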
linkage.h 0000644 00000000341 14722073052 0006327 0 ustar 00
#ifndef __ASM_GENERIC_LINKAGE_H
#define __ASM_GENERIC_LINKAGE_H
/*
 * linux/linkage.h provides reasonable defaults.
 * an architecture can override them by providing its own version.
 */
#endif /* __ASM_GENERIC_LINKAGE_H */
string.h 0000644 00000000431 14722073052 0006223 0 ustar 00
#ifndef __ASM_GENERIC_STRING_H
#define __ASM_GENERIC_STRING_H
/*
 * The kernel provides all required functions in lib/string.c
 *
 * Architectures probably want to provide at least their own optimized
 * memcpy and memset functions though.
 */
#endif /* __ASM_GENERIC_STRING_H */