Файловый менеджер - Редактировать - /var/www/xthruster/html/wp-content/uploads/flags/arc.tar
Назад
include/uapi/asm/page.h 0000644 00000002070 14722070650 0010774 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _UAPI__ASM_ARC_PAGE_H #define _UAPI__ASM_ARC_PAGE_H #include <linux/const.h> /* PAGE_SHIFT determines the page size */ #if defined(CONFIG_ARC_PAGE_SIZE_16K) #define PAGE_SHIFT 14 #elif defined(CONFIG_ARC_PAGE_SIZE_4K) #define PAGE_SHIFT 12 #else /* * Default 8k * done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because adhoc * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined w/o * using the correct uClibc header and in their build our autoconf.h is * not available */ #define PAGE_SHIFT 13 #endif #define PAGE_SIZE _BITUL(PAGE_SHIFT) /* Default 8K */ #define PAGE_OFFSET _AC(0x80000000, UL) /* Kernel starts at 2G onwrds */ #define PAGE_MASK (~(PAGE_SIZE-1)) #endif /* _UAPI__ASM_ARC_PAGE_H */ include/uapi/asm/cachectl.h 0000644 00000001400 14722070650 0011622 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __ARC_ASM_CACHECTL_H #define __ARC_ASM_CACHECTL_H /* * ARC ABI flags defined for Android's finegrained cacheflush requirements */ #define CF_I_INV 0x0002 #define CF_D_FLUSH 0x0010 #define CF_D_FLUSH_INV 0x0020 #define CF_DEFAULT (CF_I_INV | CF_D_FLUSH) /* * Standard flags expected by cacheflush system call users */ #define ICACHE CF_I_INV #define DCACHE CF_D_FLUSH #define BCACHE (CF_I_INV | CF_D_FLUSH) #endif include/uapi/asm/setup.h 0000644 00000000362 14722070650 0011222 0 ustar 00 /* * setup.h is part of userspace header ABI so UAPI scripts have to generate it * even if there's nothing to export - causing empty <uapi/asm/setup.h> * However to prevent "patch" from discarding it we add this placeholder * comment */ include/uapi/asm/elf.h 0000644 00000002050 14722070650 0010624 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _UAPI__ASM_ARC_ELF_H #define _UAPI__ASM_ARC_ELF_H #include <asm/ptrace.h> /* for user_regs_struct */ /* Machine specific ELF Hdr flags */ #define EF_ARC_OSABI_MSK 0x00000f00 #define EF_ARC_OSABI_V3 0x00000300 /* v3 (no legacy syscalls) */ #define EF_ARC_OSABI_V4 0x00000400 /* v4 (64bit data any reg align) */ #if __GNUC__ < 6 #define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V3 #else #define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V4 #endif typedef unsigned long elf_greg_t; typedef unsigned long elf_fpregset_t; #define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) #define ELF_ARCV2REG (sizeof(struct user_regs_arcv2) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; #endif include/uapi/asm/signal.h 0000644 00000001744 14722070650 0011344 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef _ASM_ARC_SIGNAL_H #define _ASM_ARC_SIGNAL_H /* * This is much needed for ARC sigreturn optimization. * This allows uClibc to piggback the addr of a sigreturn stub in sigaction, * which allows sigreturn based re-entry into kernel after handling signal. * W/o this kernel needs to "synthesize" the sigreturn trampoline on user * mode stack which in turn forces the following: * -TLB Flush (after making the stack page executable) * -Cache line Flush (to make I/D Cache lines coherent) */ #define SA_RESTORER 0x04000000 #include <asm-generic/signal.h> #endif /* _ASM_ARC_SIGNAL_H */ include/uapi/asm/unistd.h 0000644 00000003121 14722070650 0011364 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /******** no-legacy-syscalls-ABI *******/ /* * Non-typical guard macro to enable inclusion twice in ARCH sys.c * That is how the Generic syscall wrapper generator works */ #if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL) #define _UAPI_ASM_ARC_UNISTD_H #define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE3 #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_TIME32_SYSCALLS #define sys_mmap2 sys_mmap_pgoff #include <asm-generic/unistd.h> #define NR_syscalls __NR_syscalls /* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ #define __NR_sysfs (__NR_arch_specific_syscall + 3) /* ARC specific syscall */ #define __NR_cacheflush (__NR_arch_specific_syscall + 0) #define __NR_arc_settls (__NR_arch_specific_syscall + 1) #define __NR_arc_gettls (__NR_arch_specific_syscall + 2) #define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4) __SYSCALL(__NR_cacheflush, sys_cacheflush) __SYSCALL(__NR_arc_settls, sys_arc_settls) __SYSCALL(__NR_arc_gettls, sys_arc_gettls) __SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg) __SYSCALL(__NR_sysfs, sys_sysfs) #undef __SYSCALL #endif include/uapi/asm/swab.h 0000644 00000005425 14722070650 0011023 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * vineetg: May 2011 * -Support single cycle endian-swap insn in ARC700 4.10 * * vineetg: June 2009 * -Better htonl implementation (5 instead of 9 ALU instructions) * -Hardware assisted single cycle bswap (Use Case of ARC custom instrn) */ #ifndef __ASM_ARC_SWAB_H #define __ASM_ARC_SWAB_H #include <linux/types.h> /* Native single cycle endian swap insn */ #ifdef CONFIG_ARC_HAS_SWAPE #define __arch_swab32(x) \ ({ \ unsigned int tmp = x; \ __asm__( \ " swape %0, %1 \n" \ : "=r" (tmp) \ : "r" (tmp)); \ tmp; \ }) #else /* Several ways of Endian-Swap Emulation for ARC * 0: kernel generic * 1: ARC optimised "C" * 2: ARC Custom instruction */ #define ARC_BSWAP_TYPE 1 #if (ARC_BSWAP_TYPE == 1) /******* Software only ********/ /* The kernel default implementation of htonl is * return x<<24 | x>>24 | * (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8; * * This generates 9 instructions on ARC (excluding the ld/st) * * 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped * 8051fd98: asl r5,r3,24 ; get 3rd Byte * 8051fd9c: lsr r2,r3,24 ; get 0th Byte * 8051fda0: and r4,r3,0xff00 * 8051fda8: asl r4,r4,8 ; get 1st Byte * 8051fdac: and r3,r3,0x00ff0000 * 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes * 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg * 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte * 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte * 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem * * Joern suggested a better "C" algorithm which is great since * (1) It is portable to any architecure * (2) At the same time it takes advantage of ARC ISA (rotate intrns) */ #define __arch_swab32(x) \ ({ unsigned long __in = (x), __tmp; \ __tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \ __in = __in << 24 | __in >> 8; /* ror in,in,8 */ \ __tmp ^= __in; \ __tmp &= 0xff00ff; \ __tmp ^ __in; \ }) #elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */ #define __arch_swab32(x) \ ({ \ unsigned int tmp = x; \ __asm__( \ " .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\ " bswap %0, %1 \n"\ : "=r" (tmp) \ : "r" (tmp)); \ tmp; \ }) #endif /* ARC_BSWAP_TYPE=zzz */ #endif /* CONFIG_ARC_HAS_SWAPE */ #if !defined(__STRICT_ANSI__) || defined(__KERNEL__) #define __SWAB_64_THRU_32__ #endif #endif include/uapi/asm/sigcontext.h 0000644 00000001230 14722070650 0012244 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _ASM_ARC_SIGCONTEXT_H #define _ASM_ARC_SIGCONTEXT_H #include <asm/ptrace.h> /* * Signal context structure - contains all info to do with the state * before the signal handler was invoked. */ struct sigcontext { struct user_regs_struct regs; struct user_regs_arcv2 v2abi; }; #endif /* _ASM_ARC_SIGCONTEXT_H */ include/uapi/asm/byteorder.h 0000644 00000001050 14722070650 0012054 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __ASM_ARC_BYTEORDER_H #define __ASM_ARC_BYTEORDER_H #ifdef __BIG_ENDIAN__ #include <linux/byteorder/big_endian.h> #else #include <linux/byteorder/little_endian.h> #endif #endif /* ASM_ARC_BYTEORDER_H */ include/uapi/asm/Kbuild 0000644 00000000073 14722070650 0011045 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 generic-y += ucontext.h include/uapi/asm/ptrace.h 0000644 00000003451 14722070650 0011342 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef _UAPI__ASM_ARC_PTRACE_H #define _UAPI__ASM_ARC_PTRACE_H #define PTRACE_GET_THREAD_AREA 25 #ifndef __ASSEMBLY__ /* * Userspace ABI: Register state needed by * -ptrace (gdbserver) * -sigcontext (SA_SIGNINFO signal frame) * * This is to decouple pt_regs from user-space ABI, to be able to change it * w/o affecting the ABI. * * The intermediate pad,pad2 are relics of initial layout based on pt_regs * for optimizations when copying pt_regs to/from user_regs_struct. * We no longer need them, but can't be changed as they are part of ABI now. * * Also, sigcontext only care about the scratch regs as that is what we really * save/restore for signal handling. However gdb also uses the same struct * hence callee regs need to be in there too. */ struct user_regs_struct { unsigned long pad; struct { unsigned long bta, lp_start, lp_end, lp_count; unsigned long status32, ret, blink, fp, gp; unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; unsigned long sp; } scratch; unsigned long pad2; struct { unsigned long r25, r24, r23, r22, r21, r20; unsigned long r19, r18, r17, r16, r15, r14, r13; } callee; unsigned long efa; /* break pt addr, for break points in delay slots */ unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ }; struct user_regs_arcv2 { unsigned long r30, r58, r59; }; #endif /* !__ASSEMBLY__ */ #endif /* _UAPI__ASM_ARC_PTRACE_H */ include/asm/exec.h 0000644 00000000410 14722070650 0010042 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_EXEC_H #define __ASM_ARC_EXEC_H /* Align to 16b */ #define arch_align_stack(p) ((unsigned long)(p) & ~0xf) #endif include/asm/irq.h 0000644 00000001247 14722070650 0007722 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_IRQ_H #define __ASM_ARC_IRQ_H /* * ARCv2 can support 240 interrupts in the core interrupts controllers and * 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most * configurations of boards. * This doesnt affect ARCompact, but we change it to same value */ #define NR_IRQS 512 /* Platform Independent IRQs */ #ifdef CONFIG_ISA_ARCV2 #define IPI_IRQ 19 #define SOFTIRQ_IRQ 21 #define FIRST_EXT_IRQ 24 #endif #include <linux/interrupt.h> #include <asm-generic/irq.h> extern void arc_init_IRQ(void); #endif include/asm/tlbflush.h 0000644 00000003166 14722070650 0010754 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_TLBFLUSH__ #define __ASM_ARC_TLBFLUSH__ #include <linux/mm.h> void local_flush_tlb_all(void); void local_flush_tlb_mm(struct mm_struct *mm); void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); #ifdef CONFIG_TRANSPARENT_HUGEPAGE void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); #endif #ifndef CONFIG_SMP #define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e) #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) #define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e) #define flush_tlb_all() local_flush_tlb_all() #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e) #endif #else extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); extern void flush_tlb_all(void); extern void flush_tlb_mm(struct mm_struct *mm); #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); #endif #endif /* CONFIG_SMP */ #endif include/asm/serial.h 0000644 00000000762 14722070650 0010407 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_SERIAL_H #define _ASM_ARC_SERIAL_H /* * early 8250 (now earlycon) requires BASE_BAUD to be defined in this header. * However to still determine it dynamically (for multi-platform images) * we do this in a helper by parsing the FDT early */ extern unsigned int __init arc_early_base_baud(void); #define BASE_BAUD arc_early_base_baud() #endif /* _ASM_ARC_SERIAL_H */ include/asm/spinlock.h 0000644 00000020765 14722070650 0010757 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H #include <asm/spinlock_types.h> #include <asm/processor.h> #include <asm/barrier.h> #define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__) #ifdef CONFIG_ARC_HAS_LLSC static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned int val; __asm__ __volatile__( "1: llock %[val], [%[slock]] \n" " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ " scond %[LOCKED], [%[slock]] \n" /* acquire */ " bnz 1b \n" " \n" : [val] "=&r" (val) : [slock] "r" (&(lock->slock)), [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) : "memory", "cc"); /* * ACQUIRE barrier to ensure load/store after taking the lock * don't "bleed-up" out of the critical section (leak-in is allowed) * http://www.spinics.net/lists/kernel/msg2010409.html * * ARCv2 only has load-load, store-store and all-all barrier * thus need the full all-all barrier */ smp_mb(); } /* 1 - lock taken successfully */ static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int val, got_it = 0; __asm__ __volatile__( "1: llock %[val], [%[slock]] \n" " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ " scond %[LOCKED], [%[slock]] \n" /* acquire */ " bnz 1b \n" " mov %[got_it], 1 \n" "4: \n" " \n" : [val] "=&r" (val), [got_it] "+&r" (got_it) : [slock] "r" (&(lock->slock)), [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) : "memory", "cc"); smp_mb(); return got_it; } static inline void arch_spin_unlock(arch_spinlock_t *lock) { smp_mb(); WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__); } /* * Read-write spinlocks, allowing multiple readers but only one writer. * Unfair locking as Writers could be starved indefinitely by Reader(s) */ static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int val; /* * zero means writer holds the lock exclusively, deny Reader. * Otherwise grant lock to first/subseq reader * * if (rw->counter > 0) { * rw->counter--; * ret = 1; * } */ __asm__ __volatile__( "1: llock %[val], [%[rwlock]] \n" " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */ " sub %[val], %[val], 1 \n" /* reader lock */ " scond %[val], [%[rwlock]] \n" " bnz 1b \n" " \n" : [val] "=&r" (val) : [rwlock] "r" (&(rw->counter)), [WR_LOCKED] "ir" (0) : "memory", "cc"); smp_mb(); } /* 1 - lock taken successfully */ static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int val, got_it = 0; __asm__ __volatile__( "1: llock %[val], [%[rwlock]] \n" " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ " sub %[val], %[val], 1 \n" /* counter-- */ " scond %[val], [%[rwlock]] \n" " bnz 1b \n" /* retry if collided with someone */ " mov %[got_it], 1 \n" " \n" "4: ; --- done --- \n" : [val] "=&r" (val), [got_it] "+&r" (got_it) : [rwlock] "r" (&(rw->counter)), [WR_LOCKED] "ir" (0) : "memory", "cc"); smp_mb(); return got_it; } static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned int val; /* * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), * deny writer. Otherwise if unlocked grant to writer * Hence the claim that Linux rwlocks are unfair to writers. * (can be starved for an indefinite time by readers). * * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { * rw->counter = 0; * ret = 1; * } */ __asm__ __volatile__( "1: llock %[val], [%[rwlock]] \n" " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */ " mov %[val], %[WR_LOCKED] \n" " scond %[val], [%[rwlock]] \n" " bnz 1b \n" " \n" : [val] "=&r" (val) : [rwlock] "r" (&(rw->counter)), [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), [WR_LOCKED] "ir" (0) : "memory", "cc"); smp_mb(); } /* 1 - lock taken successfully */ static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int val, got_it = 0; __asm__ __volatile__( "1: llock %[val], [%[rwlock]] \n" " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ " mov %[val], %[WR_LOCKED] \n" " scond %[val], [%[rwlock]] \n" " bnz 1b \n" /* retry if collided with someone */ " mov %[got_it], 1 \n" " \n" "4: ; --- done --- \n" : [val] "=&r" (val), [got_it] "+&r" (got_it) : [rwlock] "r" (&(rw->counter)), [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), [WR_LOCKED] "ir" (0) : "memory", "cc"); smp_mb(); return got_it; } static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int val; smp_mb(); /* * rw->counter++; */ __asm__ __volatile__( "1: llock %[val], [%[rwlock]] \n" " add %[val], %[val], 1 \n" " scond %[val], [%[rwlock]] \n" " bnz 1b \n" " \n" : [val] "=&r" (val) : [rwlock] "r" (&(rw->counter)) : "memory", "cc"); } static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__); } #else /* !CONFIG_ARC_HAS_LLSC */ static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; /* * Per lkmm, smp_mb() is only required after _lock (and before_unlock) * for ACQ and REL semantics respectively. However EX based spinlocks * need the extra smp_mb to workaround a hardware quirk. */ smp_mb(); __asm__ __volatile__( "1: ex %0, [%1] \n" #ifdef CONFIG_EZNPS_MTM_EXT " .word %3 \n" #endif " breq %0, %2, 1b \n" : "+&r" (val) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) #ifdef CONFIG_EZNPS_MTM_EXT , "i"(CTOP_INST_SCHD_RW) #endif : "memory"); smp_mb(); } /* 1 - lock taken successfully */ static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; smp_mb(); __asm__ __volatile__( "1: ex %0, [%1] \n" : "+r" (val) : "r"(&(lock->slock)) : "memory"); smp_mb(); return (val == __ARCH_SPIN_LOCK_UNLOCKED__); } static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__; /* * RELEASE barrier: given the instructions avail on ARCv2, full barrier * is the only option */ smp_mb(); /* * EX is not really required here, a simple STore of 0 suffices. * However this causes tasklist livelocks in SystemC based SMP virtual * platforms where the systemc core scheduler uses EX as a cue for * moving to next core. Do a git log of this file for details */ __asm__ __volatile__( " ex %0, [%1] \n" : "+r" (val) : "r"(&(lock->slock)) : "memory"); /* * see pairing version/comment in arch_spin_lock above */ smp_mb(); } /* * Read-write spinlocks, allowing multiple readers but only one writer. * Unfair locking as Writers could be starved indefinitely by Reader(s) * * The spinlock itself is contained in @counter and access to it is * serialized with @lock_mutex. */ /* 1 - lock taken successfully */ static inline int arch_read_trylock(arch_rwlock_t *rw) { int ret = 0; unsigned long flags; local_irq_save(flags); arch_spin_lock(&(rw->lock_mutex)); /* * zero means writer holds the lock exclusively, deny Reader. * Otherwise grant lock to first/subseq reader */ if (rw->counter > 0) { rw->counter--; ret = 1; } arch_spin_unlock(&(rw->lock_mutex)); local_irq_restore(flags); return ret; } /* 1 - lock taken successfully */ static inline int arch_write_trylock(arch_rwlock_t *rw) { int ret = 0; unsigned long flags; local_irq_save(flags); arch_spin_lock(&(rw->lock_mutex)); /* * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), * deny writer. Otherwise if unlocked grant to writer * Hence the claim that Linux rwlocks are unfair to writers. * (can be starved for an indefinite time by readers). */ if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { rw->counter = 0; ret = 1; } arch_spin_unlock(&(rw->lock_mutex)); local_irq_restore(flags); return ret; } static inline void arch_read_lock(arch_rwlock_t *rw) { while (!arch_read_trylock(rw)) cpu_relax(); } static inline void arch_write_lock(arch_rwlock_t *rw) { while (!arch_write_trylock(rw)) cpu_relax(); } static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); arch_spin_lock(&(rw->lock_mutex)); rw->counter++; arch_spin_unlock(&(rw->lock_mutex)); local_irq_restore(flags); } static inline void arch_write_unlock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); arch_spin_lock(&(rw->lock_mutex)); rw->counter = __ARCH_RW_LOCK_UNLOCKED__; arch_spin_unlock(&(rw->lock_mutex)); local_irq_restore(flags); } #endif #endif /* __ASM_SPINLOCK_H */ include/asm/page.h 0000644 00000006157 14722070650 0010050 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_PAGE_H #define __ASM_ARC_PAGE_H #include <uapi/asm/page.h> #ifdef CONFIG_ARC_HAS_PAE40 #define MAX_POSSIBLE_PHYSMEM_BITS 40 #define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK) #else /* CONFIG_ARC_HAS_PAE40 */ #define MAX_POSSIBLE_PHYSMEM_BITS 32 #define PAGE_MASK_PHYS PAGE_MASK #endif /* CONFIG_ARC_HAS_PAE40 */ #ifndef __ASSEMBLY__ #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) struct vm_area_struct; struct page; #define __HAVE_ARCH_COPY_USER_HIGHPAGE void copy_user_highpage(struct page *to, struct page *from, unsigned long u_vaddr, struct vm_area_struct *vma); void clear_user_page(void *to, unsigned long u_vaddr, struct page *page); #undef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS /* * These are used to make use of C type-checking.. */ typedef struct { #ifdef CONFIG_ARC_HAS_PAE40 unsigned long long pte; #else unsigned long pte; #endif } pte_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; #define pte_val(x) ((x).pte) #define pgd_val(x) ((x).pgd) #define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) }) #define __pgd(x) ((pgd_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) }) #define pte_pgprot(x) __pgprot(pte_val(x)) #else /* !STRICT_MM_TYPECHECKS */ #ifdef CONFIG_ARC_HAS_PAE40 typedef unsigned long long pte_t; #else typedef unsigned long pte_t; #endif typedef unsigned long pgd_t; typedef unsigned long pgprot_t; #define pte_val(x) (x) #define pgd_val(x) (x) #define pgprot_val(x) (x) #define __pte(x) (x) #define __pgd(x) (x) #define __pgprot(x) (x) #define pte_pgprot(x) (x) #endif typedef pte_t * pgtable_t; /* * Use virt_to_pfn with caution: * If used in pte or paddr related macros, it could cause truncation * in PAE40 builds * As a rule of thumb, only use it in helpers starting with virt_ * You have been warned ! */ #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) #define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_RAM_BASE) #ifdef CONFIG_FLATMEM #define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr) #endif /* * __pa, __va, virt_to_page (ALERT: deprecated, don't use them) * * These macros have historically been misnamed * virt here means link-address/program-address as embedded in object code. * And for ARC, link-addr = physical address */ #define __pa(vaddr) ((unsigned long)(vaddr)) #define __va(paddr) ((void *)((unsigned long)(paddr))) #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) /* Default Permissions for stack/heaps pages (Non Executable) */ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define WANT_PAGE_VIRTUAL 1 #include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */ #include <asm-generic/getorder.h> #endif /* !__ASSEMBLY__ */ #endif include/asm/irqflags-arcv2.h 0000644 00000006643 14722070650 0011757 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_IRQFLAGS_ARCV2_H #define __ASM_IRQFLAGS_ARCV2_H #include <asm/arcregs.h> /* status32 Bits */ #define STATUS_AD_BIT 19 /* Disable Align chk: core supports non-aligned */ #define STATUS_IE_BIT 31 #define STATUS_AD_MASK (1<<STATUS_AD_BIT) #define STATUS_IE_MASK (1<<STATUS_IE_BIT) /* status32 Bits as encoded/expected by CLRI/SETI */ #define CLRI_STATUS_IE_BIT 4 #define CLRI_STATUS_E_MASK 0xF #define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT) #define AUX_USER_SP 0x00D #define AUX_IRQ_CTRL 0x00E #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ #define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */ #define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */ #define AUX_IRQ_PRIORITY 0x206 #define ICAUSE 0x40a #define AUX_IRQ_SELECT 0x40b #define AUX_IRQ_ENABLE 0x40c /* Was Intr taken in User Mode */ #define AUX_IRQ_ACT_BIT_U 31 /* * Hardware supports 16 priorities (0 highest, 15 lowest) * Linux by default runs at 1, priority 0 reserved for NMI style interrupts */ #define ARCV2_IRQ_DEF_PRIO 1 /* seed value for status register */ #ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS #define __AD_ENB STATUS_AD_MASK #else #define __AD_ENB 0 #endif #define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | __AD_ENB | \ (ARCV2_IRQ_DEF_PRIO << 1)) #ifndef __ASSEMBLY__ /* * Save IRQ state and disable IRQs */ static inline long arch_local_irq_save(void) { unsigned long flags; __asm__ __volatile__(" clri %0 \n" : "=r" (flags) : : "memory"); return flags; } /* * restore saved IRQ state */ static inline void arch_local_irq_restore(unsigned long flags) { __asm__ __volatile__(" seti %0 \n" : : "r" (flags) : "memory"); } /* * Unconditionally Enable IRQs */ static inline void arch_local_irq_enable(void) { unsigned int irqact = read_aux_reg(AUX_IRQ_ACT); if (irqact & 0xffff) write_aux_reg(AUX_IRQ_ACT, irqact & ~0xffff); __asm__ __volatile__(" seti \n" : : : "memory"); } /* * Unconditionally Disable IRQs */ static inline void arch_local_irq_disable(void) { __asm__ __volatile__(" clri \n" : : : "memory"); } /* * save IRQ state */ static inline long arch_local_save_flags(void) { unsigned long temp; __asm__ __volatile__( " lr %0, [status32] \n" : "=&r"(temp) : : "memory"); /* To be compatible with irq_save()/irq_restore() * encode the irq bits as expected by CLRI/SETI * (this was needed to make CONFIG_TRACE_IRQFLAGS work) */ temp = (1 << 5) | ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) | ((temp >> 1) & CLRI_STATUS_E_MASK); return temp; } /* * Query IRQ state */ static inline int arch_irqs_disabled_flags(unsigned long flags) { return !(flags & CLRI_STATUS_IE_MASK); } static inline int arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } static inline void arc_softirq_trigger(int irq) { write_aux_reg(AUX_IRQ_HINT, irq); } static inline void arc_softirq_clear(int irq) { write_aux_reg(AUX_IRQ_HINT, 0); } #else #ifdef CONFIG_TRACE_IRQFLAGS .macro TRACE_ASM_IRQ_DISABLE bl trace_hardirqs_off .endm .macro TRACE_ASM_IRQ_ENABLE bl trace_hardirqs_on .endm #else .macro TRACE_ASM_IRQ_DISABLE .endm .macro TRACE_ASM_IRQ_ENABLE .endm #endif .macro IRQ_DISABLE scratch clri TRACE_ASM_IRQ_DISABLE .endm .macro IRQ_ENABLE scratch TRACE_ASM_IRQ_ENABLE seti .endm #endif /* __ASSEMBLY__ */ #endif include/asm/cacheflush.h 0000644 00000007360 14722070650 0011236 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs * -flush_cache_dup_mm (fork) * -likewise for flush_cache_mm (exit/execve) * -likewise for flush_cache_{range,page} (munmap, exit, COW-break) * * vineetg: April 2008 * -Added a critical CacheLine flush to copy_to_user_page( ) which * was causing gdbserver to not setup breakpoints consistently */ #ifndef _ASM_CACHEFLUSH_H #define _ASM_CACHEFLUSH_H #include <linux/mm.h> #include <asm/shmparam.h> /* * Semantically we need this because icache doesn't snoop dcache/dma. * However ARC Cache flush requires paddr as well as vaddr, latter not available * in the flush_icache_page() API. So we no-op it but do the equivalent work * in update_mmu_cache() */ #define flush_icache_page(vma, page) void flush_cache_all(void); void flush_icache_range(unsigned long kstart, unsigned long kend); void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len); void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr); void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 void flush_dcache_page(struct page *page); void dma_cache_wback_inv(phys_addr_t start, unsigned long sz); void dma_cache_inv(phys_addr_t start, unsigned long sz); void dma_cache_wback(phys_addr_t start, unsigned long sz); #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) /* TBD: optimize this */ #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() #define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */ #ifndef CONFIG_ARC_CACHE_VIPT_ALIASING #define flush_cache_mm(mm) /* called on munmap/exit */ #define flush_cache_range(mm, u_vstart, u_vend) #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ #else /* VIPT aliasing dcache */ /* To clear out stale userspace mappings */ void flush_cache_mm(struct mm_struct *mm); void flush_cache_range(struct vm_area_struct *vma, unsigned long start,unsigned long end); void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long page); /* * To make sure that userspace mapping is flushed to memory before * get_user_pages() uses a kernel mapping to access the page */ #define ARCH_HAS_FLUSH_ANON_PAGE void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long u_vaddr); #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ /* * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default * This works around some PIO based drivers which don't call flush_dcache_page * to record that they dirtied the dcache */ #define PG_dc_clean PG_arch_1 #define CACHE_COLORS_NUM 4 #define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1) #define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK) /* * Simple wrapper over config option * Bootup code ensures that hardware matches kernel configuration */ static inline int cache_is_vipt_aliasing(void) { return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING); } /* * checks if two addresses (after page aligning) index into same cache set */ #define addr_not_cache_congruent(addr1, addr2) \ ({ \ cache_is_vipt_aliasing() ? \ (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \ }) #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ if (vma->vm_flags & VM_EXEC) \ __sync_icache_dcache((unsigned long)(dst), vaddr, len); \ } while (0) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len); \ #endif include/asm/barrier.h 0000644 00000003163 14722070650 0010554 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H #ifdef CONFIG_ISA_ARCV2 /* * ARCv2 based HS38 cores are in-order issue, but still weakly ordered * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ... * * Explicit barrier provided by DMB instruction * - Operand supports fine grained load/store/load+store semantics * - Ensures that selected memory operation issued before it will complete * before any subsequent memory operation of same type * - DMB guarantees SMP as well as local barrier semantics * (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e. * UP: barrier(), SMP: smp_*mb == *mb) * - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not needed * in the general case. Plus it only provides full barrier. */ #define mb() asm volatile("dmb 3\n" : : : "memory") #define rmb() asm volatile("dmb 1\n" : : : "memory") #define wmb() asm volatile("dmb 2\n" : : : "memory") #elif !defined(CONFIG_ARC_PLAT_EZNPS) /* CONFIG_ISA_ARCOMPACT */ /* * ARCompact based cores (ARC700) only have SYNC instruction which is super * heavy weight as it flushes the pipeline as well. * There are no real SMP implementations of such cores. */ #define mb() asm volatile("sync\n" : : : "memory") #else /* CONFIG_ARC_PLAT_EZNPS */ #include <plat/ctop.h> #define mb() asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory") #define rmb() asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RD) : "memory") #endif #include <asm-generic/barrier.h> #endif include/asm/syscall.h 0000644 00000003312 14722070650 0010574 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_SYSCALL_H #define _ASM_ARC_SYSCALL_H 1 #include <uapi/linux/audit.h> #include <linux/err.h> #include <linux/sched.h> #include <asm/unistd.h> #include <asm/ptrace.h> /* in_syscall() */ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { if (user_mode(regs) && in_syscall(regs)) return regs->r8; else return -1; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { regs->r0 = regs->orig_r0; } static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { /* 0 if syscall succeeded, otherwise -Errorcode */ return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0; } static inline long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) { return regs->r0; } static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { regs->r0 = (long) error ?: val; } /* * @i: argument index [0,5] * @n: number of arguments; n+i must be [1,6]. */ static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, unsigned long *args) { unsigned long *inside_ptregs = &(regs->r0); unsigned int n = 6; unsigned int i = 0; while (n--) { args[i++] = (*inside_ptregs); inside_ptregs--; } } static inline int syscall_get_arch(struct task_struct *task) { return IS_ENABLED(CONFIG_ISA_ARCOMPACT) ? (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ? AUDIT_ARCH_ARCOMPACTBE : AUDIT_ARCH_ARCOMPACT) : (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ? AUDIT_ARCH_ARCV2BE : AUDIT_ARCH_ARCV2); } #endif include/asm/perf_event.h 0000644 00000015333 14722070650 0011265 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Linux performance counter support for ARC * * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_PERF_EVENT_H #define __ASM_PERF_EVENT_H /* Max number of counters that PCT block may ever have */ #define ARC_PERF_MAX_COUNTERS 32 #define ARC_REG_CC_BUILD 0xF6 #define ARC_REG_CC_INDEX 0x240 #define ARC_REG_CC_NAME0 0x241 #define ARC_REG_CC_NAME1 0x242 #define ARC_REG_PCT_BUILD 0xF5 #define ARC_REG_PCT_COUNTL 0x250 #define ARC_REG_PCT_COUNTH 0x251 #define ARC_REG_PCT_SNAPL 0x252 #define ARC_REG_PCT_SNAPH 0x253 #define ARC_REG_PCT_CONFIG 0x254 #define ARC_REG_PCT_CONTROL 0x255 #define ARC_REG_PCT_INDEX 0x256 #define ARC_REG_PCT_INT_CNTL 0x25C #define ARC_REG_PCT_INT_CNTH 0x25D #define ARC_REG_PCT_INT_CTRL 0x25E #define ARC_REG_PCT_INT_ACT 0x25F #define ARC_REG_PCT_CONFIG_USER (1 << 18) /* count in user mode */ #define ARC_REG_PCT_CONFIG_KERN (1 << 19) /* count in kernel mode */ #define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */ #define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */ struct arc_reg_pct_build { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int m:8, c:8, r:5, i:1, s:2, v:8; #else unsigned int v:8, s:2, i:1, r:5, c:8, m:8; #endif }; struct arc_reg_cc_build { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int c:16, r:8, v:8; #else unsigned int v:8, r:8, c:16; #endif }; #define PERF_COUNT_ARC_DCLM (PERF_COUNT_HW_MAX + 0) #define PERF_COUNT_ARC_DCSM (PERF_COUNT_HW_MAX + 1) #define PERF_COUNT_ARC_ICM (PERF_COUNT_HW_MAX + 2) #define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3) #define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4) #define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5) #define PERF_COUNT_ARC_LDC (PERF_COUNT_HW_MAX + 6) #define PERF_COUNT_ARC_STC (PERF_COUNT_HW_MAX + 7) #define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 8) /* * Some ARC pct quirks: * * PERF_COUNT_HW_STALLED_CYCLES_BACKEND * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND * The ARC 700 can either measure stalls per pipeline stage, or all stalls * combined; for now we assign all stalls to STALLED_CYCLES_BACKEND * and all pipeline flushes (e.g. caused by mispredicts, etc.) to * STALLED_CYCLES_FRONTEND. * * We could start multiple performance counters and combine everything * afterwards, but that makes it complicated. * * Note that I$ cache misses aren't counted by either of the two! */ /* * ARC PCT has hardware conditions with fixed "names" but variable "indexes" * (based on a specific RTL build) * Below is the static map between perf generic/arc specific event_id and * h/w condition names. * At the time of probe, we loop thru each index and find it's name to * complete the mapping of perf event_id to h/w index as latter is needed * to program the counter really */ static const char * const arc_pmu_ev_hw_map[] = { /* count cycles */ [PERF_COUNT_HW_CPU_CYCLES] = "crun", [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun", [PERF_COUNT_HW_BUS_CYCLES] = "crun", [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush", [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall", /* counts condition */ [PERF_COUNT_HW_INSTRUCTIONS] = "iall", /* All jump instructions that are taken */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", #ifdef CONFIG_ISA_ARCV2 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", #else [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */ #endif [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */ [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */ [PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */ [PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */ [PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */ [PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */ [PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */ [PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc", /* Instr: mem read cached */ [PERF_COUNT_HW_CACHE_MISSES] = "dclm", /* D-cache Load Miss */ }; #define C(_x) PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xffff static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC, [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC, [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS, [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC, [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB, }, /* DTLB LD/ST Miss not segregated by h/w*/ [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS, [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, }; #endif /* __ASM_PERF_EVENT_H */ include/asm/bitops.h 0000644 00000023266 14722070650 0010434 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_BITOPS_H #define _ASM_BITOPS_H #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly #endif #ifndef __ASSEMBLY__ #include <linux/types.h> #include <linux/compiler.h> #include <asm/barrier.h> #ifndef CONFIG_ARC_HAS_LLSC #include <asm/smp.h> #endif #ifdef CONFIG_ARC_HAS_LLSC /* * Hardware assisted Atomic-R-M-W */ #define BIT_OP(op, c_op, asm_op) \ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ { \ unsigned int temp; \ \ m += nr >> 5; \ \ nr &= 0x1f; \ \ __asm__ __volatile__( \ "1: llock %0, [%1] \n" \ " " #asm_op " %0, %0, %2 \n" \ " scond %0, [%1] \n" \ " bnz 1b \n" \ : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ : "r"(m), /* Not "m": llock only supports reg direct addr mode */ \ "ir"(nr) \ : "cc"); \ } /* * Semantically: * Test the bit * if clear * set it and return 0 (old value) * else * return 1 (old value). * * Since ARC lacks a equivalent h/w primitive, the bit is set unconditionally * and the old value of bit is returned */ #define TEST_N_BIT_OP(op, c_op, asm_op) \ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\ { \ unsigned long old, temp; \ \ m += nr >> 5; \ \ nr &= 0x1f; \ \ /* \ * Explicit full memory barrier needed before/after as \ * LLOCK/SCOND themselves don't provide any such smenatic \ */ \ smp_mb(); \ \ __asm__ __volatile__( \ "1: llock %0, [%2] \n" \ " " #asm_op " %1, %0, %3 \n" \ " scond %1, [%2] \n" \ " bnz 1b \n" \ : "=&r"(old), "=&r"(temp) \ : "r"(m), "ir"(nr) \ : "cc"); \ \ smp_mb(); \ \ return (old & (1 << nr)) != 0; \ } #elif !defined(CONFIG_ARC_PLAT_EZNPS) /* * Non hardware assisted Atomic-R-M-W * Locking would change to irq-disabling only (UP) and spinlocks (SMP) * * There's "significant" micro-optimization in writing our own variants of * bitops (over generic variants) * * (1) The generic APIs have "signed" @nr while we have it "unsigned" * This avoids extra code to be generated for pointer arithmatic, since * is "not sure" that index is NOT -ve * (2) Utilize the fact that ARCompact bit fidding insn (BSET/BCLR/ASL) etc * only consider bottom 5 bits of @nr, so NO need to mask them off. * (GCC Quirk: however for constant @nr we still need to do the masking * at compile time) */ #define BIT_OP(op, c_op, asm_op) \ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ { \ unsigned long temp, flags; \ m += nr >> 5; \ \ /* \ * spin lock/unlock provide the needed smp_mb() before/after \ */ \ bitops_lock(flags); \ \ temp = *m; \ *m = temp c_op (1UL << (nr & 0x1f)); \ \ bitops_unlock(flags); \ } #define TEST_N_BIT_OP(op, c_op, asm_op) \ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\ { \ unsigned long old, flags; \ m += nr >> 5; \ \ bitops_lock(flags); \ \ old = *m; \ *m = old c_op (1UL << (nr & 0x1f)); \ \ bitops_unlock(flags); \ \ return (old & (1UL << (nr & 0x1f))) != 0; \ } #else /* CONFIG_ARC_PLAT_EZNPS */ #define BIT_OP(op, c_op, asm_op) \ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ { \ m += nr >> 5; \ \ nr = (1UL << (nr & 0x1f)); \ if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \ nr = ~nr; \ \ __asm__ __volatile__( \ " mov r2, %0\n" \ " mov r3, %1\n" \ " .word %2\n" \ : \ : "r"(nr), "r"(m), "i"(asm_op) \ : "r2", "r3", "memory"); \ } #define TEST_N_BIT_OP(op, c_op, asm_op) \ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\ { \ unsigned long old; \ \ m += nr >> 5; \ \ nr = old = (1UL << (nr & 0x1f)); \ if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \ old = ~old; \ \ /* Explicit full memory barrier needed before/after */ \ smp_mb(); \ \ __asm__ __volatile__( \ " mov r2, %0\n" \ " mov r3, %1\n" \ " .word %2\n" \ " mov %0, r2" \ : "+r"(old) \ : "r"(m), "i"(asm_op) \ : "r2", "r3", "memory"); \ \ smp_mb(); \ \ return (old & nr) != 0; \ } #endif /* CONFIG_ARC_PLAT_EZNPS */ /*************************************** * Non atomic variants **************************************/ #define __BIT_OP(op, c_op, asm_op) \ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \ { \ unsigned long temp; \ m += nr >> 5; \ \ temp = *m; \ *m = temp c_op (1UL << (nr & 0x1f)); \ } #define __TEST_N_BIT_OP(op, c_op, asm_op) \ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\ { \ unsigned long old; \ m += nr >> 5; \ \ old = *m; \ *m = old c_op (1UL << (nr & 0x1f)); \ \ return (old & (1UL << (nr & 0x1f))) != 0; \ } #define BIT_OPS(op, c_op, asm_op) \ \ /* set_bit(), clear_bit(), change_bit() */ \ BIT_OP(op, c_op, asm_op) \ \ /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\ TEST_N_BIT_OP(op, c_op, asm_op) \ \ /* __set_bit(), __clear_bit(), __change_bit() */ \ __BIT_OP(op, c_op, asm_op) \ \ /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\ __TEST_N_BIT_OP(op, c_op, asm_op) #ifndef CONFIG_ARC_PLAT_EZNPS BIT_OPS(set, |, bset) BIT_OPS(clear, & ~, bclr) BIT_OPS(change, ^, bxor) #else BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3) BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3) BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3) #endif /* * This routine doesn't need to be atomic. */ static inline int test_bit(unsigned int nr, const volatile unsigned long *addr) { unsigned long mask; addr += nr >> 5; mask = 1UL << (nr & 0x1f); return ((mask & *addr) != 0); } #ifdef CONFIG_ISA_ARCOMPACT /* * Count the number of zeros, starting from MSB * Helper for fls( ) friends * This is a pure count, so (1-32) or (0-31) doesn't apply * It could be 0 to 32, based on num of 0's in there * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31 */ static inline __attribute__ ((const)) int clz(unsigned int x) { unsigned int res; __asm__ __volatile__( " norm.f %0, %1 \n" " mov.n %0, 0 \n" " add.p %0, %0, 1 \n" : "=r"(res) : "r"(x) : "cc"); return res; } static inline int constant_fls(unsigned int x) { int r = 32; if (!x) return 0; if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; } if (!(x & 0xff000000u)) { x <<= 8; r -= 8; } if (!(x & 0xf0000000u)) { x <<= 4; r -= 4; } if (!(x & 0xc0000000u)) { x <<= 2; r -= 2; } if (!(x & 0x80000000u)) { x <<= 1; r -= 1; } return r; } /* * fls = Find Last Set in word * @result: [1-32] * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0 */ static inline __attribute__ ((const)) int fls(unsigned int x) { if (__builtin_constant_p(x)) return constant_fls(x); return 32 - clz(x); } /* * __fls: Similar to fls, but zero based (0-31) */ static inline __attribute__ ((const)) int __fls(unsigned long x) { if (!x) return 0; else return fls(x) - 1; } /* * ffs = Find First Set in word (LSB to MSB) * @result: [1-32], 0 if all 0's */ #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) /* * __ffs: Similar to ffs, but zero based (0-31) */ static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word) { if (!word) return word; return ffs(word) - 1; } #else /* CONFIG_ISA_ARCV2 */ /* * fls = Find Last Set in word * @result: [1-32] * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0 */ static inline __attribute__ ((const)) int fls(unsigned long x) { int n; asm volatile( " fls.f %0, %1 \n" /* 0:31; 0(Z) if src 0 */ " add.nz %0, %0, 1 \n" /* 0:31 -> 1:32 */ : "=r"(n) /* Early clobber not needed */ : "r"(x) : "cc"); return n; } /* * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set */ static inline __attribute__ ((const)) int __fls(unsigned long x) { /* FLS insn has exactly same semantics as the API */ return __builtin_arc_fls(x); } /* * ffs = Find First Set in word (LSB to MSB) * @result: [1-32], 0 if all 0's */ static inline __attribute__ ((const)) int ffs(unsigned long x) { int n; asm volatile( " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ " add.nz %0, %0, 1 \n" /* 0:31 -> 1:32 */ " mov.z %0, 0 \n" /* 31(Z)-> 0 */ : "=r"(n) /* Early clobber not needed */ : "r"(x) : "cc"); return n; } /* * __ffs: Similar to ffs, but zero based (0-31) */ static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x) { unsigned long n; asm volatile( " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ " mov.z %0, 0 \n" /* 31(Z)-> 0 */ : "=r"(n) : "r"(x) : "cc"); return n; } #endif /* CONFIG_ISA_ARCOMPACT */ /* * ffz = Find First Zero in word. * @return:[0-31], 32 if all 1's */ #define ffz(x) __ffs(~(x)) #include <asm-generic/bitops/hweight.h> #include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/find.h> #include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic-setbit.h> #endif /* !__ASSEMBLY__ */ #endif include/asm/setup.h 0000644 00000002051 14722070650 0010261 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASMARC_SETUP_H #define __ASMARC_SETUP_H #include <linux/types.h> #include <uapi/asm/setup.h> #ifdef CONFIG_ARC_PLAT_EZNPS #define COMMAND_LINE_SIZE 2048 #else #define COMMAND_LINE_SIZE 256 #endif /* * Data structure to map a ID to string * Used a lot for bootup reporting of hardware diversity */ struct id_to_str { int id; const char *str; }; extern int root_mountflags, end_mem; void setup_processor(void); void __init setup_arch_memory(void); long __init arc_get_mem_sz(void); /* Helpers used in arc_*_mumbojumbo routines */ #define IS_AVAIL1(v, s) ((v) ? s : "") #define IS_DISABLED_RUN(v) ((v) ? "" : "(disabled) ") #define IS_USED_RUN(v) ((v) ? "" : "(not used) ") #define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg)) #define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg)) #define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2)) #endif /* __ASMARC_SETUP_H */ include/asm/timex.h 0000644 00000000552 14722070650 0010253 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_TIMEX_H #define _ASM_ARC_TIMEX_H #define CLOCK_TICK_RATE 80000000 /* slated to be removed */ #include <asm-generic/timex.h> /* XXX: get_cycles() to be implemented with RTSC insn */ #endif /* _ASM_ARC_TIMEX_H */ include/asm/kmap_types.h 0000644 00000000524 14722070650 0011300 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H /* * We primarily need to define KM_TYPE_NR here but that in turn * is a function of PGDIR_SIZE etc. * To avoid circular deps issue, put everything in asm/highmem.h */ #endif include/asm/spinlock_types.h 0000644 00000001611 14722070650 0012170 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H typedef struct { volatile unsigned int slock; } arch_spinlock_t; #define __ARCH_SPIN_LOCK_UNLOCKED__ 0 #define __ARCH_SPIN_LOCK_LOCKED__ 1 #define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ } #define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ } /* * Unlocked : 0x0100_0000 * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it) * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000 */ typedef struct { volatile unsigned int counter; #ifndef CONFIG_ARC_HAS_LLSC arch_spinlock_t lock_mutex; #endif } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 #define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ } #endif include/asm/mmu.h 0000644 00000004464 14722070650 0007731 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_MMU_H #define _ASM_ARC_MMU_H #ifndef __ASSEMBLY__ #include <linux/threads.h> /* NR_CPUS */ #endif #if defined(CONFIG_ARC_MMU_V1) #define CONFIG_ARC_MMU_VER 1 #elif defined(CONFIG_ARC_MMU_V2) #define CONFIG_ARC_MMU_VER 2 #elif defined(CONFIG_ARC_MMU_V3) #define CONFIG_ARC_MMU_VER 3 #elif defined(CONFIG_ARC_MMU_V4) #define CONFIG_ARC_MMU_VER 4 #endif /* MMU Management regs */ #define ARC_REG_MMU_BCR 0x06f #if (CONFIG_ARC_MMU_VER < 4) #define ARC_REG_TLBPD0 0x405 #define ARC_REG_TLBPD1 0x406 #define ARC_REG_TLBPD1HI 0 /* Dummy: allows code sharing with ARC700 */ #define ARC_REG_TLBINDEX 0x407 #define ARC_REG_TLBCOMMAND 0x408 #define ARC_REG_PID 0x409 #define ARC_REG_SCRATCH_DATA0 0x418 #else #define ARC_REG_TLBPD0 0x460 #define ARC_REG_TLBPD1 0x461 #define ARC_REG_TLBPD1HI 0x463 #define ARC_REG_TLBINDEX 0x464 #define ARC_REG_TLBCOMMAND 0x465 #define ARC_REG_PID 0x468 #define ARC_REG_SCRATCH_DATA0 0x46c #endif /* Bits in MMU PID register */ #define __TLB_ENABLE (1 << 31) #define __PROG_ENABLE (1 << 30) #define MMU_ENABLE (__TLB_ENABLE | __PROG_ENABLE) /* Error code if probe fails */ #define TLB_LKUP_ERR 0x80000000 #if (CONFIG_ARC_MMU_VER < 4) #define TLB_DUP_ERR (TLB_LKUP_ERR | 0x00000001) #else #define TLB_DUP_ERR (TLB_LKUP_ERR | 0x40000000) #endif /* TLB Commands */ #define TLBWrite 0x1 #define TLBRead 0x2 #define TLBGetIndex 0x3 #define TLBProbe 0x4 #if (CONFIG_ARC_MMU_VER >= 2) #define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */ #define TLBIVUTLB 0x6 /* explicitly inv uTLBs */ #endif #if (CONFIG_ARC_MMU_VER >= 4) #define TLBInsertEntry 0x7 #define TLBDeleteEntry 0x8 #endif #ifndef __ASSEMBLY__ typedef struct { unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */ } mm_context_t; #ifdef CONFIG_ARC_DBG_TLB_PARANOIA void tlb_paranoid_check(unsigned int mm_asid, unsigned long address); #else #define tlb_paranoid_check(a, b) #endif void arc_mmu_init(void); extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); void read_decode_mmu_bcr(void); static inline int is_pae40_enabled(void) { return IS_ENABLED(CONFIG_ARC_HAS_PAE40); } extern int pae40_exist_but_not_enab(void); #endif /* !__ASSEMBLY__ */ #endif include/asm/uaccess.h 0000644 00000044530 14722070650 0010557 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * vineetg: June 2010 * -__clear_user( ) called multiple times during elf load was byte loop * converted to do as much word clear as possible. * * vineetg: Dec 2009 * -Hand crafted constant propagation for "constant" copy sizes * -stock kernel shrunk by 33K at -O3 * * vineetg: Sept 2009 * -Added option to (UN)inline copy_(to|from)_user to reduce code sz * -kernel shrunk by 200K even at -O3 (gcc 4.2.1) * -Enabled when doing -Os * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef _ASM_ARC_UACCESS_H #define _ASM_ARC_UACCESS_H #include <linux/string.h> /* for generic string functions */ #define __kernel_ok (uaccess_kernel()) /* * Algorithmically, for __user_ok() we want do: * (start < TASK_SIZE) && (start+len < TASK_SIZE) * where TASK_SIZE could either be retrieved from thread_info->addr_limit or * emitted directly in code. * * This can however be rewritten as follows: * (len <= TASK_SIZE) && (start+len < TASK_SIZE) * * Because it essentially checks if buffer end is within limit and @len is * non-ngeative, which implies that buffer start will be within limit too. * * The reason for rewriting being, for majority of cases, @len is generally * compile time constant, causing first sub-expression to be compile time * subsumed. * * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10), * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem * would already have been done at this call site for __kernel_ok() * */ #define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \ ((addr) <= (get_fs() - (sz)))) #define __access_ok(addr, sz) (unlikely(__kernel_ok) || \ likely(__user_ok((addr), (sz)))) /*********** Single byte/hword/word copies ******************/ #define __get_user_fn(sz, u, k) \ ({ \ long __ret = 0; /* success by default */ \ switch (sz) { \ case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \ case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \ case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \ case 8: __arc_get_user_one_64(*(k), u, __ret); break; \ } \ __ret; \ }) /* * Returns 0 on success, -EFAULT if not. * @ret already contains 0 - given that errors will be less likely * (hence +r asm constraint below). * In case of error, fixup code will make it -EFAULT */ #define __arc_get_user_one(dst, src, op, ret) \ __asm__ __volatile__( \ "1: "op" %1,[%2]\n" \ "2: ;nop\n" \ " .section .fixup, \"ax\"\n" \ " .align 4\n" \ "3: # return -EFAULT\n" \ " mov %0, %3\n" \ " # zero out dst ptr\n" \ " mov %1, 0\n" \ " j 2b\n" \ " .previous\n" \ " .section __ex_table, \"a\"\n" \ " .align 4\n" \ " .word 1b,3b\n" \ " .previous\n" \ \ : "+r" (ret), "=r" (dst) \ : "r" (src), "ir" (-EFAULT)) #define __arc_get_user_one_64(dst, src, ret) \ __asm__ __volatile__( \ "1: ld %1,[%2]\n" \ "4: ld %R1,[%2, 4]\n" \ "2: ;nop\n" \ " .section .fixup, \"ax\"\n" \ " .align 4\n" \ "3: # return -EFAULT\n" \ " mov %0, %3\n" \ " # zero out dst ptr\n" \ " mov %1, 0\n" \ " mov %R1, 0\n" \ " j 2b\n" \ " .previous\n" \ " .section __ex_table, \"a\"\n" \ " .align 4\n" \ " .word 1b,3b\n" \ " .word 4b,3b\n" \ " .previous\n" \ \ : "+r" (ret), "=r" (dst) \ : "r" (src), "ir" (-EFAULT)) #define __put_user_fn(sz, u, k) \ ({ \ long __ret = 0; /* success by default */ \ switch (sz) { \ case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \ case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \ case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \ case 8: __arc_put_user_one_64(*(k), u, __ret); break; \ } \ __ret; \ }) #define __arc_put_user_one(src, dst, op, ret) \ __asm__ __volatile__( \ "1: "op" %1,[%2]\n" \ "2: ;nop\n" \ " .section .fixup, \"ax\"\n" \ " .align 4\n" \ "3: mov %0, %3\n" \ " j 2b\n" \ " .previous\n" \ " .section __ex_table, \"a\"\n" \ " .align 4\n" \ " .word 1b,3b\n" \ " .previous\n" \ \ : "+r" (ret) \ : "r" (src), "r" (dst), "ir" (-EFAULT)) #define __arc_put_user_one_64(src, dst, ret) \ __asm__ __volatile__( \ "1: st %1,[%2]\n" \ "4: st %R1,[%2, 4]\n" \ "2: ;nop\n" \ " .section .fixup, \"ax\"\n" \ " .align 4\n" \ "3: mov %0, %3\n" \ " j 2b\n" \ " .previous\n" \ " .section __ex_table, \"a\"\n" \ " .align 4\n" \ " .word 1b,3b\n" \ " .word 4b,3b\n" \ " .previous\n" \ \ : "+r" (ret) \ : "r" (src), "r" (dst), "ir" (-EFAULT)) static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { long res = 0; char val; unsigned long tmp1, tmp2, tmp3, tmp4; unsigned long orig_n = n; if (n == 0) return 0; /* unaligned */ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) { unsigned char tmp; __asm__ __volatile__ ( " mov.f lp_count, %0 \n" " lpnz 2f \n" "1: ldb.ab %1, [%3, 1] \n" " stb.ab %1, [%2, 1] \n" " sub %0,%0,1 \n" "2: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "3: j 2b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 1b, 3b \n" " .previous \n" : "+r" (n), /* * Note as an '&' earlyclobber operand to make sure the * temporary register inside the loop is not the same as * FROM or TO. */ "=&r" (tmp), "+r" (to), "+r" (from) : : "lp_count", "memory"); return n; } /* * Hand-crafted constant propagation to reduce code sz of the * laddered copy 16x,8,4,2,1 */ if (__builtin_constant_p(orig_n)) { res = orig_n; if (orig_n / 16) { orig_n = orig_n % 16; __asm__ __volatile__( " lsr lp_count, %7,4 \n" " lp 3f \n" "1: ld.ab %3, [%2, 4] \n" "11: ld.ab %4, [%2, 4] \n" "12: ld.ab %5, [%2, 4] \n" "13: ld.ab %6, [%2, 4] \n" " st.ab %3, [%1, 4] \n" " st.ab %4, [%1, 4] \n" " st.ab %5, [%1, 4] \n" " st.ab %6, [%1, 4] \n" " sub %0,%0,16 \n" "3: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 3b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .word 11b,4b \n" " .word 12b,4b \n" " .word 13b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4) : "ir"(n) : "lp_count", "memory"); } if (orig_n / 8) { orig_n = orig_n % 8; __asm__ __volatile__( "14: ld.ab %3, [%2,4] \n" "15: ld.ab %4, [%2,4] \n" " st.ab %3, [%1,4] \n" " st.ab %4, [%1,4] \n" " sub %0,%0,8 \n" "31: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 31b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 14b,4b \n" " .word 15b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1), "=r"(tmp2) : : "memory"); } if (orig_n / 4) { orig_n = orig_n % 4; __asm__ __volatile__( "16: ld.ab %3, [%2,4] \n" " st.ab %3, [%1,4] \n" " sub %0,%0,4 \n" "32: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 32b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 16b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1) : : "memory"); } if (orig_n / 2) { orig_n = orig_n % 2; __asm__ __volatile__( "17: ldw.ab %3, [%2,2] \n" " stw.ab %3, [%1,2] \n" " sub %0,%0,2 \n" "33: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 33b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 17b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1) : : "memory"); } if (orig_n & 1) { __asm__ __volatile__( "18: ldb.ab %3, [%2,2] \n" " stb.ab %3, [%1,2] \n" " sub %0,%0,1 \n" "34: ; nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 34b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 18b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1) : : "memory"); } } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */ __asm__ __volatile__( " mov %0,%3 \n" " lsr.f lp_count, %3,4 \n" /* 16x bytes */ " lpnz 3f \n" "1: ld.ab %5, [%2, 4] \n" "11: ld.ab %6, [%2, 4] \n" "12: ld.ab %7, [%2, 4] \n" "13: ld.ab %8, [%2, 4] \n" " st.ab %5, [%1, 4] \n" " st.ab %6, [%1, 4] \n" " st.ab %7, [%1, 4] \n" " st.ab %8, [%1, 4] \n" " sub %0,%0,16 \n" "3: and.f %3,%3,0xf \n" /* stragglers */ " bz 34f \n" " bbit0 %3,3,31f \n" /* 8 bytes left */ "14: ld.ab %5, [%2,4] \n" "15: ld.ab %6, [%2,4] \n" " st.ab %5, [%1,4] \n" " st.ab %6, [%1,4] \n" " sub.f %0,%0,8 \n" "31: bbit0 %3,2,32f \n" /* 4 bytes left */ "16: ld.ab %5, [%2,4] \n" " st.ab %5, [%1,4] \n" " sub.f %0,%0,4 \n" "32: bbit0 %3,1,33f \n" /* 2 bytes left */ "17: ldw.ab %5, [%2,2] \n" " stw.ab %5, [%1,2] \n" " sub.f %0,%0,2 \n" "33: bbit0 %3,0,34f \n" "18: ldb.ab %5, [%2,1] \n" /* 1 byte left */ " stb.ab %5, [%1,1] \n" " sub.f %0,%0,1 \n" "34: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 34b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .word 11b,4b \n" " .word 12b,4b \n" " .word 13b,4b \n" " .word 14b,4b \n" " .word 15b,4b \n" " .word 16b,4b \n" " .word 17b,4b \n" " .word 18b,4b \n" " .previous \n" : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val), "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4) : : "lp_count", "memory"); } return res; } static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { long res = 0; char val; unsigned long tmp1, tmp2, tmp3, tmp4; unsigned long orig_n = n; if (n == 0) return 0; /* unaligned */ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) { unsigned char tmp; __asm__ __volatile__( " mov.f lp_count, %0 \n" " lpnz 3f \n" " ldb.ab %1, [%3, 1] \n" "1: stb.ab %1, [%2, 1] \n" " sub %0, %0, 1 \n" "3: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 3b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .previous \n" : "+r" (n), /* Note as an '&' earlyclobber operand to make sure the * temporary register inside the loop is not the same as * FROM or TO. */ "=&r" (tmp), "+r" (to), "+r" (from) : : "lp_count", "memory"); return n; } if (__builtin_constant_p(orig_n)) { res = orig_n; if (orig_n / 16) { orig_n = orig_n % 16; __asm__ __volatile__( " lsr lp_count, %7,4 \n" " lp 3f \n" " ld.ab %3, [%2, 4] \n" " ld.ab %4, [%2, 4] \n" " ld.ab %5, [%2, 4] \n" " ld.ab %6, [%2, 4] \n" "1: st.ab %3, [%1, 4] \n" "11: st.ab %4, [%1, 4] \n" "12: st.ab %5, [%1, 4] \n" "13: st.ab %6, [%1, 4] \n" " sub %0, %0, 16 \n" "3:;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 3b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .word 11b,4b \n" " .word 12b,4b \n" " .word 13b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4) : "ir"(n) : "lp_count", "memory"); } if (orig_n / 8) { orig_n = orig_n % 8; __asm__ __volatile__( " ld.ab %3, [%2,4] \n" " ld.ab %4, [%2,4] \n" "14: st.ab %3, [%1,4] \n" "15: st.ab %4, [%1,4] \n" " sub %0, %0, 8 \n" "31:;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 31b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 14b,4b \n" " .word 15b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1), "=r"(tmp2) : : "memory"); } if (orig_n / 4) { orig_n = orig_n % 4; __asm__ __volatile__( " ld.ab %3, [%2,4] \n" "16: st.ab %3, [%1,4] \n" " sub %0, %0, 4 \n" "32:;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 32b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 16b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1) : : "memory"); } if (orig_n / 2) { orig_n = orig_n % 2; __asm__ __volatile__( " ldw.ab %3, [%2,2] \n" "17: stw.ab %3, [%1,2] \n" " sub %0, %0, 2 \n" "33:;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 33b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 17b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1) : : "memory"); } if (orig_n & 1) { __asm__ __volatile__( " ldb.ab %3, [%2,1] \n" "18: stb.ab %3, [%1,1] \n" " sub %0, %0, 1 \n" "34: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 34b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 18b,4b \n" " .previous \n" : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1) : : "memory"); } } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */ __asm__ __volatile__( " mov %0,%3 \n" " lsr.f lp_count, %3,4 \n" /* 16x bytes */ " lpnz 3f \n" " ld.ab %5, [%2, 4] \n" " ld.ab %6, [%2, 4] \n" " ld.ab %7, [%2, 4] \n" " ld.ab %8, [%2, 4] \n" "1: st.ab %5, [%1, 4] \n" "11: st.ab %6, [%1, 4] \n" "12: st.ab %7, [%1, 4] \n" "13: st.ab %8, [%1, 4] \n" " sub %0, %0, 16 \n" "3: and.f %3,%3,0xf \n" /* stragglers */ " bz 34f \n" " bbit0 %3,3,31f \n" /* 8 bytes left */ " ld.ab %5, [%2,4] \n" " ld.ab %6, [%2,4] \n" "14: st.ab %5, [%1,4] \n" "15: st.ab %6, [%1,4] \n" " sub.f %0, %0, 8 \n" "31: bbit0 %3,2,32f \n" /* 4 bytes left */ " ld.ab %5, [%2,4] \n" "16: st.ab %5, [%1,4] \n" " sub.f %0, %0, 4 \n" "32: bbit0 %3,1,33f \n" /* 2 bytes left */ " ldw.ab %5, [%2,2] \n" "17: stw.ab %5, [%1,2] \n" " sub.f %0, %0, 2 \n" "33: bbit0 %3,0,34f \n" " ldb.ab %5, [%2,1] \n" /* 1 byte left */ "18: stb.ab %5, [%1,1] \n" " sub.f %0, %0, 1 \n" "34: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: j 34b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .word 11b,4b \n" " .word 12b,4b \n" " .word 13b,4b \n" " .word 14b,4b \n" " .word 15b,4b \n" " .word 16b,4b \n" " .word 17b,4b \n" " .word 18b,4b \n" " .previous \n" : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val), "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4) : : "lp_count", "memory"); } return res; } static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) { long res = n; unsigned char *d_char = to; __asm__ __volatile__( " bbit0 %0, 0, 1f \n" "75: stb.ab %2, [%0,1] \n" " sub %1, %1, 1 \n" "1: bbit0 %0, 1, 2f \n" "76: stw.ab %2, [%0,2] \n" " sub %1, %1, 2 \n" "2: asr.f lp_count, %1, 2 \n" " lpnz 3f \n" "77: st.ab %2, [%0,4] \n" " sub %1, %1, 4 \n" "3: bbit0 %1, 1, 4f \n" "78: stw.ab %2, [%0,2] \n" " sub %1, %1, 2 \n" "4: bbit0 %1, 0, 5f \n" "79: stb.ab %2, [%0,1] \n" " sub %1, %1, 1 \n" "5: \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "3: j 5b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 75b, 3b \n" " .word 76b, 3b \n" " .word 77b, 3b \n" " .word 78b, 3b \n" " .word 79b, 3b \n" " .previous \n" : "+r"(d_char), "+r"(res) : "i"(0) : "lp_count", "memory"); return res; } static inline long __arc_strncpy_from_user(char *dst, const char __user *src, long count) { long res = 0; char val; if (count == 0) return 0; __asm__ __volatile__( " mov lp_count, %5 \n" " lp 3f \n" "1: ldb.ab %3, [%2, 1] \n" " breq.d %3, 0, 3f \n" " stb.ab %3, [%1, 1] \n" " add %0, %0, 1 # Num of NON NULL bytes copied \n" "3: \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: mov %0, %4 # sets @res as -EFAULT \n" " j 3b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) : "g"(-EFAULT), "r"(count) : "lp_count", "memory"); return res; } static inline long __arc_strnlen_user(const char __user *s, long n) { long res, tmp1, cnt; char val; __asm__ __volatile__( " mov %2, %1 \n" "1: ldb.ab %3, [%0, 1] \n" " breq.d %3, 0, 2f \n" " sub.f %2, %2, 1 \n" " bnz 1b \n" " sub %2, %2, 1 \n" "2: sub %0, %1, %2 \n" "3: ;nop \n" " .section .fixup, \"ax\" \n" " .align 4 \n" "4: mov %0, 0 \n" " j 3b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .previous \n" : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val) : "0"(s), "1"(n) : "memory"); return res; } #ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER #define __clear_user(d, n) __arc_clear_user(d, n) #define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n) #define __strnlen_user(s, n) __arc_strnlen_user(s, n) #else extern unsigned long arc_clear_user_noinline(void __user *to, unsigned long n); extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src, long count); extern long arc_strnlen_user_noinline(const char __user *src, long n); #define __clear_user(d, n) arc_clear_user_noinline(d, n) #define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n) #define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n) #endif #include <asm/segment.h> #include <asm-generic/uaccess.h> #endif include/asm/bug.h 0000644 00000001430 14722070650 0007676 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_BUG_H #define _ASM_ARC_BUG_H #ifndef __ASSEMBLY__ #include <asm/ptrace.h> struct task_struct; void show_regs(struct pt_regs *regs); void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs); void show_kernel_fault_diag(const char *str, struct pt_regs *regs, unsigned long address); void die(const char *str, struct pt_regs *regs, unsigned long address); #define BUG() do { \ pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ barrier_before_unreachable(); \ __builtin_trap(); \ } while (0) #define HAVE_ARCH_BUG #include <asm-generic/bug.h> #endif /* !__ASSEMBLY__ */ #endif include/asm/cache.h 0000644 00000007205 14722070650 0010172 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ARC_ASM_CACHE_H #define __ARC_ASM_CACHE_H /* In case $$ not config, setup a dummy number for rest of kernel */ #ifndef CONFIG_ARC_CACHE_LINE_SHIFT #define L1_CACHE_SHIFT 6 #else #define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT #endif #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1)) /* * ARC700 doesn't cache any access in top 1G (0xc000_0000 to 0xFFFF_FFFF) * Ideal for wiring memory mapped peripherals as we don't need to do * explicit uncached accesses (LD.di/ST.di) hence more portable drivers */ #define ARC_UNCACHED_ADDR_SPACE 0xc0000000 #ifndef __ASSEMBLY__ /* Uncached access macros */ #define arc_read_uncached_32(ptr) \ ({ \ unsigned int __ret; \ __asm__ __volatile__( \ " ld.di %0, [%1] \n" \ : "=r"(__ret) \ : "r"(ptr)); \ __ret; \ }) #define arc_write_uncached_32(ptr, data)\ ({ \ __asm__ __volatile__( \ " st.di %0, [%1] \n" \ : \ : "r"(data), "r"(ptr)); \ }) /* Largest line length for either L1 or L2 is 128 bytes */ #define SMP_CACHE_BYTES 128 #define cache_line_size() SMP_CACHE_BYTES #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES /* * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit * alignment for any atomic64_t embedded in buffer. * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed * value of 4 (and not 8) in ARC ABI. */ #if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC) #define ARCH_SLAB_MINALIGN 8 #endif extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern void read_decode_cache_bcr(void); extern int ioc_enable; extern unsigned long perip_base, perip_end; #endif /* !__ASSEMBLY__ */ /* Instruction cache related Auxiliary registers */ #define ARC_REG_IC_BCR 0x77 /* Build Config reg */ #define ARC_REG_IC_IVIC 0x10 #define ARC_REG_IC_CTRL 0x11 #define ARC_REG_IC_IVIR 0x16 #define ARC_REG_IC_ENDR 0x17 #define ARC_REG_IC_IVIL 0x19 #define ARC_REG_IC_PTAG 0x1E #define ARC_REG_IC_PTAG_HI 0x1F /* Bit val in IC_CTRL */ #define IC_CTRL_DIS 0x1 /* Data cache related Auxiliary registers */ #define ARC_REG_DC_BCR 0x72 /* Build Config reg */ #define ARC_REG_DC_IVDC 0x47 #define ARC_REG_DC_CTRL 0x48 #define ARC_REG_DC_IVDL 0x4A #define ARC_REG_DC_FLSH 0x4B #define ARC_REG_DC_FLDL 0x4C #define ARC_REG_DC_STARTR 0x4D #define ARC_REG_DC_ENDR 0x4E #define ARC_REG_DC_PTAG 0x5C #define ARC_REG_DC_PTAG_HI 0x5F /* Bit val in DC_CTRL */ #define DC_CTRL_DIS 0x001 #define DC_CTRL_INV_MODE_FLUSH 0x040 #define DC_CTRL_FLUSH_STATUS 0x100 #define DC_CTRL_RGN_OP_INV 0x200 #define DC_CTRL_RGN_OP_MSK 0x200 /*System-level cache (L2 cache) related Auxiliary registers */ #define ARC_REG_SLC_CFG 0x901 #define ARC_REG_SLC_CTRL 0x903 #define ARC_REG_SLC_FLUSH 0x904 #define ARC_REG_SLC_INVALIDATE 0x905 #define ARC_AUX_SLC_IVDL 0x910 #define ARC_AUX_SLC_FLDL 0x912 #define ARC_REG_SLC_RGN_START 0x914 #define ARC_REG_SLC_RGN_START1 0x915 #define ARC_REG_SLC_RGN_END 0x916 #define ARC_REG_SLC_RGN_END1 0x917 /* Bit val in SLC_CONTROL */ #define SLC_CTRL_DIS 0x001 #define SLC_CTRL_IM 0x040 #define SLC_CTRL_BUSY 0x100 #define SLC_CTRL_RGN_OP_INV 0x200 /* IO coherency related Auxiliary registers */ #define ARC_REG_IO_COH_ENABLE 0x500 #define ARC_IO_COH_ENABLE_BIT BIT(0) #define ARC_REG_IO_COH_PARTIAL 0x501 #define ARC_IO_COH_PARTIAL_BIT BIT(0) #define ARC_REG_IO_COH_AP0_BASE 0x508 #define ARC_REG_IO_COH_AP0_SIZE 0x509 #endif /* _ASM_CACHE_H */ include/asm/highmem.h 0000644 00000002500 14722070650 0010536 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_HIGHMEM_H #define _ASM_HIGHMEM_H #ifdef CONFIG_HIGHMEM #include <uapi/asm/page.h> #include <asm/kmap_types.h> /* start after vmalloc area */ #define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE) #define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */ #define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS) #define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT)) /* start after fixmap area */ #define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE) #define PKMAP_SIZE PGDIR_SIZE #define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT) #define LAST_PKMAP_MASK (LAST_PKMAP - 1) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) #define kmap_prot PAGE_KERNEL #include <asm/cacheflush.h> extern void *kmap(struct page *page); extern void *kmap_high(struct page *page); extern void *kmap_atomic(struct page *page); extern void __kunmap_atomic(void *kvaddr); extern void kunmap_high(struct page *page); extern void kmap_init(void); static inline void flush_cache_kmaps(void) { flush_cache_all(); } static inline void kunmap(struct page *page) { BUG_ON(in_interrupt()); if (!PageHighMem(page)) return; kunmap_high(page); } #endif #endif include/asm/disasm.h 0000644 00000007345 14722070650 0010414 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * several functions that help interpret ARC instructions * used for unaligned accesses, kprobes and kgdb * * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ARC_DISASM_H__ #define __ARC_DISASM_H__ enum { op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4, op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13, op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17, op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21, op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25, op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29, op_B_S = 30, op_BL_S = 31 }; enum flow { noflow, direct_jump, direct_call, indirect_jump, indirect_call, invalid_instr }; #define IS_BIT(word, n) ((word) & (1<<n)) #define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s))))) #define MAJOR_OPCODE(word) (BITS((word), 27, 31)) #define MINOR_OPCODE(word) (BITS((word), 16, 21)) #define FIELD_A(word) (BITS((word), 0, 5)) #define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \ (BITS((word), 24, 26))) #define FIELD_C(word) (BITS((word), 6, 11)) #define FIELD_u6(word) FIELDC(word) #define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \ BITS((word), 6, 11)), 12) /* note that for BL/BRcc these two macro's need another AND statement to mask * out bit 1 (make the result a multiple of 4) */ #define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \ BITS(word, 16, 23)), 9) #define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \ (BITS(word, 17, 26) << 1)), 12) #define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \ (BITS(word, 6, 15) << 11) | \ (BITS(word, 17, 26) << 1)), 12) /* note: these operate on 16 bits! */ #define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2)) #define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \ BITS((word), 8, 10)) #define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7)) #define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8)) #define FIELD_S_u5(word) (BITS((word), 0, 4)) #define FIELD_S_u6(word) (BITS((word), 0, 4) << 1) #define FIELD_S_u7(word) (BITS((word), 0, 4) << 2) #define FIELD_S_u10(word) (BITS((word), 0, 7) << 2) #define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9) #define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9) #define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9) #define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10) #define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11) #define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13) #define STATUS32_L 0x00000100 #define REG_LIMM 62 struct disasm_state { /* generic info */ unsigned long words[2]; int instr_len; int major_opcode; /* info for branch/jump */ int is_branch; int target; int delay_slot; enum flow flow; /* info for load/store */ int src1, src2, src3, dest, wb_reg; int zz, aa, x, pref, di; int fault, write; }; static inline int sign_extend(int value, int bits) { if (IS_BIT(value, (bits - 1))) value |= (0xffffffff << bits); return value; } static inline int is_short_instr(unsigned long addr) { uint16_t word = *((uint16_t *)addr); int opcode = (word >> 11) & 0x1F; return (opcode >= 0x0B); } void disasm_instr(unsigned long addr, struct disasm_state *state, int userspace, struct pt_regs *regs, struct callee_regs *cregs); int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs *cregs, unsigned long *fall_thru, unsigned long *target); long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs); void set_reg(int reg, long val, struct pt_regs *regs, struct callee_regs *cregs); #endif /* __ARC_DISASM_H__ */ include/asm/dwarf.h 0000644 00000001352 14722070650 0010227 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_DWARF_H #define _ASM_ARC_DWARF_H #ifdef __ASSEMBLY__ #ifdef ARC_DW2_UNWIND_AS_CFI #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc #define CFI_DEF_CFA .cfi_def_cfa #define CFI_REGISTER .cfi_register #define CFI_REL_OFFSET .cfi_rel_offset #define CFI_UNDEFINED .cfi_undefined #else #define CFI_IGNORE # #define CFI_STARTPROC CFI_IGNORE #define CFI_ENDPROC CFI_IGNORE #define CFI_DEF_CFA CFI_IGNORE #define CFI_REGISTER CFI_IGNORE #define CFI_REL_OFFSET CFI_IGNORE #define CFI_UNDEFINED CFI_IGNORE #endif /* !ARC_DW2_UNWIND_AS_CFI */ #endif /* __ASSEMBLY__ */ #endif /* _ASM_ARC_DWARF_H */ include/asm/arcregs.h 0000644 00000020700 14722070650 0010550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_ARCREGS_H #define _ASM_ARC_ARCREGS_H /* Build Configuration Registers */ #define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */ #define ARC_REG_ERP_CTRL 0x3F /* ARCv2 Error protection control */ #define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */ #define ARC_REG_CRC_BCR 0x62 #define ARC_REG_VECBASE_BCR 0x68 #define ARC_REG_PERIBASE_BCR 0x69 #define ARC_REG_FP_BCR 0x6B /* ARCompact: Single-Precision FPU */ #define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */ #define ARC_REG_ERP_BUILD 0xc7 /* ARCv2 Error protection Build: ECC/Parity */ #define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */ #define ARC_REG_SLC_BCR 0xce #define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */ #define ARC_REG_AP_BCR 0x76 #define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */ #define ARC_REG_XY_MEM_BCR 0x79 #define ARC_REG_MAC_BCR 0x7a #define ARC_REG_MUL_BCR 0x7b #define ARC_REG_SWAP_BCR 0x7c #define ARC_REG_NORM_BCR 0x7d #define ARC_REG_MIXMAX_BCR 0x7e #define ARC_REG_BARREL_BCR 0x7f #define ARC_REG_D_UNCACH_BCR 0x6A #define ARC_REG_BPU_BCR 0xc0 #define ARC_REG_ISA_CFG_BCR 0xc1 #define ARC_REG_LPB_BUILD 0xE9 /* ARCv2 Loop Buffer Build */ #define ARC_REG_RTT_BCR 0xF2 #define ARC_REG_IRQ_BCR 0xF3 #define ARC_REG_MICRO_ARCH_BCR 0xF9 /* ARCv2 Product revision */ #define ARC_REG_SMART_BCR 0xFF #define ARC_REG_CLUSTER_BCR 0xcf #define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */ #define ARC_REG_LPB_CTRL 0x488 /* ARCv2 Loop Buffer control */ /* Common for ARCompact and ARCv2 status register */ #define ARC_REG_STATUS32 0x0A /* status32 Bits Positions */ #define STATUS_AE_BIT 5 /* Exception active */ #define STATUS_DE_BIT 6 /* PC is in delay slot */ #define STATUS_U_BIT 7 /* User/Kernel mode */ #define STATUS_Z_BIT 11 #define STATUS_L_BIT 12 /* Loop inhibit */ /* These masks correspond to the status word(STATUS_32) bits */ #define STATUS_AE_MASK (1<<STATUS_AE_BIT) #define STATUS_DE_MASK (1<<STATUS_DE_BIT) #define STATUS_U_MASK (1<<STATUS_U_BIT) #define STATUS_Z_MASK (1<<STATUS_Z_BIT) #define STATUS_L_MASK (1<<STATUS_L_BIT) /* * ECR: Exception Cause Reg bits-n-pieces * [23:16] = Exception Vector * [15: 8] = Exception Cause Code * [ 7: 0] = Exception Parameters (for certain types only) */ #ifdef CONFIG_ISA_ARCOMPACT #define ECR_V_MEM_ERR 0x01 #define ECR_V_INSN_ERR 0x02 #define ECR_V_MACH_CHK 0x20 #define ECR_V_ITLB_MISS 0x21 #define ECR_V_DTLB_MISS 0x22 #define ECR_V_PROTV 0x23 #define ECR_V_TRAP 0x25 #else #define ECR_V_MEM_ERR 0x01 #define ECR_V_INSN_ERR 0x02 #define ECR_V_MACH_CHK 0x03 #define ECR_V_ITLB_MISS 0x04 #define ECR_V_DTLB_MISS 0x05 #define ECR_V_PROTV 0x06 #define ECR_V_TRAP 0x09 #define ECR_V_MISALIGN 0x0d #endif /* DTLB Miss and Protection Violation Cause Codes */ #define ECR_C_PROTV_INST_FETCH 0x00 #define ECR_C_PROTV_LOAD 0x01 #define ECR_C_PROTV_STORE 0x02 #define ECR_C_PROTV_XCHG 0x03 #define ECR_C_PROTV_MISALIG_DATA 0x04 #define ECR_C_BIT_PROTV_MISALIG_DATA 10 /* Machine Check Cause Code Values */ #define ECR_C_MCHK_DUP_TLB 0x01 /* DTLB Miss Exception Cause Code Values */ #define ECR_C_BIT_DTLB_LD_MISS 8 #define ECR_C_BIT_DTLB_ST_MISS 9 /* Auxiliary registers */ #define AUX_IDENTITY 4 #define AUX_EXEC_CTRL 8 #define AUX_INTR_VEC_BASE 0x25 #define AUX_VOL 0x5e /* * Floating Pt Registers * Status regs are read-only (build-time) so need not be saved/restored */ #define ARC_AUX_FP_STAT 0x300 #define ARC_AUX_DPFP_1L 0x301 #define ARC_AUX_DPFP_1H 0x302 #define ARC_AUX_DPFP_2L 0x303 #define ARC_AUX_DPFP_2H 0x304 #define ARC_AUX_DPFP_STAT 0x305 #ifndef __ASSEMBLY__ #include <soc/arc/aux.h> /* Helpers */ #define TO_KB(bytes) ((bytes) >> 10) #define TO_MB(bytes) (TO_KB(bytes) >> 10) #define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10)) #define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10) /* *************************************************************** * Build Configuration Registers, with encoded hardware config */ struct bcr_identity { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int chip_id:16, cpu_id:8, family:8; #else unsigned int family:8, cpu_id:8, chip_id:16; #endif }; struct bcr_isa_arcv2 { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1, pad1:12, ver:8; #else unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1, ldd:1, pad2:4, div_rem:4; #endif }; struct bcr_uarch_build_arcv2 { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, prod:8, maj:8, min:8; #else unsigned int min:8, maj:8, prod:8, pad:8; #endif }; struct bcr_mpy { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; #else unsigned int ver:8, type:2, cycles:2, dsp:4, x1616:8, pad:8; #endif }; struct bcr_iccm_arcompact { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int base:16, pad:5, sz:3, ver:8; #else unsigned int ver:8, sz:3, pad:5, base:16; #endif }; struct bcr_iccm_arcv2 { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8; #else unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8; #endif }; struct bcr_dccm_arcompact { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int res:21, sz:3, ver:8; #else unsigned int ver:8, sz:3, res:21; #endif }; struct bcr_dccm_arcv2 { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8; #else unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12; #endif }; /* ARCompact: Both SP and DP FPU BCRs have same format */ struct bcr_fp_arcompact { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int fast:1, ver:8; #else unsigned int ver:8, fast:1; #endif }; struct bcr_fp_arcv2 { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad2:15, dp:1, pad1:7, sp:1, ver:8; #else unsigned int ver:8, sp:1, pad1:7, dp:1, pad2:15; #endif }; struct bcr_actionpoint { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:21, min:1, num:2, ver:8; #else unsigned int ver:8, num:2, min:1, pad:21; #endif }; #include <soc/arc/timers.h> struct bcr_bpu_arcompact { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad2:19, fam:1, pad:2, ent:2, ver:8; #else unsigned int ver:8, ent:2, pad:2, fam:1, pad2:19; #endif }; struct bcr_bpu_arcv2 { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:6, fbe:2, tqe:2, ts:4, ft:1, rse:2, pte:3, bce:3, ver:8; #else unsigned int ver:8, bce:3, pte:3, rse:2, ft:1, ts:4, tqe:2, fbe:2, pad:6; #endif }; /* Error Protection Build: ECC/Parity */ struct bcr_erp { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8; #else unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5; #endif }; /* Error Protection Control */ struct ctl_erp { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad2:27, mpd:1, pad1:2, dpd:1, dpi:1; #else unsigned int dpi:1, dpd:1, pad1:2, mpd:1, pad2:27; #endif }; struct bcr_lpb { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:16, entries:8, ver:8; #else unsigned int ver:8, entries:8, pad:16; #endif }; struct bcr_generic { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int info:24, ver:8; #else unsigned int ver:8, info:24; #endif }; /* ******************************************************************* * Generic structures to hold build configuration used at runtime */ struct cpuinfo_arc_mmu { unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1; unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8; }; struct cpuinfo_arc_cache { unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4; }; struct cpuinfo_arc_bpu { unsigned int ver, full, num_cache, num_pred, ret_stk; }; struct cpuinfo_arc_ccm { unsigned int base_addr, sz; }; struct cpuinfo_arc { struct cpuinfo_arc_cache icache, dcache, slc; struct cpuinfo_arc_mmu mmu; struct cpuinfo_arc_bpu bpu; struct bcr_identity core; struct bcr_isa_arcv2 isa; const char *release, *name; unsigned int vec_base; struct cpuinfo_arc_ccm iccm, dccm; struct { unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4, ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1, timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; } extn; struct bcr_mpy extn_mpy; }; extern struct cpuinfo_arc cpuinfo_arc700[]; static inline int is_isa_arcv2(void) { return IS_ENABLED(CONFIG_ISA_ARCV2); } static inline int is_isa_arcompact(void) { return IS_ENABLED(CONFIG_ISA_ARCOMPACT); } #endif /* __ASEMBLY__ */ #endif /* _ASM_ARC_ARCREGS_H */ include/asm/kprobes.h 0000644 00000002240 14722070650 0010566 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ARC_KPROBES_H #define _ARC_KPROBES_H #include <asm-generic/kprobes.h> #ifdef CONFIG_KPROBES typedef u16 kprobe_opcode_t; #define UNIMP_S_INSTRUCTION 0x79e0 #define TRAP_S_2_INSTRUCTION 0x785e #define MAX_INSN_SIZE 8 #define MAX_STACK_SIZE 64 struct arch_specific_insn { int is_short; kprobe_opcode_t *t1_addr, *t2_addr; kprobe_opcode_t t1_opcode, t2_opcode; }; #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 struct kprobe; void arch_remove_kprobe(struct kprobe *p); int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); struct prev_kprobe { struct kprobe *kp; unsigned long status; }; struct kprobe_ctlblk { unsigned int kprobe_status; struct prev_kprobe prev_kprobe; }; int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause); void kretprobe_trampoline(void); void trap_is_kprobe(unsigned long address, struct pt_regs *regs); #else #define trap_is_kprobe(address, regs) #endif /* CONFIG_KPROBES */ #endif /* _ARC_KPROBES_H */ include/asm/unaligned.h 0000644 00000001161 14722070650 0011070 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_UNALIGNED_H #define _ASM_ARC_UNALIGNED_H /* ARC700 can't handle unaligned Data accesses. */ #include <asm-generic/unaligned.h> #include <asm/ptrace.h> #ifdef CONFIG_ARC_EMUL_UNALIGNED int misaligned_fixup(unsigned long address, struct pt_regs *regs, struct callee_regs *cregs); #else static inline int misaligned_fixup(unsigned long address, struct pt_regs *regs, struct callee_regs *cregs) { /* Not fixed */ return 1; } #endif #endif /* _ASM_ARC_UNALIGNED_H */ include/asm/asm-offsets.h 0000644 00000000245 14722070650 0011353 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #include <generated/asm-offsets.h> include/asm/elf.h 0000644 00000003617 14722070650 0007700 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_ELF_H #define __ASM_ARC_ELF_H #include <linux/types.h> #include <linux/elf-em.h> #include <uapi/asm/elf.h> #define EM_ARC_INUSE (IS_ENABLED(CONFIG_ISA_ARCOMPACT) ? \ EM_ARCOMPACT : EM_ARCV2) /* ARC Relocations (kernel Modules only) */ #define R_ARC_32 0x4 #define R_ARC_32_ME 0x1B #define R_ARC_32_PCREL 0x31 /*to set parameters in the core dumps */ #define ELF_ARCH EM_ARC_INUSE #define ELF_CLASS ELFCLASS32 #ifdef CONFIG_CPU_BIG_ENDIAN #define ELF_DATA ELFDATA2MSB #else #define ELF_DATA ELFDATA2LSB #endif /* * To ensure that * -we don't load something for the wrong architecture. * -The userspace is using the correct syscall ABI */ struct elf32_hdr; extern int elf_check_arch(const struct elf32_hdr *); #define elf_check_arch elf_check_arch #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE /* * This is the location that an ET_DYN program is loaded if exec'ed. Typical * use of this is to invoke "./ld.so someprog" to test out a new version of * the loader. We need to make sure that it is out of the way of the program * that it will "exec", and that there is sufficient room for the brk. */ #define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3) /* * When the program starts, a1 contains a pointer to a function to be * registered with atexit, as per the SVR4 ABI. A value of 0 means we * have no such handler. */ #define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0) /* * This yields a mask that user programs can use to figure out what * instruction set this cpu supports. */ #define ELF_HWCAP (0) /* * This yields a string that ld.so will use to load implementation * specific libraries for optimization. This is more specific in * intent than poking at uname or /proc/cpuinfo. */ #define ELF_PLATFORM (NULL) #endif include/asm/irqflags-compact.h 0000644 00000010153 14722070650 0012357 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_IRQFLAGS_ARCOMPACT_H #define __ASM_IRQFLAGS_ARCOMPACT_H /* vineetg: March 2010 : local_irq_save( ) optimisation * -Remove explicit mov of current status32 into reg, that is not needed * -Use BIC insn instead of INVERTED + AND * -Conditionally disable interrupts (if they are not enabled, don't disable) */ #include <asm/arcregs.h> /* status32 Reg bits related to Interrupt Handling */ #define STATUS_E1_BIT 1 /* Int 1 enable */ #define STATUS_E2_BIT 2 /* Int 2 enable */ #define STATUS_A1_BIT 3 /* Int 1 active */ #define STATUS_A2_BIT 4 /* Int 2 active */ #define STATUS_AE_BIT 5 /* Exception active */ #define STATUS_E1_MASK (1<<STATUS_E1_BIT) #define STATUS_E2_MASK (1<<STATUS_E2_BIT) #define STATUS_A1_MASK (1<<STATUS_A1_BIT) #define STATUS_A2_MASK (1<<STATUS_A2_BIT) #define STATUS_AE_MASK (1<<STATUS_AE_BIT) #define STATUS_IE_MASK (STATUS_E1_MASK | STATUS_E2_MASK) /* Other Interrupt Handling related Aux regs */ #define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */ #define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */ #define AUX_IRQ_LV12 0x43 /* interrupt level register */ #define AUX_IENABLE 0x40c #define AUX_ITRIGGER 0x40d #define AUX_IPULSE 0x415 #define ISA_INIT_STATUS_BITS STATUS_IE_MASK #ifndef __ASSEMBLY__ /****************************************************************** * IRQ Control Macros * * All of them have "memory" clobber (compiler barrier) which is needed to * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available) * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register) * * Noted at the time of Abilis Timer List corruption * Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67 * Reasoning : https://lkml.org/lkml/2013/4/8/15 * ******************************************************************/ /* * Save IRQ state and disable IRQs */ static inline long arch_local_irq_save(void) { unsigned long temp, flags; __asm__ __volatile__( " lr %1, [status32] \n" " bic %0, %1, %2 \n" " and.f 0, %1, %2 \n" " flag.nz %0 \n" : "=r"(temp), "=r"(flags) : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) : "memory", "cc"); return flags; } /* * restore saved IRQ state */ static inline void arch_local_irq_restore(unsigned long flags) { __asm__ __volatile__( " flag %0 \n" : : "r"(flags) : "memory"); } /* * Unconditionally Enable IRQs */ static inline void arch_local_irq_enable(void) { unsigned long temp; __asm__ __volatile__( " lr %0, [status32] \n" " or %0, %0, %1 \n" " flag %0 \n" : "=&r"(temp) : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) : "cc", "memory"); } /* * Unconditionally Disable IRQs */ static inline void arch_local_irq_disable(void) { unsigned long temp; __asm__ __volatile__( " lr %0, [status32] \n" " and %0, %0, %1 \n" " flag %0 \n" : "=&r"(temp) : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)) : "memory"); } /* * save IRQ state */ static inline long arch_local_save_flags(void) { unsigned long temp; __asm__ __volatile__( " lr %0, [status32] \n" : "=&r"(temp) : : "memory"); return temp; } /* * Query IRQ state */ static inline int arch_irqs_disabled_flags(unsigned long flags) { return !(flags & (STATUS_E1_MASK #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS | STATUS_E2_MASK #endif )); } static inline int arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } #else #ifdef CONFIG_TRACE_IRQFLAGS .macro TRACE_ASM_IRQ_DISABLE bl trace_hardirqs_off .endm .macro TRACE_ASM_IRQ_ENABLE bl trace_hardirqs_on .endm #else .macro TRACE_ASM_IRQ_DISABLE .endm .macro TRACE_ASM_IRQ_ENABLE .endm #endif .macro IRQ_DISABLE scratch lr \scratch, [status32] bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK) flag \scratch TRACE_ASM_IRQ_DISABLE .endm .macro IRQ_ENABLE scratch TRACE_ASM_IRQ_ENABLE lr \scratch, [status32] or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK) flag \scratch .endm #endif /* __ASSEMBLY__ */ #endif include/asm/entry.h 0000644 00000015037 14722070650 0010272 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_ENTRY_H #define __ASM_ARC_ENTRY_H #include <asm/unistd.h> /* For NR_syscalls defination */ #include <asm/arcregs.h> #include <asm/ptrace.h> #include <asm/processor.h> /* For VMALLOC_START */ #include <asm/mmu.h> #ifdef CONFIG_ISA_ARCOMPACT #include <asm/entry-compact.h> /* ISA specific bits */ #else #include <asm/entry-arcv2.h> #endif /* Note on the LD/ST addr modes with addr reg wback * * LD.a same as LD.aw * * LD.a reg1, [reg2, x] => Pre Incr * Eff Addr for load = [reg2 + x] * * LD.ab reg1, [reg2, x] => Post Incr * Eff Addr for load = [reg2] */ .macro PUSH reg st.a \reg, [sp, -4] .endm .macro PUSHAX aux lr r9, [\aux] PUSH r9 .endm .macro POP reg ld.ab \reg, [sp, 4] .endm .macro POPAX aux POP r9 sr r9, [\aux] .endm /*-------------------------------------------------------------- * Helpers to save/restore Scratch Regs: * used by Interrupt/Exception Prologue/Epilogue *-------------------------------------------------------------*/ .macro SAVE_R0_TO_R12 PUSH r0 PUSH r1 PUSH r2 PUSH r3 PUSH r4 PUSH r5 PUSH r6 PUSH r7 PUSH r8 PUSH r9 PUSH r10 PUSH r11 PUSH r12 .endm .macro RESTORE_R12_TO_R0 POP r12 POP r11 POP r10 POP r9 POP r8 POP r7 POP r6 POP r5 POP r4 POP r3 POP r2 POP r1 POP r0 .endm /*-------------------------------------------------------------- * Helpers to save/restore callee-saved regs: * used by several macros below *-------------------------------------------------------------*/ .macro SAVE_R13_TO_R24 PUSH r13 PUSH r14 PUSH r15 PUSH r16 PUSH r17 PUSH r18 PUSH r19 PUSH r20 PUSH r21 PUSH r22 PUSH r23 PUSH r24 .endm .macro RESTORE_R24_TO_R13 POP r24 POP r23 POP r22 POP r21 POP r20 POP r19 POP r18 POP r17 POP r16 POP r15 POP r14 POP r13 .endm /*-------------------------------------------------------------- * Collect User Mode callee regs as struct callee_regs - needed by * fork/do_signal/unaligned-access-emulation. * (By default only scratch regs are saved on entry to kernel) * * Special handling for r25 if used for caching Task Pointer. * It would have been saved in task->thread.user_r25 already, but to keep * the interface same it is copied into regular r25 placeholder in * struct callee_regs. *-------------------------------------------------------------*/ .macro SAVE_CALLEE_SAVED_USER mov r12, sp ; save SP as ref to pt_regs SAVE_R13_TO_R24 #ifdef CONFIG_ARC_CURR_IN_REG ; Retrieve orig r25 and save it with rest of callee_regs ld r12, [r12, PT_user_r25] PUSH r12 #else PUSH r25 #endif .endm /*-------------------------------------------------------------- * Save kernel Mode callee regs at the time of Contect Switch. * * Special handling for r25 if used for caching Task Pointer. * Kernel simply skips saving it since it will be loaded with * incoming task pointer anyways *-------------------------------------------------------------*/ .macro SAVE_CALLEE_SAVED_KERNEL SAVE_R13_TO_R24 #ifdef CONFIG_ARC_CURR_IN_REG sub sp, sp, 4 #else PUSH r25 #endif .endm /*-------------------------------------------------------------- * Opposite of SAVE_CALLEE_SAVED_KERNEL *-------------------------------------------------------------*/ .macro RESTORE_CALLEE_SAVED_KERNEL #ifdef CONFIG_ARC_CURR_IN_REG add sp, sp, 4 /* skip usual r25 placeholder */ #else POP r25 #endif RESTORE_R24_TO_R13 .endm /*-------------------------------------------------------------- * Opposite of SAVE_CALLEE_SAVED_USER * * ptrace tracer or unaligned-access fixup might have changed a user mode * callee reg which is saved back to usual r25 storage location *-------------------------------------------------------------*/ .macro RESTORE_CALLEE_SAVED_USER #ifdef CONFIG_ARC_CURR_IN_REG POP r12 #else POP r25 #endif RESTORE_R24_TO_R13 ; SP is back to start of pt_regs #ifdef CONFIG_ARC_CURR_IN_REG st r12, [sp, PT_user_r25] #endif .endm /*-------------------------------------------------------------- * Super FAST Restore callee saved regs by simply re-adjusting SP *-------------------------------------------------------------*/ .macro DISCARD_CALLEE_SAVED_USER add sp, sp, SZ_CALLEE_REGS .endm /*------------------------------------------------------------- * given a tsk struct, get to the base of it's kernel mode stack * tsk->thread_info is really a PAGE, whose bottom hoists stack * which grows upwards towards thread_info *------------------------------------------------------------*/ .macro GET_TSK_STACK_BASE tsk, out /* Get task->thread_info (this is essentially start of a PAGE) */ ld \out, [\tsk, TASK_THREAD_INFO] /* Go to end of page where stack begins (grows upwards) */ add2 \out, \out, (THREAD_SIZE)/4 .endm /* * @reg [OUT] thread_info->flags of "current" */ .macro GET_CURR_THR_INFO_FLAGS reg GET_CURR_THR_INFO_FROM_SP \reg ld \reg, [\reg, THREAD_INFO_FLAGS] .endm #ifdef CONFIG_SMP /*------------------------------------------------- * Retrieve the current running task on this CPU * 1. Determine curr CPU id. * 2. Use it to index into _current_task[ ] */ .macro GET_CURR_TASK_ON_CPU reg GET_CPU_ID \reg ld.as \reg, [@_current_task, \reg] .endm /*------------------------------------------------- * Save a new task as the "current" task on this CPU * 1. Determine curr CPU id. * 2. Use it to index into _current_task[ ] * * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS) * because ST r0, [r1, offset] can ONLY have s9 @offset * while LD can take s9 (4 byte insn) or LIMM (8 byte insn) */ .macro SET_CURR_TASK_ON_CPU tsk, tmp GET_CPU_ID \tmp add2 \tmp, @_current_task, \tmp st \tsk, [\tmp] #ifdef CONFIG_ARC_CURR_IN_REG mov r25, \tsk #endif .endm #else /* Uniprocessor implementation of macros */ .macro GET_CURR_TASK_ON_CPU reg ld \reg, [@_current_task] .endm .macro SET_CURR_TASK_ON_CPU tsk, tmp st \tsk, [@_current_task] #ifdef CONFIG_ARC_CURR_IN_REG mov r25, \tsk #endif .endm #endif /* SMP / UNI */ /* ------------------------------------------------------------------ * Get the ptr to some field of Current Task at @off in task struct * -Uses r25 for Current task ptr if that is enabled */ #ifdef CONFIG_ARC_CURR_IN_REG .macro GET_CURR_TASK_FIELD_PTR off, reg add \reg, r25, \off .endm #else .macro GET_CURR_TASK_FIELD_PTR off, reg GET_CURR_TASK_ON_CPU \reg add \reg, \reg, \off .endm #endif /* CONFIG_ARC_CURR_IN_REG */ #endif /* __ASM_ARC_ENTRY_H */ include/asm/irqflags.h 0000644 00000000553 14722070650 0010736 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_IRQFLAGS_H #define __ASM_ARC_IRQFLAGS_H #ifdef CONFIG_ISA_ARCOMPACT #include <asm/irqflags-compact.h> #else #include <asm/irqflags-arcv2.h> #endif #endif include/asm/kgdb.h 0000644 00000002327 14722070650 0010036 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * kgdb support for ARC * * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ARC_KGDB_H__ #define __ARC_KGDB_H__ #ifdef CONFIG_KGDB #include <asm/ptrace.h> /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set * register API yet */ #undef DBG_MAX_REG_NUM #define GDB_MAX_REGS 87 #define BREAK_INSTR_SIZE 2 #define CACHE_FLUSH_IS_SAFE 1 #define NUMREGBYTES (GDB_MAX_REGS * 4) #define BUFMAX 2048 static inline void arch_kgdb_breakpoint(void) { __asm__ __volatile__ ("trap_s 0x4\n"); } extern void kgdb_trap(struct pt_regs *regs); /* This is the numbering of registers according to the GDB. See GDB's * arc-tdep.h for details. * * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */ enum arc_linux_regnums { _R0 = 0, _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13, _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24, _R25, _R26, _FP = 27, __SP = 28, _R30 = 30, _BLINK = 31, _LP_COUNT = 60, _STOP_PC = 64, _RET = 64, _LP_START = 65, _LP_END = 66, _STATUS32 = 67, _ECR = 76, _BTA = 82, }; #else #define kgdb_trap(regs) #endif #endif /* __ARC_KGDB_H__ */ include/asm/entry-compact.h 0000644 00000022404 14722070650 0011712 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Vineetg: March 2009 (Supporting 2 levels of Interrupts) * Stack switching code can no longer reliably rely on the fact that * if we are NOT in user mode, stack is switched to kernel mode. * e.g. L2 IRQ interrupted a L1 ISR which had not yet completed * it's prologue including stack switching from user mode * * Vineetg: Aug 28th 2008: Bug #94984 * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap * Normally CPU does this automatically, however when doing FAKE rtie, * we also need to explicitly do this. The problem in macros * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit * was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context * * Vineetg: May 5th 2008 * -Modified CALLEE_REG save/restore macros to handle the fact that * r25 contains the kernel current task ptr * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the * address Write back load ld.ab instead of seperate ld/add instn * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef __ASM_ARC_ENTRY_COMPACT_H #define __ASM_ARC_ENTRY_COMPACT_H #include <asm/asm-offsets.h> #include <asm/irqflags-compact.h> #include <asm/thread_info.h> /* For THREAD_SIZE */ #ifdef CONFIG_ARC_PLAT_EZNPS #include <plat/ctop.h> #endif /*-------------------------------------------------------------- * Switch to Kernel Mode stack if SP points to User Mode stack * * Entry : r9 contains pre-IRQ/exception/trap status32 * Exit : SP set to K mode stack * SP at the time of entry (K/U) saved @ pt_regs->sp * Clobbers: r9 *-------------------------------------------------------------*/ .macro SWITCH_TO_KERNEL_STK /* User Mode when this happened ? Yes: Proceed to switch stack */ bbit1 r9, STATUS_U_BIT, 88f /* OK we were already in kernel mode when this event happened, thus can * assume SP is kernel mode SP. _NO_ need to do any stack switching */ #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* However.... * If Level 2 Interrupts enabled, we may end up with a corner case: * 1. User Task executing * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode) * 3. But before it could switch SP from USER to KERNEL stack * a L2 IRQ "Interrupts" L1 * Thay way although L2 IRQ happened in Kernel mode, stack is still * not switched. * To handle this, we may need to switch stack even if in kernel mode * provided SP has values in range of USER mode stack ( < 0x7000_0000 ) */ brlo sp, VMALLOC_START, 88f /* TODO: vineetg: * We need to be a bit more cautious here. What if a kernel bug in * L1 ISR, caused SP to go whaco (some small value which looks like * USER stk) and then we take L2 ISR. * Above brlo alone would treat it as a valid L1-L2 scenario * instead of shouting around * The only feasible way is to make sure this L2 happened in * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in * L1 ISR before it switches stack */ #endif /*------Intr/Ecxp happened in kernel mode, SP already setup ------ */ /* save it nevertheless @ pt_regs->sp for uniformity */ b.d 66f st sp, [sp, PT_sp - SZ_PT_REGS] 88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */ GET_CURR_TASK_ON_CPU r9 /* With current tsk in r9, get it's kernel mode stack base */ GET_TSK_STACK_BASE r9, r9 /* save U mode SP @ pt_regs->sp */ st sp, [r9, PT_sp - SZ_PT_REGS] /* final SP switch */ mov sp, r9 66: .endm /*------------------------------------------------------------ * "FAKE" a rtie to return from CPU Exception context * This is to re-enable Exceptions within exception * Look at EV_ProtV to see how this is actually used *-------------------------------------------------------------*/ .macro FAKE_RET_FROM_EXCPN lr r9, [status32] bclr r9, r9, STATUS_AE_BIT or r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK) sr r9, [erstatus] mov r9, 55f sr r9, [eret] rtie 55: .endm /*-------------------------------------------------------------- * For early Exception/ISR Prologue, a core reg is temporarily needed to * code the rest of prolog (stack switching). This is done by stashing * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP). * * Before saving the full regfile - this reg is restored back, only * to be saved again on kernel mode stack, as part of pt_regs. *-------------------------------------------------------------*/ .macro PROLOG_FREEUP_REG reg, mem #ifdef CONFIG_SMP sr \reg, [ARC_REG_SCRATCH_DATA0] #else st \reg, [\mem] #endif .endm .macro PROLOG_RESTORE_REG reg, mem #ifdef CONFIG_SMP lr \reg, [ARC_REG_SCRATCH_DATA0] #else ld \reg, [\mem] #endif .endm /*-------------------------------------------------------------- * Exception Entry prologue * -Switches stack to K mode (if not already) * -Saves the register file * * After this it is safe to call the "C" handlers *-------------------------------------------------------------*/ .macro EXCEPTION_PROLOGUE /* Need at least 1 reg to code the early exception prologue */ PROLOG_FREEUP_REG r9, @ex_saved_reg1 /* U/K mode at time of exception (stack not switched if already K) */ lr r9, [erstatus] /* ARC700 doesn't provide auto-stack switching */ SWITCH_TO_KERNEL_STK #ifdef CONFIG_ARC_CURR_IN_REG /* Treat r25 as scratch reg (save on stack) and load with "current" */ PUSH r25 GET_CURR_TASK_ON_CPU r25 #else sub sp, sp, 4 #endif st.a r0, [sp, -8] /* orig_r0 needed for syscall (skip ECR slot) */ sub sp, sp, 4 /* skip pt_regs->sp, already saved above */ /* Restore r9 used to code the early prologue */ PROLOG_RESTORE_REG r9, @ex_saved_reg1 /* now we are ready to save the regfile */ SAVE_R0_TO_R12 PUSH gp PUSH fp PUSH blink PUSHAX eret PUSHAX erstatus PUSH lp_count PUSHAX lp_end PUSHAX lp_start PUSHAX erbta #ifdef CONFIG_ARC_PLAT_EZNPS .word CTOP_INST_SCHD_RW PUSHAX CTOP_AUX_GPA1 PUSHAX CTOP_AUX_EFLAGS #endif lr r10, [ecr] st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */ .endm /*-------------------------------------------------------------- * Restore all registers used by system call or Exceptions * SP should always be pointing to the next free stack element * when entering this macro. * * NOTE: * * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg * for memory load operations. If used in that way interrupts are deffered * by hardware and that is not good. *-------------------------------------------------------------*/ .macro EXCEPTION_EPILOGUE #ifdef CONFIG_ARC_PLAT_EZNPS .word CTOP_INST_SCHD_RW POPAX CTOP_AUX_EFLAGS POPAX CTOP_AUX_GPA1 #endif POPAX erbta POPAX lp_start POPAX lp_end POP r9 mov lp_count, r9 ;LD to lp_count is not allowed POPAX erstatus POPAX eret POP blink POP fp POP gp RESTORE_R12_TO_R0 #ifdef CONFIG_ARC_CURR_IN_REG ld r25, [sp, 12] #endif ld sp, [sp] /* restore original sp */ /* orig_r0, ECR, user_r25 skipped automatically */ .endm /* Dummy ECR values for Interrupts */ #define event_IRQ1 0x0031abcd #define event_IRQ2 0x0032abcd .macro INTERRUPT_PROLOGUE LVL /* free up r9 as scratchpad */ PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg /* Which mode (user/kernel) was the system in when intr occurred */ lr r9, [status32_l\LVL\()] SWITCH_TO_KERNEL_STK #ifdef CONFIG_ARC_CURR_IN_REG /* Treat r25 as scratch reg (save on stack) and load with "current" */ PUSH r25 GET_CURR_TASK_ON_CPU r25 #else sub sp, sp, 4 #endif PUSH 0x003\LVL\()abcd /* Dummy ECR */ sub sp, sp, 8 /* skip orig_r0 (not needed) skip pt_regs->sp, already saved above */ /* Restore r9 used to code the early prologue */ PROLOG_RESTORE_REG r9, @int\LVL\()_saved_reg SAVE_R0_TO_R12 PUSH gp PUSH fp PUSH blink PUSH ilink\LVL\() PUSHAX status32_l\LVL\() PUSH lp_count PUSHAX lp_end PUSHAX lp_start PUSHAX bta_l\LVL\() #ifdef CONFIG_ARC_PLAT_EZNPS .word CTOP_INST_SCHD_RW PUSHAX CTOP_AUX_GPA1 PUSHAX CTOP_AUX_EFLAGS #endif .endm /*-------------------------------------------------------------- * Restore all registers used by interrupt handlers. * * NOTE: * * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg * for memory load operations. If used in that way interrupts are deffered * by hardware and that is not good. *-------------------------------------------------------------*/ .macro INTERRUPT_EPILOGUE LVL #ifdef CONFIG_ARC_PLAT_EZNPS .word CTOP_INST_SCHD_RW POPAX CTOP_AUX_EFLAGS POPAX CTOP_AUX_GPA1 #endif POPAX bta_l\LVL\() POPAX lp_start POPAX lp_end POP r9 mov lp_count, r9 ;LD to lp_count is not allowed POPAX status32_l\LVL\() POP ilink\LVL\() POP blink POP fp POP gp RESTORE_R12_TO_R0 #ifdef CONFIG_ARC_CURR_IN_REG ld r25, [sp, 12] #endif ld sp, [sp] /* restore original sp */ /* orig_r0, ECR, user_r25 skipped automatically */ .endm /* Get thread_info of "current" tsk */ .macro GET_CURR_THR_INFO_FROM_SP reg bic \reg, sp, (THREAD_SIZE - 1) .endm #ifndef CONFIG_ARC_PLAT_EZNPS /* Get CPU-ID of this core */ .macro GET_CPU_ID reg lr \reg, [identity] lsr \reg, \reg, 8 bmsk \reg, \reg, 7 .endm #endif #endif /* __ASM_ARC_ENTRY_COMPACT_H */ include/asm/unwind.h 0000644 00000006566 14722070650 0010444 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_UNWIND_H #define _ASM_ARC_UNWIND_H #ifdef CONFIG_ARC_DW2_UNWIND #include <linux/sched.h> struct arc700_regs { unsigned long r0; unsigned long r1; unsigned long r2; unsigned long r3; unsigned long r4; unsigned long r5; unsigned long r6; unsigned long r7; unsigned long r8; unsigned long r9; unsigned long r10; unsigned long r11; unsigned long r12; unsigned long r13; unsigned long r14; unsigned long r15; unsigned long r16; unsigned long r17; unsigned long r18; unsigned long r19; unsigned long r20; unsigned long r21; unsigned long r22; unsigned long r23; unsigned long r24; unsigned long r25; unsigned long r26; unsigned long r27; /* fp */ unsigned long r28; /* sp */ unsigned long r29; unsigned long r30; unsigned long r31; /* blink */ unsigned long r63; /* pc */ }; struct unwind_frame_info { struct arc700_regs regs; struct task_struct *task; unsigned call_frame:1; }; #define UNW_PC(frame) ((frame)->regs.r63) #define UNW_SP(frame) ((frame)->regs.r28) #define UNW_BLINK(frame) ((frame)->regs.r31) /* Rajesh FIXME */ #ifdef CONFIG_FRAME_POINTER #define UNW_FP(frame) ((frame)->regs.r27) #define FRAME_RETADDR_OFFSET 4 #define FRAME_LINK_OFFSET 0 #define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp) #define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp) #else #define UNW_FP(frame) ((void)(frame), 0) #endif #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) #define UNW_REGISTER_INFO \ PTREGS_INFO(r0), \ PTREGS_INFO(r1), \ PTREGS_INFO(r2), \ PTREGS_INFO(r3), \ PTREGS_INFO(r4), \ PTREGS_INFO(r5), \ PTREGS_INFO(r6), \ PTREGS_INFO(r7), \ PTREGS_INFO(r8), \ PTREGS_INFO(r9), \ PTREGS_INFO(r10), \ PTREGS_INFO(r11), \ PTREGS_INFO(r12), \ PTREGS_INFO(r13), \ PTREGS_INFO(r14), \ PTREGS_INFO(r15), \ PTREGS_INFO(r16), \ PTREGS_INFO(r17), \ PTREGS_INFO(r18), \ PTREGS_INFO(r19), \ PTREGS_INFO(r20), \ PTREGS_INFO(r21), \ PTREGS_INFO(r22), \ PTREGS_INFO(r23), \ PTREGS_INFO(r24), \ PTREGS_INFO(r25), \ PTREGS_INFO(r26), \ PTREGS_INFO(r27), \ PTREGS_INFO(r28), \ PTREGS_INFO(r29), \ PTREGS_INFO(r30), \ PTREGS_INFO(r31), \ PTREGS_INFO(r63) #define UNW_DEFAULT_RA(raItem, dataAlign) \ ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4)) extern int arc_unwind(struct unwind_frame_info *frame); extern void arc_unwind_init(void); extern void *unwind_add_table(struct module *module, const void *table_start, unsigned long table_size); extern void unwind_remove_table(void *handle, int init_only); static inline int arch_unwind_init_running(struct unwind_frame_info *info, int (*callback) (struct unwind_frame_info *info, void *arg), void *arg) { return 0; } static inline int arch_unw_user_mode(const struct unwind_frame_info *info) { return 0; } static inline void arch_unw_init_blocked(struct unwind_frame_info *info) { return; } static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, struct pt_regs *regs) { return; } #else #define UNW_PC(frame) ((void)(frame), 0) #define UNW_SP(frame) ((void)(frame), 0) #define UNW_FP(frame) ((void)(frame), 0) static inline void arc_unwind_init(void) { } #define unwind_add_table(a, b, c) #define unwind_remove_table(a, b) #endif /* CONFIG_ARC_DW2_UNWIND */ #endif /* _ASM_ARC_UNWIND_H */ include/asm/smp.h 0000644 00000010151 14722070650 0007720 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_SMP_H #define __ASM_ARC_SMP_H #ifdef CONFIG_SMP #include <linux/types.h> #include <linux/init.h> #include <linux/threads.h> #define raw_smp_processor_id() (current_thread_info()->cpu) /* including cpumask.h leads to cyclic deps hence this Forward declaration */ struct cpumask; /* * APIs provided by arch SMP code to generic code */ extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); /* * APIs provided by arch SMP code to rest of arch code */ extern void __init smp_init_cpus(void); extern void first_lines_of_secondary(void); extern const char *arc_platform_smp_cpuinfo(void); /* * API expected BY platform smp code (FROM arch smp code) * * smp_ipi_irq_setup: * Takes @cpu and @hwirq to which the arch-common ISR is hooked up */ extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq); /* * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP * * @info: SoC SMP specific info for /proc/cpuinfo etc * @init_early_smp: A SMP specific h/w block can init itself * Could be common across platforms so not covered by * mach_desc->init_early() * @init_per_cpu: Called for each core so SMP h/w block driver can do * any needed setup per cpu (e.g. IPI request) * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) * @ipi_send: To send IPI to a @cpu * @ips_clear: To clear IPI received at @irq */ struct plat_smp_ops { const char *info; void (*init_early_smp)(void); void (*init_per_cpu)(int cpu); void (*cpu_kick)(int cpu, unsigned long pc); void (*ipi_send)(int cpu); void (*ipi_clear)(int irq); }; /* TBD: stop exporting it for direct population by platform */ extern struct plat_smp_ops plat_smp_ops; #else /* CONFIG_SMP */ static inline void smp_init_cpus(void) {} static inline const char *arc_platform_smp_cpuinfo(void) { return ""; } #endif /* !CONFIG_SMP */ /* * ARC700 doesn't support atomic Read-Modify-Write ops. * Originally Interrupts had to be disabled around code to gaurantee atomicity. * The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops * based on retry-if-irq-in-atomic (with hardware assist). * However despite these, we provide the IRQ disabling variant * * (1) These insn were introduced only in 4.10 release. So for older released * support needed. * * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be * gaurantted by the platform (not something which core handles). * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ * disabling for atomicity. * * However exported spinlock API is not usable due to cyclic hdr deps * (even after system.h disintegration upstream) * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h * * So the workaround is to use the lowest level arch spinlock API. * The exported spinlock API is smart enough to be NOP for !CONFIG_SMP, * but same is not true for ARCH backend, hence the need for 2 variants */ #ifndef CONFIG_ARC_HAS_LLSC #include <linux/irqflags.h> #ifdef CONFIG_SMP #include <asm/spinlock.h> extern arch_spinlock_t smp_atomic_ops_lock; extern arch_spinlock_t smp_bitops_lock; #define atomic_ops_lock(flags) do { \ local_irq_save(flags); \ arch_spin_lock(&smp_atomic_ops_lock); \ } while (0) #define atomic_ops_unlock(flags) do { \ arch_spin_unlock(&smp_atomic_ops_lock); \ local_irq_restore(flags); \ } while (0) #define bitops_lock(flags) do { \ local_irq_save(flags); \ arch_spin_lock(&smp_bitops_lock); \ } while (0) #define bitops_unlock(flags) do { \ arch_spin_unlock(&smp_bitops_lock); \ local_irq_restore(flags); \ } while (0) #else /* !CONFIG_SMP */ #define atomic_ops_lock(flags) local_irq_save(flags) #define atomic_ops_unlock(flags) local_irq_restore(flags) #define bitops_lock(flags) local_irq_save(flags) #define bitops_unlock(flags) local_irq_restore(flags) #endif /* !CONFIG_SMP */ #endif /* !CONFIG_ARC_HAS_LLSC */ #endif include/asm/entry-arcv2.h 0000644 00000016056 14722070650 0011307 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARC_ENTRY_ARCV2_H #define __ASM_ARC_ENTRY_ARCV2_H #include <asm/asm-offsets.h> #include <asm/irqflags-arcv2.h> #include <asm/thread_info.h> /* For THREAD_SIZE */ /* * Interrupt/Exception stack layout (pt_regs) for ARCv2 * (End of struct aligned to end of page [unless nested]) * * INTERRUPT EXCEPTION * * manual --------------------- manual * | orig_r0 | * | event/ECR | * | bta | * | user_r25 | * | gp | * | fp | * | sp | * | r12 | * | r30 | * | r58 | * | r59 | * hw autosave --------------------- * optional | r0 | * | r1 | * ~ ~ * | r9 | * | r10 | * | r11 | * | blink | * | lpe | * | lps | * | lpc | * | ei base | * | ldi base | * | jli base | * --------------------- * hw autosave | pc / eret | * mandatory | stat32 / erstatus | * --------------------- */ /*------------------------------------------------------------------------*/ .macro INTERRUPT_PROLOGUE ; (A) Before jumping to Interrupt Vector, hardware micro-ops did following: ; 1. SP auto-switched to kernel mode stack ; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0) ; 3. Auto save: (mandatory) Push PC and STAT32 on stack ; hardware does even if CONFIG_ARC_IRQ_NO_AUTOSAVE ; 4. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI ; ; (B) Manually saved some regs: r12,r25,r30, sp,fp,gp, ACCL pair #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE ; carve pt_regs on stack (case #3), PC/STAT32 already on stack sub sp, sp, SZ_PT_REGS - 8 __SAVE_REGFILE_HARD #else ; carve pt_regs on stack (case #4), which grew partially already sub sp, sp, PT_r0 #endif __SAVE_REGFILE_SOFT .endm /*------------------------------------------------------------------------*/ .macro EXCEPTION_PROLOGUE ; (A) Before jumping to Exception Vector, hardware micro-ops did following: ; 1. SP auto-switched to kernel mode stack ; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0) ; ; (B) Manually save the complete reg file below sub sp, sp, SZ_PT_REGS ; carve pt_regs ; _HARD saves r10 clobbered by _SOFT as scratch hence comes first __SAVE_REGFILE_HARD __SAVE_REGFILE_SOFT st r0, [sp] ; orig_r0 lr r10, [eret] lr r11, [erstatus] ST2 r10, r11, PT_ret lr r10, [ecr] lr r11, [erbta] ST2 r10, r11, PT_event ; OUTPUT: r10 has ECR expected by EV_Trap .endm /*------------------------------------------------------------------------ * This macro saves the registers manually which would normally be autosaved * by hardware on taken interrupts. It is used by * - exception handlers (which don't have autosave) * - interrupt autosave disabled due to CONFIG_ARC_IRQ_NO_AUTOSAVE */ .macro __SAVE_REGFILE_HARD ST2 r0, r1, PT_r0 ST2 r2, r3, PT_r2 ST2 r4, r5, PT_r4 ST2 r6, r7, PT_r6 ST2 r8, r9, PT_r8 ST2 r10, r11, PT_r10 st blink, [sp, PT_blink] lr r10, [lp_end] lr r11, [lp_start] ST2 r10, r11, PT_lpe st lp_count, [sp, PT_lpc] ; skip JLI, LDI, EI for now .endm /*------------------------------------------------------------------------ * This macros saves a bunch of other registers which can't be autosaved for * various reasons: * - r12: the last caller saved scratch reg since hardware saves in pairs so r0-r11 * - r30: free reg, used by gcc as scratch * - ACCL/ACCH pair when they exist */ .macro __SAVE_REGFILE_SOFT ST2 gp, fp, PT_r26 ; gp (r26), fp (r27) st r12, [sp, PT_sp + 4] st r30, [sp, PT_sp + 8] ; Saving pt_regs->sp correctly requires some extra work due to the way ; Auto stack switch works ; - U mode: retrieve it from AUX_USER_SP ; - K mode: add the offset from current SP where H/w starts auto push ; ; 1. Utilize the fact that Z bit is set if Intr taken in U mode ; 2. Upon entry SP is always saved (for any inspection, unwinding etc), ; but on return, restored only if U mode lr r10, [AUX_USER_SP] ; U mode SP ; ISA requires ADD.nz to have same dest and src reg operands mov.nz r10, sp add.nz r10, r10, SZ_PT_REGS ; K mode SP st r10, [sp, PT_sp] ; SP (pt_regs->sp) #ifdef CONFIG_ARC_CURR_IN_REG st r25, [sp, PT_user_r25] GET_CURR_TASK_ON_CPU r25 #endif #ifdef CONFIG_ARC_HAS_ACCL_REGS ST2 r58, r59, PT_sp + 12 #endif .endm /*------------------------------------------------------------------------*/ .macro __RESTORE_REGFILE_SOFT LD2 gp, fp, PT_r26 ; gp (r26), fp (r27) ld r12, [sp, PT_sp + 4] ld r30, [sp, PT_sp + 8] ; Restore SP (into AUX_USER_SP) only if returning to U mode ; - for K mode, it will be implicitly restored as stack is unwound ; - Z flag set on K is inverse of what hardware does on interrupt entry ; but that doesn't really matter bz 1f ld r10, [sp, PT_sp] ; SP (pt_regs->sp) sr r10, [AUX_USER_SP] 1: #ifdef CONFIG_ARC_CURR_IN_REG ld r25, [sp, PT_user_r25] #endif #ifdef CONFIG_ARC_HAS_ACCL_REGS LD2 r58, r59, PT_sp + 12 #endif .endm /*------------------------------------------------------------------------*/ .macro __RESTORE_REGFILE_HARD ld blink, [sp, PT_blink] LD2 r10, r11, PT_lpe sr r10, [lp_end] sr r11, [lp_start] ld r10, [sp, PT_lpc] ; lp_count can't be target of LD mov lp_count, r10 LD2 r0, r1, PT_r0 LD2 r2, r3, PT_r2 LD2 r4, r5, PT_r4 LD2 r6, r7, PT_r6 LD2 r8, r9, PT_r8 LD2 r10, r11, PT_r10 .endm /*------------------------------------------------------------------------*/ .macro INTERRUPT_EPILOGUE ; INPUT: r0 has STAT32 of calling context ; INPUT: Z flag set if returning to K mode ; _SOFT clobbers r10 restored by _HARD hence the order __RESTORE_REGFILE_SOFT #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE __RESTORE_REGFILE_HARD add sp, sp, SZ_PT_REGS - 8 #else add sp, sp, PT_r0 #endif .endm /*------------------------------------------------------------------------*/ .macro EXCEPTION_EPILOGUE ; INPUT: r0 has STAT32 of calling context btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP ld r10, [sp, PT_event + 4] sr r10, [erbta] LD2 r10, r11, PT_ret sr r10, [eret] sr r11, [erstatus] __RESTORE_REGFILE_SOFT __RESTORE_REGFILE_HARD add sp, sp, SZ_PT_REGS .endm .macro FAKE_RET_FROM_EXCPN lr r9, [status32] bic r9, r9, STATUS_AE_MASK or r9, r9, STATUS_IE_MASK kflag r9 .endm /* Get thread_info of "current" tsk */ .macro GET_CURR_THR_INFO_FROM_SP reg bmskn \reg, sp, THREAD_SHIFT - 1 .endm /* Get CPU-ID of this core */ .macro GET_CPU_ID reg lr \reg, [identity] xbfu \reg, \reg, 0xE8 /* 00111 01000 */ /* M = 8-1 N = 8 */ .endm #endif include/asm/stacktrace.h 0000644 00000002232 14722070650 0011246 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_STACKTRACE_H #define __ASM_STACKTRACE_H #include <linux/sched.h> /** * arc_unwind_core - Unwind the kernel mode stack for an execution context * @tsk: NULL for current task, specific task otherwise * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC} * If NULL, use pt_regs of @tsk (if !NULL) otherwise * use the current values of {SP, FP, BLINK, PC} * @consumer_fn: Callback invoked for each frame unwound * Returns 0 to continue unwinding, -1 to stop * @arg: Arg to callback * * Returns the address of first function in stack * * Semantics: * - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL * - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL * - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL */ notrace noinline unsigned int arc_unwind_core( struct task_struct *tsk, struct pt_regs *regs, int (*consumer_fn) (unsigned int, void *), void *arg); #endif /* __ASM_STACKTRACE_H */ include/asm/pgtable.h 0000644 00000033412 14722070650 0010544 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * vineetg: May 2011 * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1. * They are semantically the same although in different contexts * VALID marks a TLB entry exists and it will only happen if PRESENT * - Utilise some unused free bits to confine PTE flags to 12 bits * This is a must for 4k pg-sz * * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods * -TLB Locking never really existed, except for initial specs * -SILENT_xxx not needed for our port * -Per my request, MMU V3 changes the layout of some of the bits * to avoid a few shifts in TLB Miss handlers. * * vineetg: April 2010 * -PGD entry no longer contains any flags. If empty it is 0, otherwise has * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler * * vineetg: April 2010 * -Switched form 8:11:13 split for page table lookup to 11:8:13 * -this speeds up page table allocation itself as we now have to memset 1K * instead of 8k per page table. * -TODO: Right now page table alloc is 8K and rest 7K is unused * need to optimise it * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef _ASM_ARC_PGTABLE_H #define _ASM_ARC_PGTABLE_H #include <linux/bits.h> #define __ARCH_USE_5LEVEL_HACK #include <asm-generic/pgtable-nopmd.h> #include <asm/page.h> #include <asm/mmu.h> /* to propagate CONFIG_ARC_MMU_VER <n> */ /************************************************************************** * Page Table Flags * * ARC700 MMU only deals with softare managed TLB entries. * Page Tables are purely for Linux VM's consumption and the bits below are * suited to that (uniqueness). Hence some are not implemented in the TLB and * some have different value in TLB. * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in * seperate PD0 and PD1, which combined forms a translation entry) * while for PTE perspective, they are 8 and 9 respectively * with MMU v3: Most bits (except SHARED) represent the exact hardware pos * (saves some bit shift ops in TLB Miss hdlrs) */ #if (CONFIG_ARC_MMU_VER <= 2) #define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */ #define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */ #define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */ #define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */ #define _PAGE_READ (1<<5) /* Page has user read perm (H) */ #define _PAGE_DIRTY (1<<6) /* Page modified (dirty) (S) */ #define _PAGE_SPECIAL (1<<7) #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ #define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */ #else /* MMU v3 onwards */ #define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */ #define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */ #define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */ #define _PAGE_READ (1<<3) /* Page has user read perm (H) */ #define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */ #define _PAGE_DIRTY (1<<5) /* Page modified (dirty) (S) */ #define _PAGE_SPECIAL (1<<6) #if (CONFIG_ARC_MMU_VER >= 4) #define _PAGE_WTHRU (1<<7) /* Page cache mode write-thru (H) */ #endif #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ #define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */ #if (CONFIG_ARC_MMU_VER >= 4) #define _PAGE_HW_SZ (1<<10) /* Page Size indicator (H): 0 normal, 1 super */ #endif #define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr usable for shared TLB entries (H) */ #define _PAGE_UNUSED_BIT (1<<12) #endif /* vmalloc permissions */ #define _K_PAGE_PERMS (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \ _PAGE_GLOBAL | _PAGE_PRESENT) #ifndef CONFIG_ARC_CACHE_PAGES #undef _PAGE_CACHEABLE #define _PAGE_CACHEABLE 0 #endif #ifndef _PAGE_HW_SZ #define _PAGE_HW_SZ 0 #endif /* Defaults for every user page */ #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE) /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \ _PAGE_SPECIAL) /* More Abbrevaited helpers */ #define PAGE_U_NONE __pgprot(___DEF) #define PAGE_U_R __pgprot(___DEF | _PAGE_READ) #define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE) #define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE) #define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \ _PAGE_EXECUTE) #define PAGE_SHARED PAGE_U_W_R /* While kernel runs out of unstranslated space, vmalloc/modules use a chunk of * user vaddr space - visible in all addr spaces, but kernel mode only * Thus Global, all-kernel-access, no-user-access, cached */ #define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE) /* ioremap */ #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) /* Masks for actual TLB "PD"s */ #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ) #define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ) #define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE) /************************************************************************** * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) * * Certain cases have 1:1 mapping * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED * which directly corresponds to PAGE_U_X_R * * Other rules which cause the divergence from 1:1 mapping * * 1. Although ARC700 can do exclusive execute/write protection (meaning R * can be tracked independet of X/W unlike some other CPUs), still to * keep things consistent with other archs: * -Write implies Read: W => R * -Execute implies Read: X => R * * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W * This is to enable COW mechanism */ /* xwr */ #define __P000 PAGE_U_NONE #define __P001 PAGE_U_R #define __P010 PAGE_U_R /* Pvt-W => !W */ #define __P011 PAGE_U_R /* Pvt-W => !W */ #define __P100 PAGE_U_X_R /* X => R */ #define __P101 PAGE_U_X_R #define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */ #define __P111 PAGE_U_X_R /* Pvt-W => !W */ #define __S000 PAGE_U_NONE #define __S001 PAGE_U_R #define __S010 PAGE_U_W_R /* W => R */ #define __S011 PAGE_U_W_R #define __S100 PAGE_U_X_R /* X => R */ #define __S101 PAGE_U_X_R #define __S110 PAGE_U_X_W_R /* X => R */ #define __S111 PAGE_U_X_W_R /**************************************************************** * 2 tier (PGD:PTE) software page walker * * [31] 32 bit virtual address [0] * ------------------------------------------------------- * | | <------------ PGDIR_SHIFT ----------> | * | | | * | BITS_FOR_PGD | BITS_FOR_PTE | <-- PAGE_SHIFT --> | * ------------------------------------------------------- * | | | * | | --> off in page frame * | ---> index into Page Table * ----> index into Page Directory * * In a single page size configuration, only PAGE_SHIFT is fixed * So both PGD and PTE sizing can be tweaked * e.g. 8K page (PAGE_SHIFT 13) can have * - PGDIR_SHIFT 21 -> 11:8:13 address split * - PGDIR_SHIFT 24 -> 8:11:13 address split * * If Super Page is configured, PGDIR_SHIFT becomes fixed too, * so the sizing flexibility is gone. */ #if defined(CONFIG_ARC_HUGEPAGE_16M) #define PGDIR_SHIFT 24 #elif defined(CONFIG_ARC_HUGEPAGE_2M) #define PGDIR_SHIFT 21 #else /* * Only Normal page support so "hackable" (see comment above) * Default value provides 11:8:13 (8K), 11:9:12 (4K) */ #define PGDIR_SHIFT 21 #endif #define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT) #define BITS_FOR_PGD (32 - PGDIR_SHIFT) #define PGDIR_SIZE BIT(PGDIR_SHIFT) /* vaddr span, not PDG sz */ #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PTRS_PER_PTE BIT(BITS_FOR_PTE) #define PTRS_PER_PGD BIT(BITS_FOR_PGD) /* * Number of entries a user land program use. * TASK_SIZE is the maximum vaddr that can be used by a userland program. */ #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) /* * No special requirements for lowest virtual address we permit any user space * mapping to be mapped at. */ #define FIRST_USER_ADDRESS 0UL /**************************************************************** * Bucket load of VM Helpers */ #ifndef __ASSEMBLY__ #define pte_ERROR(e) \ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) #define pgd_ERROR(e) \ pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) /* the zero page used for uninitialized and anonymous pages */ extern char empty_zero_page[PAGE_SIZE]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) /* find the page descriptor of the Page Tbl ref by PMD entry */ #define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK) /* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */ #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) /* In a 2 level sys, setup the PGD entry with PTE value */ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) { pmd_val(*pmdp) = (unsigned long)ptep; } #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) #define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0)) #define pmd_none(x) (!pmd_val(x)) #define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK)) #define pmd_present(x) (pmd_val(x)) #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) #define pte_page(pte) pfn_to_page(pte_pfn(pte)) #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) #define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/ #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) /* * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system) * and returns ptr to PTE entry corresponding to @addr */ #define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\ __pte_index(addr)) /* No mapping of Page Tables in high mem etc, so following same as above */ #define pte_offset_kernel(dir, addr) pte_offset(dir, addr) #define pte_offset_map(dir, addr) pte_offset(dir, addr) /* Zoo of pte_xxx function */ #define pte_read(pte) (pte_val(pte) & _PAGE_READ) #define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) #define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY) #define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) #define pte_special(pte) (pte_val(pte) & _PAGE_SPECIAL) #define PTE_BIT_FUNC(fn, op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT)); PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)); PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE)); PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY)); PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)); PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED)); PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED)); PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE)); PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE)); PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL)); PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ)); static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); } /* Macro to mark a page protection as uncacheable */ #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)) static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { set_pte(ptep, pteval); } /* * All kernel related VM pages are in init's mm. */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) #define pgd_index(addr) ((addr) >> PGDIR_SHIFT) #define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr)) /* * Macro to quickly access the PGD entry, utlising the fact that some * arch may cache the pointer to Page Directory of "current" task * in a MMU register * * Thus task->mm->pgd (3 pointer dereferences, cache misses etc simply * becomes read a register * * ********CAUTION*******: * Kernel code might be dealing with some mm_struct of NON "current" * Thus use this macro only when you are certain that "current" is current * e.g. when dealing with signal frame setup code etc */ #ifndef CONFIG_SMP #define pgd_offset_fast(mm, addr) \ ({ \ pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \ pgd_base + pgd_index(addr); \ }) #else #define pgd_offset_fast(mm, addr) pgd_offset(mm, addr) #endif extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE); void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); /* Encode swap {type,off} tuple into PTE * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that * PAGE_PRESENT is zero in a PTE holding swap "identifier" */ #define __swp_entry(type, off) ((swp_entry_t) { \ ((type) & 0x1f) | ((off) << 13) }) /* Decode a PTE containing swap "identifier "into constituents */ #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) #define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13) /* NOPs, to keep generic kernel happy */ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) #define kern_addr_valid(addr) (1) /* * remap a physical page `pfn' of size `size' with page protection `prot' * into virtual address `from' */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #include <asm/hugepage.h> #endif #include <asm-generic/pgtable.h> /* to cope with aliasing VIPT cache */ #define HAVE_ARCH_UNMAPPED_AREA #endif /* __ASSEMBLY__ */ #endif include/asm/atomic.h 0000644 00000032203 14722070650 0010377 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_ATOMIC_H #define _ASM_ARC_ATOMIC_H #ifndef __ASSEMBLY__ #include <linux/types.h> #include <linux/compiler.h> #include <asm/cmpxchg.h> #include <asm/barrier.h> #include <asm/smp.h> #define ATOMIC_INIT(i) { (i) } #ifndef CONFIG_ARC_PLAT_EZNPS #define atomic_read(v) READ_ONCE((v)->counter) #ifdef CONFIG_ARC_HAS_LLSC #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ unsigned int val; \ \ __asm__ __volatile__( \ "1: llock %[val], [%[ctr]] \n" \ " " #asm_op " %[val], %[val], %[i] \n" \ " scond %[val], [%[ctr]] \n" \ " bnz 1b \n" \ : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ [i] "ir" (i) \ : "cc"); \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned int val; \ \ /* \ * Explicit full memory barrier needed before/after as \ * LLOCK/SCOND thmeselves don't provide any such semantics \ */ \ smp_mb(); \ \ __asm__ __volatile__( \ "1: llock %[val], [%[ctr]] \n" \ " " #asm_op " %[val], %[val], %[i] \n" \ " scond %[val], [%[ctr]] \n" \ " bnz 1b \n" \ : [val] "=&r" (val) \ : [ctr] "r" (&v->counter), \ [i] "ir" (i) \ : "cc"); \ \ smp_mb(); \ \ return val; \ } #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned int val, orig; \ \ /* \ * Explicit full memory barrier needed before/after as \ * LLOCK/SCOND thmeselves don't provide any such semantics \ */ \ smp_mb(); \ \ __asm__ __volatile__( \ "1: llock %[orig], [%[ctr]] \n" \ " " #asm_op " %[val], %[orig], %[i] \n" \ " scond %[val], [%[ctr]] \n" \ " bnz 1b \n" \ : [val] "=&r" (val), \ [orig] "=&r" (orig) \ : [ctr] "r" (&v->counter), \ [i] "ir" (i) \ : "cc"); \ \ smp_mb(); \ \ return orig; \ } #else /* !CONFIG_ARC_HAS_LLSC */ #ifndef CONFIG_SMP /* violating atomic_xxx API locking protocol in UP for optimization sake */ #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #else static inline void atomic_set(atomic_t *v, int i) { /* * Independent of hardware support, all of the atomic_xxx() APIs need * to follow the same locking rules to make sure that a "hardware" * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn * sequence * * Thus atomic_set() despite being 1 insn (and seemingly atomic) * requires the locking. */ unsigned long flags; atomic_ops_lock(flags); WRITE_ONCE(v->counter, i); atomic_ops_unlock(flags); } #define atomic_set_release(v, i) atomic_set((v), (i)) #endif /* * Non hardware assisted Atomic-R-M-W * Locking would change to irq-disabling only (UP) and spinlocks (SMP) */ #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ \ atomic_ops_lock(flags); \ v->counter c_op i; \ atomic_ops_unlock(flags); \ } #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned long flags; \ unsigned long temp; \ \ /* \ * spin lock/unlock provides the needed smp_mb() before/after \ */ \ atomic_ops_lock(flags); \ temp = v->counter; \ temp c_op i; \ v->counter = temp; \ atomic_ops_unlock(flags); \ \ return temp; \ } #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ unsigned long orig; \ \ /* \ * spin lock/unlock provides the needed smp_mb() before/after \ */ \ atomic_ops_lock(flags); \ orig = v->counter; \ v->counter c_op i; \ atomic_ops_unlock(flags); \ \ return orig; \ } #endif /* !CONFIG_ARC_HAS_LLSC */ #define ATOMIC_OPS(op, c_op, asm_op) \ ATOMIC_OP(op, c_op, asm_op) \ ATOMIC_OP_RETURN(op, c_op, asm_op) \ ATOMIC_FETCH_OP(op, c_op, asm_op) ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) #define atomic_andnot atomic_andnot #define atomic_fetch_andnot atomic_fetch_andnot #undef ATOMIC_OPS #define ATOMIC_OPS(op, c_op, asm_op) \ ATOMIC_OP(op, c_op, asm_op) \ ATOMIC_FETCH_OP(op, c_op, asm_op) ATOMIC_OPS(and, &=, and) ATOMIC_OPS(andnot, &= ~, bic) ATOMIC_OPS(or, |=, or) ATOMIC_OPS(xor, ^=, xor) #else /* CONFIG_ARC_PLAT_EZNPS */ static inline int atomic_read(const atomic_t *v) { int temp; __asm__ __volatile__( " ld.di %0, [%1]" : "=r"(temp) : "r"(&v->counter) : "memory"); return temp; } static inline void atomic_set(atomic_t *v, int i) { __asm__ __volatile__( " st.di %0,[%1]" : : "r"(i), "r"(&v->counter) : "memory"); } #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ __asm__ __volatile__( \ " mov r2, %0\n" \ " mov r3, %1\n" \ " .word %2\n" \ : \ : "r"(i), "r"(&v->counter), "i"(asm_op) \ : "r2", "r3", "memory"); \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned int temp = i; \ \ /* Explicit full memory barrier needed before/after */ \ smp_mb(); \ \ __asm__ __volatile__( \ " mov r2, %0\n" \ " mov r3, %1\n" \ " .word %2\n" \ " mov %0, r2" \ : "+r"(temp) \ : "r"(&v->counter), "i"(asm_op) \ : "r2", "r3", "memory"); \ \ smp_mb(); \ \ temp c_op i; \ \ return temp; \ } #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned int temp = i; \ \ /* Explicit full memory barrier needed before/after */ \ smp_mb(); \ \ __asm__ __volatile__( \ " mov r2, %0\n" \ " mov r3, %1\n" \ " .word %2\n" \ " mov %0, r2" \ : "+r"(temp) \ : "r"(&v->counter), "i"(asm_op) \ : "r2", "r3", "memory"); \ \ smp_mb(); \ \ return temp; \ } #define ATOMIC_OPS(op, c_op, asm_op) \ ATOMIC_OP(op, c_op, asm_op) \ ATOMIC_OP_RETURN(op, c_op, asm_op) \ ATOMIC_FETCH_OP(op, c_op, asm_op) ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3) #define atomic_sub(i, v) atomic_add(-(i), (v)) #define atomic_sub_return(i, v) atomic_add_return(-(i), (v)) #define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v)) #undef ATOMIC_OPS #define ATOMIC_OPS(op, c_op, asm_op) \ ATOMIC_OP(op, c_op, asm_op) \ ATOMIC_FETCH_OP(op, c_op, asm_op) ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3) ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3) ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3) #endif /* CONFIG_ARC_PLAT_EZNPS */ #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #else /* Kconfig ensures this is only enabled with needed h/w assist */ /* * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD) * - The address HAS to be 64-bit aligned * - There are 2 semantics involved here: * = exclusive implies no interim update between load/store to same addr * = both words are observed/updated together: this is guaranteed even * for regular 64-bit load (LDD) / store (STD). Thus atomic64_set() * is NOT required to use LLOCKD+SCONDD, STD suffices */ typedef struct { s64 __aligned(8) counter; } atomic64_t; #define ATOMIC64_INIT(a) { (a) } static inline s64 atomic64_read(const atomic64_t *v) { s64 val; __asm__ __volatile__( " ldd %0, [%1] \n" : "=r"(val) : "r"(&v->counter)); return val; } static inline void atomic64_set(atomic64_t *v, s64 a) { /* * This could have been a simple assignment in "C" but would need * explicit volatile. Otherwise gcc optimizers could elide the store * which borked atomic64 self-test * In the inline asm version, memory clobber needed for exact same * reason, to tell gcc about the store. * * This however is not needed for sibling atomic64_add() etc since both * load/store are explicitly done in inline asm. As long as API is used * for each access, gcc has no way to optimize away any load/store */ __asm__ __volatile__( " std %0, [%1] \n" : : "r"(a), "r"(&v->counter) : "memory"); } #define ATOMIC64_OP(op, op1, op2) \ static inline void atomic64_##op(s64 a, atomic64_t *v) \ { \ s64 val; \ \ __asm__ __volatile__( \ "1: \n" \ " llockd %0, [%1] \n" \ " " #op1 " %L0, %L0, %L2 \n" \ " " #op2 " %H0, %H0, %H2 \n" \ " scondd %0, [%1] \n" \ " bnz 1b \n" \ : "=&r"(val) \ : "r"(&v->counter), "ir"(a) \ : "cc"); \ } \ #define ATOMIC64_OP_RETURN(op, op1, op2) \ static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \ { \ s64 val; \ \ smp_mb(); \ \ __asm__ __volatile__( \ "1: \n" \ " llockd %0, [%1] \n" \ " " #op1 " %L0, %L0, %L2 \n" \ " " #op2 " %H0, %H0, %H2 \n" \ " scondd %0, [%1] \n" \ " bnz 1b \n" \ : [val] "=&r"(val) \ : "r"(&v->counter), "ir"(a) \ : "cc"); /* memory clobber comes from smp_mb() */ \ \ smp_mb(); \ \ return val; \ } #define ATOMIC64_FETCH_OP(op, op1, op2) \ static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \ { \ s64 val, orig; \ \ smp_mb(); \ \ __asm__ __volatile__( \ "1: \n" \ " llockd %0, [%2] \n" \ " " #op1 " %L1, %L0, %L3 \n" \ " " #op2 " %H1, %H0, %H3 \n" \ " scondd %1, [%2] \n" \ " bnz 1b \n" \ : "=&r"(orig), "=&r"(val) \ : "r"(&v->counter), "ir"(a) \ : "cc"); /* memory clobber comes from smp_mb() */ \ \ smp_mb(); \ \ return orig; \ } #define ATOMIC64_OPS(op, op1, op2) \ ATOMIC64_OP(op, op1, op2) \ ATOMIC64_OP_RETURN(op, op1, op2) \ ATOMIC64_FETCH_OP(op, op1, op2) #define atomic64_andnot atomic64_andnot #define atomic64_fetch_andnot atomic64_fetch_andnot ATOMIC64_OPS(add, add.f, adc) ATOMIC64_OPS(sub, sub.f, sbc) ATOMIC64_OPS(and, and, and) ATOMIC64_OPS(andnot, bic, bic) ATOMIC64_OPS(or, or, or) ATOMIC64_OPS(xor, xor, xor) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP static inline s64 atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new) { s64 prev; smp_mb(); __asm__ __volatile__( "1: llockd %0, [%1] \n" " brne %L0, %L2, 2f \n" " brne %H0, %H2, 2f \n" " scondd %3, [%1] \n" " bnz 1b \n" "2: \n" : "=&r"(prev) : "r"(ptr), "ir"(expected), "r"(new) : "cc"); /* memory clobber comes from smp_mb() */ smp_mb(); return prev; } static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new) { s64 prev; smp_mb(); __asm__ __volatile__( "1: llockd %0, [%1] \n" " scondd %2, [%1] \n" " bnz 1b \n" "2: \n" : "=&r"(prev) : "r"(ptr), "r"(new) : "cc"); /* memory clobber comes from smp_mb() */ smp_mb(); return prev; } /** * atomic64_dec_if_positive - decrement by 1 if old value positive * @v: pointer of type atomic64_t * * The function returns the old value of *v minus 1, even if * the atomic variable, v, was not decremented. */ static inline s64 atomic64_dec_if_positive(atomic64_t *v) { s64 val; smp_mb(); __asm__ __volatile__( "1: llockd %0, [%1] \n" " sub.f %L0, %L0, 1 # w0 - 1, set C on borrow\n" " sub.c %H0, %H0, 1 # if C set, w1 - 1\n" " brlt %H0, 0, 2f \n" " scondd %0, [%1] \n" " bnz 1b \n" "2: \n" : "=&r"(val) : "r"(&v->counter) : "cc"); /* memory clobber comes from smp_mb() */ smp_mb(); return val; } #define atomic64_dec_if_positive atomic64_dec_if_positive /** * atomic64_fetch_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, if it was not @u. * Returns the old value of @v */ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { s64 old, temp; smp_mb(); __asm__ __volatile__( "1: llockd %0, [%2] \n" " brne %L0, %L4, 2f # continue to add since v != u \n" " breq.d %H0, %H4, 3f # return since v == u \n" "2: \n" " add.f %L1, %L0, %L3 \n" " adc %H1, %H0, %H3 \n" " scondd %1, [%2] \n" " bnz 1b \n" "3: \n" : "=&r"(old), "=&r" (temp) : "r"(&v->counter), "r"(a), "r"(u) : "cc"); /* memory clobber comes from smp_mb() */ smp_mb(); return old; } #define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif /* !CONFIG_GENERIC_ATOMIC64 */ #endif /* !__ASSEMBLY__ */ #endif include/asm/pgalloc.h 0000644 00000007075 14722070650 0010555 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * vineetg: June 2011 * -"/proc/meminfo | grep PageTables" kept on increasing * Recently added pgtable dtor was not getting called. * * vineetg: May 2011 * -Variable pg-sz means that Page Tables could be variable sized themselves * So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx] * -Page Table size capped to max 1 to save memory - hence verified. * -Since these deal with constants, gcc compile-time optimizes them. * * vineetg: Nov 2010 * -Added pgtable ctor/dtor used for pgtable mem accounting * * vineetg: April 2010 * -Switched pgtable_t from being struct page * to unsigned long * =Needed so that Page Table allocator (pte_alloc_one) is not forced to * to deal with struct page. Thay way in future we can make it allocate * multiple PG Tbls in one Page Frame * =sweet side effect is avoiding calls to ugly page_address( ) from the * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef _ASM_ARC_PGALLOC_H #define _ASM_ARC_PGALLOC_H #include <linux/mm.h> #include <linux/log2.h> static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { pmd_set(pmd, pte); } static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep) { pmd_set(pmd, (pte_t *) ptep); } static inline int __get_order_pgd(void) { return get_order(PTRS_PER_PGD * sizeof(pgd_t)); } static inline pgd_t *pgd_alloc(struct mm_struct *mm) { int num, num2; pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd()); if (ret) { num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE; memzero(ret, num * sizeof(pgd_t)); num2 = VMALLOC_SIZE / PGDIR_SIZE; memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t)); memzero(ret + num + num2, (PTRS_PER_PGD - num - num2) * sizeof(pgd_t)); } return ret; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { free_pages((unsigned long)pgd, __get_order_pgd()); } /* * With software-only page-tables, addr-split for traversal is tweakable and * that directly governs how big tables would be at each level. * Further, the MMU page size is configurable. * Thus we need to programatically assert the size constraint * All of this is const math, allowing gcc to do constant folding/propagation. */ static inline int __get_order_pte(void) { return get_order(PTRS_PER_PTE * sizeof(pte_t)); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, __get_order_pte()); return pte; } static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { pgtable_t pte_pg; struct page *page; pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte()); if (!pte_pg) return 0; memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t)); page = virt_to_page(pte_pg); if (!pgtable_pte_page_ctor(page)) { __free_page(page); return 0; } return pte_pg; } static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */ } static inline void pte_free(struct mm_struct *mm, pgtable_t ptep) { pgtable_pte_page_dtor(virt_to_page(ptep)); free_pages((unsigned long)ptep, __get_order_pte()); } #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) #define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd)) #endif /* _ASM_ARC_PGALLOC_H */ include/asm/mmu_context.h 0000644 00000013040 14722070650 0011463 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * vineetg: May 2011 * -Refactored get_new_mmu_context( ) to only handle live-mm. * retiring-mm handled in other hooks * * Vineetg: March 25th, 2008: Bug #92690 * -Major rewrite of Core ASID allocation routine get_new_mmu_context * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef _ASM_ARC_MMU_CONTEXT_H #define _ASM_ARC_MMU_CONTEXT_H #include <asm/arcregs.h> #include <asm/tlb.h> #include <linux/sched/mm.h> #include <asm-generic/mm_hooks.h> /* ARC700 ASID Management * * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries * with same vaddr (different tasks) to co-exit. This provides for * "Fast Context Switch" i.e. no TLB flush on ctxt-switch * * Linux assigns each task a unique ASID. A simple round-robin allocation * of H/w ASID is done using software tracker @asid_cpu. * When it reaches max 255, the allocation cycle starts afresh by flushing * the entire TLB and wrapping ASID back to zero. * * A new allocation cycle, post rollover, could potentially reassign an ASID * to a different task. Thus the rule is to refresh the ASID in a new cycle. * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits * serve as cycle/generation indicator and natural 32 bit unsigned math * automagically increments the generation when lower 8 bits rollover. */ #define MM_CTXT_ASID_MASK 0x000000ff /* MMU PID reg :8 bit PID */ #define MM_CTXT_CYCLE_MASK (~MM_CTXT_ASID_MASK) #define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1) #define MM_CTXT_NO_ASID 0UL #define asid_mm(mm, cpu) mm->context.asid[cpu] #define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK) DECLARE_PER_CPU(unsigned int, asid_cache); #define asid_cpu(cpu) per_cpu(asid_cache, cpu) /* * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle) * Also set the MMU PID register to existing/updated ASID */ static inline void get_new_mmu_context(struct mm_struct *mm) { const unsigned int cpu = smp_processor_id(); unsigned long flags; local_irq_save(flags); /* * Move to new ASID if it was not from current alloc-cycle/generation. * This is done by ensuring that the generation bits in both mm->ASID * and cpu's ASID counter are exactly same. * * Note: Callers needing new ASID unconditionally, independent of * generation, e.g. local_flush_tlb_mm() for forking parent, * first need to destroy the context, setting it to invalid * value. */ if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK)) goto set_hw; /* move to new ASID and handle rollover */ if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) { local_flush_tlb_all(); /* * Above check for rollover of 8 bit ASID in 32 bit container. * If the container itself wrapped around, set it to a non zero * "generation" to distinguish from no context */ if (!asid_cpu(cpu)) asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE; } /* Assign new ASID to tsk */ asid_mm(mm, cpu) = asid_cpu(cpu); set_hw: write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE); local_irq_restore(flags); } /* * Initialize the context related info for a new mm_struct * instance. */ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { int i; for_each_possible_cpu(i) asid_mm(mm, i) = MM_CTXT_NO_ASID; return 0; } static inline void destroy_context(struct mm_struct *mm) { unsigned long flags; /* Needed to elide CONFIG_DEBUG_PREEMPT warning */ local_irq_save(flags); asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID; local_irq_restore(flags); } /* Prepare the MMU for task: setup PID reg with allocated ASID If task doesn't have an ASID (never alloc or stolen, get a new ASID) */ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { const int cpu = smp_processor_id(); /* * Note that the mm_cpumask is "aggregating" only, we don't clear it * for the switched-out task, unlike some other arches. * It is used to enlist cpus for sending TLB flush IPIs and not sending * it to CPUs where a task once ran-on, could cause stale TLB entry * re-use, specially for a multi-threaded task. * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps. * For a non-aggregating mm_cpumask, IPI not sent C1, and if T1 * were to re-migrate to C1, it could access the unmapped region * via any existing stale TLB entries. */ cpumask_set_cpu(cpu, mm_cpumask(next)); #ifndef CONFIG_SMP /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); #endif get_new_mmu_context(next); } /* * Called at the time of execve() to get a new ASID * Note the subtlety here: get_new_mmu_context() behaves differently here * vs. in switch_mm(). Here it always returns a new ASID, because mm has * an unallocated "initial" value, while in latter, it moves to a new ASID, * only if it was unallocated */ #define activate_mm(prev, next) switch_mm(prev, next, NULL) /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping * for retiring-mm. However destroy_context( ) still needs to do that because * between mm_release( ) = >deactive_mm( ) and * mmput => .. => __mmdrop( ) => destroy_context( ) * there is a good chance that task gets sched-out/in, making it's ASID valid * again (this teased me for a whole day). */ #define deactivate_mm(tsk, mm) do { } while (0) #define enter_lazy_tlb(mm, tsk) #endif /* __ASM_ARC_MMU_CONTEXT_H */ include/asm/futex.h 0000644 00000007016 14722070650 0010262 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Vineetg: August 2010: From Android kernel work */ #ifndef _ASM_FUTEX_H #define _ASM_FUTEX_H #include <linux/futex.h> #include <linux/preempt.h> #include <linux/uaccess.h> #include <asm/errno.h> #ifdef CONFIG_ARC_HAS_LLSC #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\ \ smp_mb(); \ __asm__ __volatile__( \ "1: llock %1, [%2] \n" \ insn "\n" \ "2: scond %0, [%2] \n" \ " bnz 1b \n" \ " mov %0, 0 \n" \ "3: \n" \ " .section .fixup,\"ax\" \n" \ " .align 4 \n" \ "4: mov %0, %4 \n" \ " j 3b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ " .align 4 \n" \ " .word 1b, 4b \n" \ " .word 2b, 4b \n" \ " .previous \n" \ \ : "=&r" (ret), "=&r" (oldval) \ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \ : "cc", "memory"); \ smp_mb() \ #else /* !CONFIG_ARC_HAS_LLSC */ #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\ \ smp_mb(); \ __asm__ __volatile__( \ "1: ld %1, [%2] \n" \ insn "\n" \ "2: st %0, [%2] \n" \ " mov %0, 0 \n" \ "3: \n" \ " .section .fixup,\"ax\" \n" \ " .align 4 \n" \ "4: mov %0, %4 \n" \ " j 3b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ " .align 4 \n" \ " .word 1b, 4b \n" \ " .word 2b, 4b \n" \ " .previous \n" \ \ : "=&r" (ret), "=&r" (oldval) \ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \ : "cc", "memory"); \ smp_mb() \ #endif static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) { int oldval = 0, ret; #ifndef CONFIG_ARC_HAS_LLSC preempt_disable(); /* to guarantee atomic r-m-w of futex op */ #endif pagefault_disable(); switch (op) { case FUTEX_OP_SET: __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ADD: /* oldval = *uaddr; *uaddr += oparg ; ret = *uaddr */ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ANDN: __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_XOR: __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg); break; default: ret = -ENOSYS; } pagefault_enable(); #ifndef CONFIG_ARC_HAS_LLSC preempt_enable(); #endif if (!ret) *oval = oldval; return ret; } /* * cmpxchg of futex (pagefaults disabled by caller) * Return 0 for success, -EFAULT otherwise */ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 expval, u32 newval) { int ret = 0; u32 existval; if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; #ifndef CONFIG_ARC_HAS_LLSC preempt_disable(); /* to guarantee atomic r-m-w of futex op */ #endif smp_mb(); __asm__ __volatile__( #ifdef CONFIG_ARC_HAS_LLSC "1: llock %1, [%4] \n" " brne %1, %2, 3f \n" "2: scond %3, [%4] \n" " bnz 1b \n" #else "1: ld %1, [%4] \n" " brne %1, %2, 3f \n" "2: st %3, [%4] \n" #endif "3: \n" " .section .fixup,\"ax\" \n" "4: mov %0, %5 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .word 2b, 4b \n" " .previous\n" : "+&r"(ret), "=&r"(existval) : "r"(expval), "r"(newval), "r"(uaddr), "ir"(-EFAULT) : "cc", "memory"); smp_mb(); #ifndef CONFIG_ARC_HAS_LLSC preempt_enable(); #endif *uval = existval; return ret; } #endif include/asm/module.h 0000644 00000001003 14722070650 0010402 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef _ASM_ARC_MODULE_H #define _ASM_ARC_MODULE_H #include <asm-generic/module.h> struct mod_arch_specific { #ifdef CONFIG_ARC_DW2_UNWIND void *unw_info; int unw_sec_idx; #endif const char *secstr; }; #define MODULE_PROC_FAMILY "ARC700" #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY #endif /* _ASM_ARC_MODULE_H */ include/asm/current.h 0000644 00000001045 14722070650 0010605 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Vineetg: May 16th, 2008 * - Current macro is now implemented as "global register" r25 */ #ifndef _ASM_ARC_CURRENT_H #define _ASM_ARC_CURRENT_H #ifndef __ASSEMBLY__ #ifdef CONFIG_ARC_CURR_IN_REG register struct task_struct *curr_arc asm("r25"); #define current (curr_arc) #else #include <asm-generic/current.h> #endif /* ! CONFIG_ARC_CURR_IN_REG */ #endif /* ! __ASSEMBLY__ */ #endif /* _ASM_ARC_CURRENT_H */ include/asm/delay.h 0000644 00000003550 14722070650 0010224 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Delay routines using pre computed loops_per_jiffy value. * * vineetg: Feb 2012 * -Rewrote in "C" to avoid dealing with availability of H/w MPY * -Also reduced the num of MPY operations from 3 to 2 * * Amit Bhor: Codito Technologies 2004 */ #ifndef __ASM_ARC_UDELAY_H #define __ASM_ARC_UDELAY_H #include <asm-generic/types.h> #include <asm/param.h> /* HZ */ extern unsigned long loops_per_jiffy; static inline void __delay(unsigned long loops) { __asm__ __volatile__( " mov lp_count, %0 \n" " lp 1f \n" " nop \n" "1: \n" : : "r"(loops) : "lp_count"); } extern void __bad_udelay(void); /* * Normal Math for computing loops in "N" usecs * -we have precomputed @loops_per_jiffy * -1 sec has HZ jiffies * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N) * * Approximate Division by multiplication: * -Mathematically if we multiply and divide a number by same value the * result remains unchanged: In this case, we use 2^32 * -> (loops_per_N_usec * 2^32 ) / 2^32 * -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32 * -> (loops_per_jiffy * HZ * N * 4295) / 2^32 * * -Divide by 2^32 is very simply right shift by 32 * -We simply need to ensure that the multiply per above eqn happens in * 64-bit precision (if CPU doesn't support it - gcc can emaulate it) */ static inline void __udelay(unsigned long usecs) { unsigned long loops; /* (u64) cast ensures 64 bit MPY - real or emulated * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops */ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32; __delay(loops); } #define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \ : __udelay(n)) : __udelay(n)) #endif /* __ASM_ARC_UDELAY_H */ include/asm/switch_to.h 0000644 00000002037 14722070650 0011130 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_SWITCH_TO_H #define _ASM_ARC_SWITCH_TO_H #ifndef __ASSEMBLY__ #include <linux/sched.h> #ifdef CONFIG_ARC_FPU_SAVE_RESTORE extern void fpu_save_restore(struct task_struct *p, struct task_struct *n); #define ARC_FPU_PREV(p, n) fpu_save_restore(p, n) #define ARC_FPU_NEXT(t) #else #define ARC_FPU_PREV(p, n) #define ARC_FPU_NEXT(n) #endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */ #ifdef CONFIG_ARC_PLAT_EZNPS extern void dp_save_restore(struct task_struct *p, struct task_struct *n); #define ARC_EZNPS_DP_PREV(p, n) dp_save_restore(p, n) #else #define ARC_EZNPS_DP_PREV(p, n) #endif /* !CONFIG_ARC_PLAT_EZNPS */ struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n); #define switch_to(prev, next, last) \ do { \ ARC_EZNPS_DP_PREV(prev, next); \ ARC_FPU_PREV(prev, next); \ last = __switch_to(prev, next);\ ARC_FPU_NEXT(next); \ mb(); \ } while (0) #endif #endif include/asm/dma.h 0000644 00000000471 14722070650 0007666 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef ASM_ARC_DMA_H #define ASM_ARC_DMA_H #define MAX_DMA_ADDRESS 0xC0000000 #ifdef CONFIG_PCI extern int isa_dma_bridge_buggy; #else #define isa_dma_bridge_buggy 0 #endif #endif include/asm/kdebug.h 0000644 00000000376 14722070650 0010372 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_KDEBUG_H #define _ASM_ARC_KDEBUG_H enum die_val { DIE_UNUSED, DIE_TRAP, DIE_IERR, DIE_OOPS }; #endif include/asm/tlb-mmu1.h 0000644 00000006530 14722070650 0010565 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_TLB_MMU_V1_H__ #define __ASM_TLB_MMU_V1_H__ #include <asm/mmu.h> #if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1) .macro TLB_WRITE_HEURISTICS #define JH_HACK1 #undef JH_HACK2 #undef JH_HACK3 #ifdef JH_HACK3 ; Calculate set index for 2-way MMU ; -avoiding use of GetIndex from MMU ; and its unpleasant LFSR pseudo-random sequence ; ; r1 = TLBPD0 from TLB_RELOAD above ; ; -- jh_ex_way_set not cleared on startup ; didn't want to change setup.c ; hence extra instruction to clean ; ; -- should be in cache since in same line ; as r0/r1 saves above ; ld r0,[jh_ex_way_sel] ; victim pointer and r0,r0,1 ; clean xor.f r0,r0,1 ; flip st r0,[jh_ex_way_sel] ; store back asr r0,r1,12 ; get set # <<1, note bit 12=R=0 or.nz r0,r0,1 ; set way bit and r0,r0,0xff ; clean sr r0,[ARC_REG_TLBINDEX] #endif #ifdef JH_HACK2 ; JH hack #2 ; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU ; Slower in thrash case (where it matters) because more code is executed ; Inefficient due to two-register paradigm of this miss handler ; /* r1 = data TLBPD0 at this point */ lr r0,[eret] /* instruction address */ xor r0,r0,r1 /* compare set # */ and.f r0,r0,0x000fe000 /* 2-way MMU mask */ bne 88f /* not in same set - no need to probe */ lr r0,[eret] /* instruction address */ and r0,r0,PAGE_MASK /* VPN of instruction address */ ; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/ and r1,r1,0xff /* Data ASID */ or r0,r0,r1 /* Instruction address + Data ASID */ lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/ sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */ sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */ lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */ sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */ xor r0,r0,1 /* flip bottom bit of data index */ b.d 89f sr r0,[ARC_REG_TLBINDEX] /* and put it back */ 88: sr TLBGetIndex, [ARC_REG_TLBCOMMAND] 89: #endif #ifdef JH_HACK1 ; ; Always checks whether instruction will be kicked out by dtlb miss ; mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3 lr r0,[eret] /* instruction address */ and r0,r0,PAGE_MASK /* VPN of instruction address */ bmsk r1,r3,7 /* Data ASID, bits 7-0 */ or_s r0,r0,r1 /* Instruction address + Data ASID */ sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */ sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */ lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */ sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */ sr TLBGetIndex, [ARC_REG_TLBCOMMAND] lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */ cmp r0,r1 /* if no match on indices, go around */ xor.eq r1,r1,1 /* flip bottom bit of data index */ sr r1,[ARC_REG_TLBINDEX] /* and put it back */ #endif .endm #endif #endif include/asm/fb.h 0000644 00000000633 14722070650 0007514 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_FB_H_ #define _ASM_FB_H_ #include <linux/fb.h> #include <linux/fs.h> #include <asm/page.h> static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, unsigned long off) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); } static inline int fb_is_primary_device(struct fb_info *info) { return 0; } #endif /* _ASM_FB_H_ */ include/asm/io.h 0000644 00000014442 14722070650 0007537 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_IO_H #define _ASM_ARC_IO_H #include <linux/types.h> #include <asm/byteorder.h> #include <asm/page.h> #include <asm/unaligned.h> #ifdef CONFIG_ISA_ARCV2 #include <asm/barrier.h> #define __iormb() rmb() #define __iowmb() wmb() #else #define __iormb() do { } while (0) #define __iowmb() do { } while (0) #endif extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size); extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, unsigned long flags); static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { return (void __iomem *)port; } static inline void ioport_unmap(void __iomem *addr) { } extern void iounmap(const volatile void __iomem *addr); #define ioremap_nocache(phy, sz) ioremap(phy, sz) #define ioremap_wc(phy, sz) ioremap(phy, sz) #define ioremap_wt(phy, sz) ioremap(phy, sz) /* * io{read,write}{16,32}be() macros */ #define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) #define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); }) #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); }) /* Change struct page to physical address */ #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 b; __asm__ __volatile__( " ldb%U1 %0, %1 \n" : "=r" (b) : "m" (*(volatile u8 __force *)addr) : "memory"); return b; } #define __raw_readw __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 s; __asm__ __volatile__( " ldw%U1 %0, %1 \n" : "=r" (s) : "m" (*(volatile u16 __force *)addr) : "memory"); return s; } #define __raw_readl __raw_readl static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 w; __asm__ __volatile__( " ld%U1 %0, %1 \n" : "=r" (w) : "m" (*(volatile u32 __force *)addr) : "memory"); return w; } /* * {read,write}s{b,w,l}() repeatedly access the same IO address in * native endianness in 8-, 16-, 32-bit chunks {into,from} memory, * @count times */ #define __raw_readsx(t,f) \ static inline void __raw_reads##f(const volatile void __iomem *addr, \ void *ptr, unsigned int count) \ { \ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \ u##t *buf = ptr; \ \ if (!count) \ return; \ \ /* Some ARC CPU's don't support unaligned accesses */ \ if (is_aligned) { \ do { \ u##t x = __raw_read##f(addr); \ *buf++ = x; \ } while (--count); \ } else { \ do { \ u##t x = __raw_read##f(addr); \ put_unaligned(x, buf++); \ } while (--count); \ } \ } #define __raw_readsb __raw_readsb __raw_readsx(8, b) #define __raw_readsw __raw_readsw __raw_readsx(16, w) #define __raw_readsl __raw_readsl __raw_readsx(32, l) #define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 b, volatile void __iomem *addr) { __asm__ __volatile__( " stb%U1 %0, %1 \n" : : "r" (b), "m" (*(volatile u8 __force *)addr) : "memory"); } #define __raw_writew __raw_writew static inline void __raw_writew(u16 s, volatile void __iomem *addr) { __asm__ __volatile__( " stw%U1 %0, %1 \n" : : "r" (s), "m" (*(volatile u16 __force *)addr) : "memory"); } #define __raw_writel __raw_writel static inline void __raw_writel(u32 w, volatile void __iomem *addr) { __asm__ __volatile__( " st%U1 %0, %1 \n" : : "r" (w), "m" (*(volatile u32 __force *)addr) : "memory"); } #define __raw_writesx(t,f) \ static inline void __raw_writes##f(volatile void __iomem *addr, \ const void *ptr, unsigned int count) \ { \ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \ const u##t *buf = ptr; \ \ if (!count) \ return; \ \ /* Some ARC CPU's don't support unaligned accesses */ \ if (is_aligned) { \ do { \ __raw_write##f(*buf++, addr); \ } while (--count); \ } else { \ do { \ __raw_write##f(get_unaligned(buf++), addr); \ } while (--count); \ } \ } #define __raw_writesb __raw_writesb __raw_writesx(8, b) #define __raw_writesw __raw_writesw __raw_writesx(16, w) #define __raw_writesl __raw_writesl __raw_writesx(32, l) /* * MMIO can also get buffered/optimized in micro-arch, so barriers needed * Based on ARM model for the typical use case * * <ST [DMA buffer]> * <writel MMIO "go" reg> * or: * <readl MMIO "status" reg> * <LD [DMA buffer]> * * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com */ #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) #define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); }) #define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); }) #define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); }) #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) #define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); }) #define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); }) #define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); }) /* * Relaxed API for drivers which can handle barrier ordering themselves * * Also these are defined to perform little endian accesses. * To provide the typical device register semantics of fixed endian, * swap the byte order for Big Endian * * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de */ #define readb_relaxed(c) __raw_readb(c) #define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ __raw_readw(c)); __r; }) #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ __raw_readl(c)); __r; }) #define writeb_relaxed(v,c) __raw_writeb(v,c) #define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c) #define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) #include <asm-generic/io.h> #endif /* _ASM_ARC_IO_H */ include/asm/shmparam.h 0000644 00000000450 14722070650 0010732 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ARC_ASM_SHMPARAM_H #define __ARC_ASM_SHMPARAM_H /* Handle upto 2 cache bins */ #define SHMLBA (2 * PAGE_SIZE) /* Enforce SHMLBA in shmat */ #define __ARCH_FORCE_SHMLBA #endif include/asm/syscalls.h 0000644 00000001043 14722070650 0010756 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_SYSCALLS_H #define _ASM_ARC_SYSCALLS_H 1 #include <linux/compiler.h> #include <linux/linkage.h> #include <linux/types.h> int sys_clone_wrapper(int, int, int, int, int); int sys_clone3_wrapper(void *, size_t); int sys_cacheflush(uint32_t, uint32_t uint32_t); int sys_arc_settls(void *); int sys_arc_gettls(void); int sys_arc_usr_cmpxchg(int *, int, int); #include <asm-generic/syscalls.h> #endif include/asm/sections.h 0000644 00000000405 14722070650 0010751 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_SECTIONS_H #define _ASM_ARC_SECTIONS_H #include <asm-generic/sections.h> extern char __arc_dccm_base[]; #endif include/asm/segment.h 0000644 00000000722 14722070650 0010566 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASMARC_SEGMENT_H #define __ASMARC_SEGMENT_H #ifndef __ASSEMBLY__ typedef unsigned long mm_segment_t; #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) #define KERNEL_DS MAKE_MM_SEG(0) #define USER_DS MAKE_MM_SEG(TASK_SIZE) #define segment_eq(a, b) ((a) == (b)) #endif /* __ASSEMBLY__ */ #endif /* __ASMARC_SEGMENT_H */ include/asm/thread_info.h 0000644 00000006207 14722070650 0011412 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Vineetg: Oct 2009 * No need for ARC specific thread_info allocator (kmalloc/free). This is * anyways one page allocation, thus slab alloc can be short-circuited and * the generic version (get_free_page) would be loads better. * * Sameer Dhavale: Codito Technologies 2004 */ #ifndef _ASM_THREAD_INFO_H #define _ASM_THREAD_INFO_H #include <asm/page.h> #ifdef CONFIG_16KSTACKS #define THREAD_SIZE_ORDER 1 #else #define THREAD_SIZE_ORDER 0 #endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_SHIFT (PAGE_SHIFT << THREAD_SIZE_ORDER) #ifndef __ASSEMBLY__ #include <linux/thread_info.h> #include <asm/segment.h> /* * low level task data that entry.S needs immediate access to * - this struct should fit entirely inside of one cache line * - this struct shares the supervisor stack pages * - if the contents of this structure are changed, the assembly constants * must also be changed */ struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => BUG */ struct task_struct *task; /* main task structure */ mm_segment_t addr_limit; /* thread address space */ __u32 cpu; /* current CPU */ unsigned long thr_ptr; /* TLS ptr */ }; /* * macros/functions for gaining access to the thread information structure * * preempt_count needs to be 1 initially, until the scheduler is functional. */ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ } static inline __attribute_const__ struct thread_info *current_thread_info(void) { register unsigned long sp asm("sp"); return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); } #endif /* !__ASSEMBLY__ */ /* * thread information flags * - these are process state flags that various assembly files may need to * access * - pending work-to-be-done flags are in LSW * - other flags in MSW */ #define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */ #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ #define TIF_SYSCALL_TRACE 15 /* syscall trace active */ /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 16 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_MEMDIE (1<<TIF_MEMDIE) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME) /* * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it. * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a * syscall, so all that reamins to be tested is _TIF_WORK_MASK */ #endif /* _ASM_THREAD_INFO_H */ include/asm/tlb.h 0000644 00000000406 14722070650 0007704 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_TLB_H #define _ASM_ARC_TLB_H #include <linux/pagemap.h> #include <asm-generic/tlb.h> #endif /* _ASM_ARC_TLB_H */ include/asm/processor.h 0000644 00000010414 14722070650 0011142 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * vineetg: March 2009 * -Implemented task_pt_regs( ) * * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004 */ #ifndef __ASM_ARC_PROCESSOR_H #define __ASM_ARC_PROCESSOR_H #ifndef __ASSEMBLY__ #include <asm/ptrace.h> #ifdef CONFIG_ARC_FPU_SAVE_RESTORE /* These DPFP regs need to be saved/restored across ctx-sw */ struct arc_fpu { struct { unsigned int l, h; } aux_dpfp[2]; }; #endif #ifdef CONFIG_ARC_PLAT_EZNPS struct eznps_dp { unsigned int eflags; unsigned int gpa1; }; #endif /* Arch specific stuff which needs to be saved per task. * However these items are not so important so as to earn a place in * struct thread_info */ struct thread_struct { unsigned long ksp; /* kernel mode stack pointer */ unsigned long callee_reg; /* pointer to callee regs */ unsigned long fault_address; /* dbls as brkpt holder as well */ #ifdef CONFIG_ARC_FPU_SAVE_RESTORE struct arc_fpu fpu; #endif #ifdef CONFIG_ARC_PLAT_EZNPS struct eznps_dp dp; #endif }; #define INIT_THREAD { \ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \ } /* Forward declaration, a strange C thing */ struct task_struct; #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) /* Free all resources held by a thread */ #define release_thread(thread) do { } while (0) /* * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise * get optimised away by gcc */ #ifndef CONFIG_EZNPS_MTM_EXT #define cpu_relax() barrier() #else #define cpu_relax() \ __asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory") #endif #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) /* * Where about of Task's sp, fp, blink when it was last seen in kernel mode. * Look in process.c for details of kernel stack layout */ #define TSK_K_ESP(tsk) (tsk->thread.ksp) #define TSK_K_REG(tsk, off) (*((unsigned long *)(TSK_K_ESP(tsk) + \ sizeof(struct callee_regs) + off))) #define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) #define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp); extern unsigned int get_wchan(struct task_struct *p); #endif /* !__ASSEMBLY__ */ /* * Default System Memory Map on ARC * * ---------------------------- (lower 2G, Translated) ------------------------- * 0x0000_0000 0x5FFF_FFFF (user vaddr: TASK_SIZE) * 0x6000_0000 0x6FFF_FFFF (reserved gutter between U/K) * 0x7000_0000 0x7FFF_FFFF (kvaddr: vmalloc/modules/pkmap..) * * PAGE_OFFSET ---------------- (Upper 2G, Untranslated) ----------------------- * 0x8000_0000 0xBFFF_FFFF (kernel direct mapped) * 0xC000_0000 0xFFFF_FFFF (peripheral uncached space) * ----------------------------------------------------------------------------- */ #define TASK_SIZE 0x60000000 #define VMALLOC_START (PAGE_OFFSET - (CONFIG_ARC_KVADDR_SIZE << 20)) /* 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter (see asm/highmem.h) */ #define VMALLOC_SIZE ((CONFIG_ARC_KVADDR_SIZE << 20) - PGDIR_SIZE * 4) #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) #define USER_KERNEL_GUTTER (VMALLOC_START - TASK_SIZE) #ifdef CONFIG_ARC_PLAT_EZNPS /* NPS architecture defines special window of 129M in user address space for * special memory areas, when accessing this window the MMU do not use TLB. * Instead MMU direct the access to: * 0x57f00000:0x57ffffff -- 1M of closely coupled memory (aka CMEM) * 0x58000000:0x5fffffff -- 16 huge pages, 8M each, with fixed map (aka FMTs) * * CMEM - is the fastest memory we got and its size is 16K. * FMT - is used to map either to internal/external memory. * Internal memory is the second fast memory and its size is 16M * External memory is the biggest memory (16G) and also the slowest. * * STACK_TOP need to be PMD align (21bit) that is why we supply 0x57e00000. */ #define STACK_TOP 0x57e00000 #else #define STACK_TOP TASK_SIZE #endif #define STACK_TOP_MAX STACK_TOP /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (TASK_SIZE / 3) #endif /* __ASM_ARC_PROCESSOR_H */ include/asm/pci.h 0000644 00000000550 14722070650 0007676 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_PCI_H #define _ASM_ARC_PCI_H #ifdef __KERNEL__ #include <linux/ioport.h> #define PCIBIOS_MIN_IO 0x100 #define PCIBIOS_MIN_MEM 0x100000 #define pcibios_assign_all_busses() 1 #endif /* __KERNEL__ */ #endif /* _ASM_ARC_PCI_H */ include/asm/Kbuild 0000644 00000001263 14722070650 0010111 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 generic-y += bugs.h generic-y += compat.h generic-y += device.h generic-y += div64.h generic-y += dma-mapping.h generic-y += emergency-restart.h generic-y += extable.h generic-y += ftrace.h generic-y += hardirq.h generic-y += hw_irq.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += mmiowb.h generic-y += msi.h generic-y += parport.h generic-y += percpu.h generic-y += preempt.h generic-y += topology.h generic-y += trace_clock.h generic-y += user.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h include/asm/hugepage.h 0000644 00000004615 14722070650 0010716 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_HUGEPAGE_H #define _ASM_ARC_HUGEPAGE_H #include <linux/types.h> #define __ARCH_USE_5LEVEL_HACK #include <asm-generic/pgtable-nopmd.h> static inline pte_t pmd_pte(pmd_t pmd) { return __pte(pmd_val(pmd)); } static inline pmd_t pte_pmd(pte_t pte) { return __pmd(pte_val(pte)); } #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) #define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd))) #define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd))) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_write(pmd) pte_write(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot)) #define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ) #define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { /* * open-coded pte_modify() with additional retaining of HW_SZ bit * so that pmd_trans_huge() remains true for this PMD */ return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot)); } static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { *pmdp = pmd; } extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd); /* Generic variants assume pgtable_t is struct page *, hence need for these */ #define __HAVE_ARCH_PGTABLE_DEPOSIT extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable); #define __HAVE_ARCH_PGTABLE_WITHDRAW extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); /* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ #define pmdp_establish generic_pmdp_establish #endif include/asm/ptrace.h 0000644 00000007347 14722070650 0010414 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef __ASM_ARC_PTRACE_H #define __ASM_ARC_PTRACE_H #include <uapi/asm/ptrace.h> #ifndef __ASSEMBLY__ /* THE pt_regs: Defines how regs are saved during entry into kernel */ #ifdef CONFIG_ISA_ARCOMPACT struct pt_regs { #ifdef CONFIG_ARC_PLAT_EZNPS unsigned long eflags; /* Extended FLAGS */ unsigned long gpa1; /* General Purpose Aux */ #endif /* Real registers */ unsigned long bta; /* bta_l1, bta_l2, erbta */ unsigned long lp_start, lp_end, lp_count; unsigned long status32; /* status32_l1, status32_l2, erstatus */ unsigned long ret; /* ilink1, ilink2 or eret */ unsigned long blink; unsigned long fp; unsigned long r26; /* gp */ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; unsigned long sp; /* User/Kernel depending on where we came from */ unsigned long orig_r0; /* * To distinguish bet excp, syscall, irq * For traps and exceptions, Exception Cause Register. * ECR: <00> <VV> <CC> <PP> * Last word used by Linux for extra state mgmt (syscall-restart) * For interrupts, use artificial ECR values to note current prio-level */ union { struct { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned long state:8, ecr_vec:8, ecr_cause:8, ecr_param:8; #else unsigned long ecr_param:8, ecr_cause:8, ecr_vec:8, state:8; #endif }; unsigned long event; }; unsigned long user_r25; }; #else struct pt_regs { unsigned long orig_r0; union { struct { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned long state:8, ecr_vec:8, ecr_cause:8, ecr_param:8; #else unsigned long ecr_param:8, ecr_cause:8, ecr_vec:8, state:8; #endif }; unsigned long event; }; unsigned long bta; /* bta_l1, bta_l2, erbta */ unsigned long user_r25; unsigned long r26; /* gp */ unsigned long fp; unsigned long sp; /* user/kernel sp depending on where we came from */ unsigned long r12, r30; #ifdef CONFIG_ARC_HAS_ACCL_REGS unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */ #endif /*------- Below list auto saved by h/w -----------*/ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; unsigned long blink; unsigned long lp_end, lp_start, lp_count; unsigned long ei, ldi, jli; unsigned long ret; unsigned long status32; }; #endif /* Callee saved registers - need to be saved only when you are scheduled out */ struct callee_regs { unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; }; #define instruction_pointer(regs) ((regs)->ret) #define profile_pc(regs) instruction_pointer(regs) /* return 1 if user mode or 0 if kernel mode */ #define user_mode(regs) (regs->status32 & STATUS_U_MASK) #define user_stack_pointer(regs)\ ({ unsigned int sp; \ if (user_mode(regs)) \ sp = (regs)->sp;\ else \ sp = -1; \ sp; \ }) /* return 1 if PC in delay slot */ #define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK) #define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param) #define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param) #define STATE_SCALL_RESTARTED 0x01 #define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED) #define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED) #define current_pt_regs() \ ({ \ /* open-coded current_thread_info() */ \ register unsigned long sp asm ("sp"); \ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \ (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \ }) static inline long regs_return_value(struct pt_regs *regs) { return (long)regs->r0; } #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PTRACE_H */ include/asm/cmpxchg.h 0000644 00000012414 14722070650 0010556 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_ARC_CMPXCHG_H #define __ASM_ARC_CMPXCHG_H #include <linux/types.h> #include <asm/barrier.h> #include <asm/smp.h> #ifdef CONFIG_ARC_HAS_LLSC static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) { unsigned long prev; /* * Explicit full memory barrier needed before/after as * LLOCK/SCOND thmeselves don't provide any such semantics */ smp_mb(); __asm__ __volatile__( "1: llock %0, [%1] \n" " brne %0, %2, 2f \n" " scond %3, [%1] \n" " bnz 1b \n" "2: \n" : "=&r"(prev) /* Early clobber, to prevent reg reuse */ : "r"(ptr), /* Not "m": llock only supports reg direct addr mode */ "ir"(expected), "r"(new) /* can't be "ir". scond can't take LIMM for "b" */ : "cc", "memory"); /* so that gcc knows memory is being written here */ smp_mb(); return prev; } #elif !defined(CONFIG_ARC_PLAT_EZNPS) static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) { unsigned long flags; int prev; volatile unsigned long *p = ptr; /* * spin lock/unlock provide the needed smp_mb() before/after */ atomic_ops_lock(flags); prev = *p; if (prev == expected) *p = new; atomic_ops_unlock(flags); return prev; } #else /* CONFIG_ARC_PLAT_EZNPS */ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) { /* * Explicit full memory barrier needed before/after */ smp_mb(); write_aux_reg(CTOP_AUX_GPA1, expected); __asm__ __volatile__( " mov r2, %0\n" " mov r3, %1\n" " .word %2\n" " mov %0, r2" : "+r"(new) : "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3) : "r2", "r3", "memory"); smp_mb(); return new; } #endif /* CONFIG_ARC_HAS_LLSC */ #define cmpxchg(ptr, o, n) ({ \ (typeof(*(ptr)))__cmpxchg((ptr), \ (unsigned long)(o), \ (unsigned long)(n)); \ }) /* * atomic_cmpxchg is same as cmpxchg * LLSC: only different in data-type, semantics are exactly same * !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee * semantics, and this lock also happens to be used by atomic_*() */ #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #ifndef CONFIG_ARC_PLAT_EZNPS /* * xchg (reg with memory) based on "Native atomic" EX insn */ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, int size) { extern unsigned long __xchg_bad_pointer(void); switch (size) { case 4: smp_mb(); __asm__ __volatile__( " ex %0, [%1] \n" : "+r"(val) : "r"(ptr) : "memory"); smp_mb(); return val; } return __xchg_bad_pointer(); } #define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \ sizeof(*(ptr)))) /* * xchg() maps directly to ARC EX instruction which guarantees atomicity. * However in !LLSC config, it also needs to be use @atomic_ops_lock spinlock * due to a subtle reason: * - For !LLSC, cmpxchg() needs to use that lock (see above) and there is lot * of kernel code which calls xchg()/cmpxchg() on same data (see llist.h) * Hence xchg() needs to follow same locking rules. * * Technically the lock is also needed for UP (boils down to irq save/restore) * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg() * Other way around, xchg is one instruction anyways, so can't be interrupted * as such */ #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP) #define xchg(ptr, with) \ ({ \ unsigned long flags; \ typeof(*(ptr)) old_val; \ \ atomic_ops_lock(flags); \ old_val = _xchg(ptr, with); \ atomic_ops_unlock(flags); \ old_val; \ }) #else #define xchg(ptr, with) _xchg(ptr, with) #endif #else /* CONFIG_ARC_PLAT_EZNPS */ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, int size) { extern unsigned long __xchg_bad_pointer(void); switch (size) { case 4: /* * Explicit full memory barrier needed before/after */ smp_mb(); __asm__ __volatile__( " mov r2, %0\n" " mov r3, %1\n" " .word %2\n" " mov %0, r2\n" : "+r"(val) : "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3) : "r2", "r3", "memory"); smp_mb(); return val; } return __xchg_bad_pointer(); } #define xchg(ptr, with) ({ \ (typeof(*(ptr)))__xchg((unsigned long)(with), \ (ptr), \ sizeof(*(ptr))); \ }) #endif /* CONFIG_ARC_PLAT_EZNPS */ /* * "atomic" variant of xchg() * REQ: It needs to follow the same serialization rules as other atomic_xxx() * Since xchg() doesn't always do that, it would seem that following defintion * is incorrect. But here's the rationale: * SMP : Even xchg() takes the atomic_ops_lock, so OK. * LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC * is natively "SMP safe", no serialization required). * UP : other atomics disable IRQ, so no way a difft ctxt atomic_xchg() * could clobber them. atomic_xchg() itself would be 1 insn, so it * can't be clobbered by others. Thus no serialization required when * atomic_xchg is involved. */ #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #endif include/asm/mach_desc.h 0000644 00000003625 14722070650 0011037 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com) * * based on METAG mach/arch.h (which in turn was based on ARM) */ #ifndef _ASM_ARC_MACH_DESC_H_ #define _ASM_ARC_MACH_DESC_H_ /** * struct machine_desc - Board specific callbacks, called from ARC common code * Provided by each ARC board using MACHINE_START()/MACHINE_END(), so * a multi-platform kernel builds with array of such descriptors. * We extend the early DT scan to also match the DT's "compatible" string * against the @dt_compat of all such descriptors, and one with highest * "DT score" is selected as global @machine_desc. * * @name: Board/SoC name * @dt_compat: Array of device tree 'compatible' strings * (XXX: although only 1st entry is looked at) * @init_early: Very early callback [called from setup_arch()] * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP) * [(M):init_IRQ(), (o):start_kernel_secondary()] * @init_machine: arch initcall level callback (e.g. populate static * platform devices or parse Devicetree) * @init_late: Late initcall level callback * */ struct machine_desc { const char *name; const char **dt_compat; void (*init_early)(void); void (*init_per_cpu)(unsigned int); void (*init_machine)(void); void (*init_late)(void); }; /* * Current machine - only accessible during boot. */ extern const struct machine_desc *machine_desc; /* * Machine type table - also only accessible during boot */ extern const struct machine_desc __arch_info_begin[], __arch_info_end[]; /* * Set of macros to define architecture features. * This is built into a table by the linker. */ #define MACHINE_START(_type, _name) \ static const struct machine_desc __mach_desc_##_type \ __used __section(.arch.info.init) = { \ .name = _name, #define MACHINE_END \ }; extern const struct machine_desc *setup_machine_fdt(void *dt); #endif include/asm/mmzone.h 0000644 00000001513 14722070650 0010430 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) */ #ifndef _ASM_ARC_MMZONE_H #define _ASM_ARC_MMZONE_H #ifdef CONFIG_DISCONTIGMEM extern struct pglist_data node_data[]; #define NODE_DATA(nid) (&node_data[nid]) static inline int pfn_to_nid(unsigned long pfn) { int is_end_low = 1; if (IS_ENABLED(CONFIG_ARC_HAS_PAE40)) is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL); /* * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF * node 1: HIGHMEM w/o PAE40: 0x0 to 0x7FFF_FFFF * HIGHMEM with PAE40: 0x1_0000_0000 to ... */ if (pfn >= ARCH_PFN_OFFSET && is_end_low) return 0; return 1; } static inline int pfn_valid(unsigned long pfn) { int nid = pfn_to_nid(pfn); return (pfn <= node_end_pfn(nid)); } #endif /* CONFIG_DISCONTIGMEM */ #endif include/asm/checksum.h 0000644 00000004477 14722070650 0010741 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012 * -Insn Scheduling improvements to csum core routines. * = csum_fold( ) largely derived from ARM version. * = ip_fast_cum( ) to have module scheduling * -gcc 4.4.x broke networking. Alias analysis needed to be primed. * worked around by adding memory clobber to ip_fast_csum( ) * * vineetg: May 2010 * -Rewrote ip_fast_cscum( ) and csum_fold( ) with fast inline asm */ #ifndef _ASM_ARC_CHECKSUM_H #define _ASM_ARC_CHECKSUM_H /* * Fold a partial checksum * * The 2 swords comprising the 32bit sum are added, any carry to 16th bit * added back and final sword result inverted. */ static inline __sum16 csum_fold(__wsum s) { unsigned r = s << 16 | s >> 16; /* ror */ s = ~s; s -= r; return s >> 16; } /* * This is a version of ip_compute_csum() optimized for IP headers, * which always checksum on 4 octet boundaries. */ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { const void *ptr = iph; unsigned int tmp, tmp2, sum; __asm__( " ld.ab %0, [%3, 4] \n" " ld.ab %2, [%3, 4] \n" " sub %1, %4, 2 \n" " lsr.f lp_count, %1, 1 \n" " bcc 0f \n" " add.f %0, %0, %2 \n" " ld.ab %2, [%3, 4] \n" "0: lp 1f \n" " ld.ab %1, [%3, 4] \n" " adc.f %0, %0, %2 \n" " ld.ab %2, [%3, 4] \n" " adc.f %0, %0, %1 \n" "1: adc.f %0, %0, %2 \n" " add.cs %0,%0,1 \n" : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr) : "r"(ihl) : "cc", "lp_count", "memory"); return csum_fold(sum); } /* * TCP pseudo Header is 12 bytes: * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2] */ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) { __asm__ __volatile__( " add.f %0, %0, %1 \n" " adc.f %0, %0, %2 \n" " adc.f %0, %0, %3 \n" " adc.f %0, %0, %4 \n" " adc %0, %0, 0 \n" : "+&r"(sum) : "r"(saddr), "r"(daddr), #ifdef CONFIG_CPU_BIG_ENDIAN "r"(len), #else "r"(len << 8), #endif "r"(htons(proto)) : "cc"); return sum; } #define csum_fold csum_fold #define ip_fast_csum ip_fast_csum #define csum_tcpudp_nofold csum_tcpudp_nofold #include <asm-generic/checksum.h> #endif /* _ASM_ARC_CHECKSUM_H */ include/asm/linkage.h 0000644 00000002704 14722070650 0010540 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H #include <asm/dwarf.h> #define ASM_NL ` /* use '`' to mark new line in macro */ #define __ALIGN .align 4 #define __ALIGN_STR __stringify(__ALIGN) #ifdef __ASSEMBLY__ .macro ST2 e, o, off #ifdef CONFIG_ARC_HAS_LL64 std \e, [sp, \off] #else st \e, [sp, \off] st \o, [sp, \off+4] #endif .endm .macro LD2 e, o, off #ifdef CONFIG_ARC_HAS_LL64 ldd \e, [sp, \off] #else ld \e, [sp, \off] ld \o, [sp, \off+4] #endif .endm /* annotation for data we want in DCCM - if enabled in .config */ .macro ARCFP_DATA nm #ifdef CONFIG_ARC_HAS_DCCM .section .data.arcfp #else .section .data #endif .global \nm .endm /* annotation for data we want in DCCM - if enabled in .config */ .macro ARCFP_CODE #ifdef CONFIG_ARC_HAS_ICCM .section .text.arcfp, "ax",@progbits #else .section .text, "ax",@progbits #endif .endm #define ENTRY_CFI(name) \ .globl name ASM_NL \ ALIGN ASM_NL \ name: ASM_NL \ CFI_STARTPROC ASM_NL #define END_CFI(name) \ CFI_ENDPROC ASM_NL \ .size name, .-name #else /* !__ASSEMBLY__ */ #ifdef CONFIG_ARC_HAS_ICCM #define __arcfp_code __section(.text.arcfp) #else #define __arcfp_code __section(.text) #endif #ifdef CONFIG_ARC_HAS_DCCM #define __arcfp_data __section(.data.arcfp) #else #define __arcfp_data __section(.data) #endif #endif /* __ASSEMBLY__ */ #endif include/asm/string.h 0000644 00000002014 14722070650 0010426 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * vineetg: May 2011 * -We had half-optimised memset/memcpy, got better versions of those * -Added memcmp, strchr, strcpy, strcmp, strlen * * Amit Bhor: Codito Technologies 2004 */ #ifndef _ASM_ARC_STRING_H #define _ASM_ARC_STRING_H #include <linux/types.h> #define __HAVE_ARCH_MEMSET #define __HAVE_ARCH_MEMCPY #define __HAVE_ARCH_MEMCMP #define __HAVE_ARCH_STRCHR #define __HAVE_ARCH_STRCPY #define __HAVE_ARCH_STRCMP #define __HAVE_ARCH_STRLEN extern void *memset(void *ptr, int, __kernel_size_t); extern void *memcpy(void *, const void *, __kernel_size_t); extern void memzero(void *ptr, __kernel_size_t n); extern int memcmp(const void *, const void *, __kernel_size_t); extern char *strchr(const char *s, int c); extern char *strcpy(char *dest, const char *src); extern int strcmp(const char *cs, const char *ct); extern __kernel_size_t strlen(const char *); #endif /* _ASM_ARC_STRING_H */ plat-eznps/include/plat/mtm.h 0000644 00000002217 14722070650 0012177 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2015 EZchip Technologies. */ #ifndef _PLAT_EZNPS_MTM_H #define _PLAT_EZNPS_MTM_H #include <plat/ctop.h> static inline void *nps_mtm_reg_addr(u32 cpu, u32 reg) { struct global_id gid; u32 core, blkid; gid.value = cpu; core = gid.core; blkid = (((core & 0x0C) << 2) | (core & 0x03)); return nps_host_reg(cpu, blkid, reg); } #ifdef CONFIG_EZNPS_MTM_EXT #define NPS_CPU_TO_THREAD_NUM(cpu) \ ({ struct global_id gid; gid.value = cpu; gid.thread; }) /* MTM registers */ #define MTM_CFG(cpu) nps_mtm_reg_addr(cpu, 0x81) #define MTM_THR_INIT(cpu) nps_mtm_reg_addr(cpu, 0x92) #define MTM_THR_INIT_STS(cpu) nps_mtm_reg_addr(cpu, 0x93) #define get_thread(map) map.thread #define eznps_max_cpus 4096 #define eznps_cpus_per_cluster 256 void mtm_enable_core(unsigned int cpu); int mtm_enable_thread(int cpu); #else /* !CONFIG_EZNPS_MTM_EXT */ #define get_thread(map) 0 #define eznps_max_cpus 256 #define eznps_cpus_per_cluster 16 #define mtm_enable_core(cpu) #define mtm_enable_thread(cpu) 1 #define NPS_CPU_TO_THREAD_NUM(cpu) 0 #endif /* CONFIG_EZNPS_MTM_EXT */ #endif /* _PLAT_EZNPS_MTM_H */ plat-eznps/include/plat/smp.h 0000644 00000000350 14722070650 0012175 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2015 EZchip Technologies. */ #ifndef __PLAT_EZNPS_SMP_H #define __PLAT_EZNPS_SMP_H #ifdef CONFIG_SMP extern void res_service(void); #endif /* CONFIG_SMP */ #endif plat-eznps/include/plat/ctop.h 0000644 00000011672 14722070650 0012354 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2015 EZchip Technologies. */ #ifndef _PLAT_EZNPS_CTOP_H #define _PLAT_EZNPS_CTOP_H #ifndef CONFIG_ARC_PLAT_EZNPS #error "Incorrect ctop.h include" #endif #include <linux/bits.h> #include <linux/types.h> #include <soc/nps/common.h> /* core auxiliary registers */ #ifdef __ASSEMBLY__ #define CTOP_AUX_BASE (-0x800) #else #define CTOP_AUX_BASE 0xFFFFF800 #endif #define CTOP_AUX_GLOBAL_ID (CTOP_AUX_BASE + 0x000) #define CTOP_AUX_CLUSTER_ID (CTOP_AUX_BASE + 0x004) #define CTOP_AUX_CORE_ID (CTOP_AUX_BASE + 0x008) #define CTOP_AUX_THREAD_ID (CTOP_AUX_BASE + 0x00C) #define CTOP_AUX_LOGIC_GLOBAL_ID (CTOP_AUX_BASE + 0x010) #define CTOP_AUX_LOGIC_CLUSTER_ID (CTOP_AUX_BASE + 0x014) #define CTOP_AUX_LOGIC_CORE_ID (CTOP_AUX_BASE + 0x018) #define CTOP_AUX_MT_CTRL (CTOP_AUX_BASE + 0x020) #define CTOP_AUX_HW_COMPLY (CTOP_AUX_BASE + 0x024) #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) /* EZchip core instructions */ #define CTOP_INST_HWSCHD_WFT_IE12 0x3E6F7344 #define CTOP_INST_HWSCHD_OFF_R4 0x3C6F00BF #define CTOP_INST_HWSCHD_RESTORE_R4 0x3E6F7103 #define CTOP_INST_SCHD_RW 0x3E6F7004 #define CTOP_INST_SCHD_RD 0x3E6F7084 #define CTOP_INST_ASRI_0_R3 0x3B56003E #define CTOP_INST_XEX_DI_R2_R2_R3 0x4A664C00 #define CTOP_INST_EXC_DI_R2_R2_R3 0x4A664C01 #define CTOP_INST_AADD_DI_R2_R2_R3 0x4A664C02 #define CTOP_INST_AAND_DI_R2_R2_R3 0x4A664C04 #define CTOP_INST_AOR_DI_R2_R2_R3 0x4A664C05 #define CTOP_INST_AXOR_DI_R2_R2_R3 0x4A664C06 /* Do not use D$ for address in 2G-3G */ #define HW_COMPLY_KRN_NOT_D_CACHED BIT(28) #define NPS_MSU_EN_CFG 0x80 #define NPS_CRG_BLKID 0x480 #define NPS_CRG_SYNC_BIT BIT(0) #define NPS_GIM_BLKID 0x5C0 /* GIM registers and fields*/ #define NPS_GIM_UART_LINE BIT(7) #define NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE BIT(10) #define NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE BIT(11) #define NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE BIT(25) #define NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE BIT(26) #ifndef __ASSEMBLY__ /* Functional registers definition */ struct nps_host_reg_mtm_cfg { union { struct { u32 gen:1, gdis:1, clk_gate_dis:1, asb:1, __reserved:9, nat:3, ten:16; }; u32 value; }; }; struct nps_host_reg_mtm_cpu_cfg { union { struct { u32 csa:22, dmsid:6, __reserved:3, cs:1; }; u32 value; }; }; struct nps_host_reg_thr_init { union { struct { u32 str:1, __reserved:27, thr_id:4; }; u32 value; }; }; struct nps_host_reg_thr_init_sts { union { struct { u32 bsy:1, err:1, __reserved:26, thr_id:4; }; u32 value; }; }; struct nps_host_reg_msu_en_cfg { union { struct { u32 __reserved1:11, rtc_en:1, ipc_en:1, gim_1_en:1, gim_0_en:1, ipi_en:1, buff_e_rls_bmuw:1, buff_e_alc_bmuw:1, buff_i_rls_bmuw:1, buff_i_alc_bmuw:1, buff_e_rls_bmue:1, buff_e_alc_bmue:1, buff_i_rls_bmue:1, buff_i_alc_bmue:1, __reserved2:1, buff_e_pre_en:1, buff_i_pre_en:1, pmuw_ja_en:1, pmue_ja_en:1, pmuw_nj_en:1, pmue_nj_en:1, msu_en:1; }; u32 value; }; }; struct nps_host_reg_gim_p_int_dst { union { struct { u32 int_out_en:1, __reserved1:4, is:1, intm:2, __reserved2:4, nid:4, __reserved3:4, cid:4, __reserved4:4, tid:4; }; u32 value; }; }; /* AUX registers definition */ struct nps_host_reg_aux_dpc { union { struct { u32 ien:1, men:1, hen:1, reserved:29; }; u32 value; }; }; struct nps_host_reg_aux_udmc { union { struct { u32 dcp:1, cme:1, __reserved:19, nat:3, __reserved2:5, dcas:3; }; u32 value; }; }; struct nps_host_reg_aux_mt_ctrl { union { struct { u32 mten:1, hsen:1, scd:1, sten:1, st_cnt:8, __reserved:8, hs_cnt:8, __reserved1:4; }; u32 value; }; }; struct nps_host_reg_aux_hw_comply { union { struct { u32 me:1, le:1, te:1, knc:1, __reserved:28; }; u32 value; }; }; struct nps_host_reg_aux_lpc { union { struct { u32 mep:1, __reserved:31; }; u32 value; }; }; /* CRG registers */ #define REG_GEN_PURP_0 nps_host_reg_non_cl(NPS_CRG_BLKID, 0x1BF) /* GIM registers */ #define REG_GIM_P_INT_EN_0 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x100) #define REG_GIM_P_INT_POL_0 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x110) #define REG_GIM_P_INT_SENS_0 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x114) #define REG_GIM_P_INT_BLK_0 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x118) #define REG_GIM_P_INT_DST_10 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x13A) #define REG_GIM_P_INT_DST_11 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x13B) #define REG_GIM_P_INT_DST_25 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x149) #define REG_GIM_P_INT_DST_26 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x14A) #else .macro GET_CPU_ID reg lr \reg, [CTOP_AUX_LOGIC_GLOBAL_ID] #ifndef CONFIG_EZNPS_MTM_EXT lsr \reg, \reg, 4 #endif .endm #endif /* __ASSEMBLY__ */ #endif /* _PLAT_EZNPS_CTOP_H */ plat-eznps/Kconfig 0000644 00000003707 14722070650 0010156 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 # # For a description of the syntax of this configuration file, # see Documentation/kbuild/kconfig-language.rst. # menuconfig ARC_PLAT_EZNPS bool "\"EZchip\" ARC dev platform" depends on ISA_ARCOMPACT select CPU_BIG_ENDIAN select CLKSRC_NPS if !PHYS_ADDR_T_64BIT select EZNPS_GIC select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET help Support for EZchip development platforms, based on ARC700 cores. We handle few flavors: - Hardware Emulator AKA HE which is FPGA based chassis - Simulator based on MetaWare nSIM - NPS400 chip based on ASIC config EZNPS_MTM_EXT bool "ARC-EZchip MTM Extensions" select CPUMASK_OFFSTACK depends on ARC_PLAT_EZNPS && SMP default y help Here we add new hierarchy for CPUs topology. We got: Core Thread At the new thread level each CPU represent one HW thread. At highest hierarchy each core contain 16 threads, any of them seem like CPU from Linux point of view. All threads within same core share the execution unit of the core and HW scheduler round robin between them. config EZNPS_MEM_ERROR_ALIGN bool "ARC-EZchip Memory error as an exception" depends on EZNPS_MTM_EXT default n help On the real chip of the NPS, user memory errors are handled as a machine check exception, which is fatal, whereas on simulator platform for NPS, is handled as a Level 2 interrupt (just a stock ARC700) which is recoverable. This option makes simulator behave like hardware. config EZNPS_SHARED_AUX_REGS bool "ARC-EZchip Shared Auxiliary Registers Per Core" depends on ARC_PLAT_EZNPS default y help On the real chip of the NPS, auxiliary registers are shared between all the cpus of the core, whereas on simulator platform for NPS, each cpu has a different set of auxiliary registers. Configuration should be unset if auxiliary registers are not shared between the cpus of the core, so there will be a need to initialize them per cpu. plat-eznps/Makefile 0000644 00000000261 14722070650 0010303 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Makefile for the linux kernel. # obj-y := entry.o platform.o ctop.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_EZNPS_MTM_EXT) += mtm.o Kconfig.debug 0000644 00000000654 14722070650 0007144 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 config 16KSTACKS bool "Use 16Kb for kernel stacks instead of 8Kb" help If you say Y here the kernel will use a 16Kb stacksize for the kernel stack attached to each process/thread. The default is 8K. This increases the resident kernel footprint and will cause less threads to run on the system and also increase the pressure on the VM subsystem for higher order allocations. boot/dts/Makefile 0000644 00000000672 14722070650 0007751 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 # Built-in dtb builtindtb-y := nsim_700 ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"") builtindtb-y := $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME)) endif obj-y += $(builtindtb-y).dtb.o dtb-y := $(builtindtb-y).dtb # for CONFIG_OF_ALL_DTBS test dtstree := $(srctree)/$(src) dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) # board-specific dtc flags DTC_FLAGS_hsdk += --pad 20 boot/Makefile 0000644 00000002140 14722070650 0007147 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 targets := vmlinux.bin vmlinux.bin.gz # uImage build relies on mkimage being availble on your host for ARC target # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage # and make sure it's reacable from your PATH OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S LINUX_START_TEXT = $$(readelf -h vmlinux | \ grep "Entry point address" | grep -o 0x.*) UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE) UIMAGE_ENTRYADDR = $(LINUX_START_TEXT) targets += uImage.bin targets += uImage.gz targets += uImage.lzma extra-y += vmlinux.bin extra-y += vmlinux.bin.gz extra-y += vmlinux.bin.lzma $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE $(call if_changed,lzma) $(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE $(call if_changed,uimage,none) $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE $(call if_changed,uimage,gzip) $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE $(call if_changed,uimage,lzma) Kconfig 0000644 00000032521 14722070650 0006055 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) # config ARC def_bool y select ARC_TIMERS select ARCH_HAS_DMA_COHERENT_TO_PFN select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select ARCH_32BIT_OFF_T select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK select DMA_DIRECT_REMAP select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) select GENERIC_CLOCKEVENTS select GENERIC_FIND_FIRST_BIT # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_PENDING_IRQ if SMP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_STACKOVERFLOW select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_IOREMAP_PROT select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_MOD_ARCH_SPECIFIC select HAVE_OPROFILE select HAVE_PERF_EVENTS select HANDLE_DOMAIN_IRQ select IRQ_DOMAIN select MODULES_USE_ELF_RELA select OF select OF_EARLY_FLATTREE select PCI_SYSCALL if PCI select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING config ARCH_HAS_CACHE_LINE_SIZE def_bool y config TRACE_IRQFLAGS_SUPPORT def_bool y config LOCKDEP_SUPPORT def_bool y config SCHED_OMIT_FRAME_POINTER def_bool y config GENERIC_CSUM def_bool y config ARCH_DISCONTIGMEM_ENABLE def_bool n config ARCH_FLATMEM_ENABLE def_bool y config MMU def_bool y config NO_IOPORT_MAP def_bool y config GENERIC_CALIBRATE_DELAY def_bool y config GENERIC_HWEIGHT def_bool y config STACKTRACE_SUPPORT def_bool y select STACKTRACE config HAVE_ARCH_TRANSPARENT_HUGEPAGE def_bool y depends on ARC_MMU_V4 menu "ARC Architecture Configuration" menu "ARC Platform/SoC/Board" source "arch/arc/plat-tb10x/Kconfig" source "arch/arc/plat-axs10x/Kconfig" #New platform adds here source "arch/arc/plat-eznps/Kconfig" source "arch/arc/plat-hsdk/Kconfig" endmenu choice prompt "ARC Instruction Set" default ISA_ARCV2 config ISA_ARCOMPACT bool "ARCompact ISA" select CPU_NO_EFFICIENT_FFS help The original ARC ISA of ARC600/700 cores config ISA_ARCV2 bool "ARC ISA v2" select ARC_TIMERS_64BIT help ISA for the Next Generation ARC-HS cores endchoice menu "ARC CPU Configuration" choice prompt "ARC Core" default ARC_CPU_770 if ISA_ARCOMPACT default ARC_CPU_HS if ISA_ARCV2 if ISA_ARCOMPACT config ARC_CPU_750D bool "ARC750D" select ARC_CANT_LLSC help Support for ARC750 core config ARC_CPU_770 bool "ARC770" select ARC_HAS_SWAPE help Support for ARC770 core introduced with Rel 4.10 (Summer 2011) This core has a bunch of cool new features: -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4) Shared Address Spaces (for sharing TLB entries in MMU) -Caches: New Prog Model, Region Flush -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr endif #ISA_ARCOMPACT config ARC_CPU_HS bool "ARC-HS" depends on ISA_ARCV2 help Support for ARC HS38x Cores based on ARCv2 ISA The notable features are: - SMP configurations of upto 4 core with coherency - Optional L2 Cache and IO-Coherency - Revised Interrupt Architecture (multiple priorites, reg banks, auto stack switch, auto regfile save/restore) - MMUv4 (PIPT dcache, Huge Pages) - Instructions for * 64bit load/store: LDD, STD * Hardware assisted divide/remainder: DIV, REM * Function prologue/epilogue: ENTER_S, LEAVE_S * IRQ enable/disable: CLRI, SETI * pop count: FFS, FLS * SETcc, BMSKN, XBFU... endchoice config CPU_BIG_ENDIAN bool "Enable Big Endian Mode" help Build kernel for Big Endian Mode of ARC CPU config SMP bool "Symmetric Multi-Processing" select ARC_MCIP if ISA_ARCV2 help This enables support for systems with more than one CPU. if SMP config NR_CPUS int "Maximum number of CPUs (2-4096)" range 2 4096 default "4" config ARC_SMP_HALT_ON_RESET bool "Enable Halt-on-reset boot mode" help In SMP configuration cores can be configured as Halt-on-reset or they could all start at same time. For Halt-on-reset, non masters are parked until Master kicks them so they can start of at designated entry point. For other case, all jump to common entry point and spin wait for Master's signal. endif #SMP config ARC_MCIP bool "ARConnect Multicore IP (MCIP) Support " depends on ISA_ARCV2 default y if SMP help This IP block enables SMP in ARC-HS38 cores. It provides for cross-core interrupts, multi-core debug hardware semaphores, shared memory,.... menuconfig ARC_CACHE bool "Enable Cache Support" default y if ARC_CACHE config ARC_CACHE_LINE_SHIFT int "Cache Line Length (as power of 2)" range 5 7 default "6" help Starting with ARC700 4.9, Cache line length is configurable, This option specifies "N", with Line-len = 2 power N So line lengths of 32, 64, 128 are specified by 5,6,7, respectively Linux only supports same line lengths for I and D caches. config ARC_HAS_ICACHE bool "Use Instruction Cache" default y config ARC_HAS_DCACHE bool "Use Data Cache" default y config ARC_CACHE_PAGES bool "Per Page Cache Control" default y depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE help This can be used to over-ride the global I/D Cache Enable on a per-page basis (but only for pages accessed via MMU such as Kernel Virtual address or User Virtual Address) TLB entries have a per-page Cache Enable Bit. Note that Global I/D ENABLE + Per Page DISABLE works but corollary Global DISABLE + Per Page ENABLE won't work config ARC_CACHE_VIPT_ALIASING bool "Support VIPT Aliasing D$" depends on ARC_HAS_DCACHE && ISA_ARCOMPACT endif #ARC_CACHE config ARC_HAS_ICCM bool "Use ICCM" help Single Cycle RAMS to store Fast Path Code config ARC_ICCM_SZ int "ICCM Size in KB" default "64" depends on ARC_HAS_ICCM config ARC_HAS_DCCM bool "Use DCCM" help Single Cycle RAMS to store Fast Path Data config ARC_DCCM_SZ int "DCCM Size in KB" default "64" depends on ARC_HAS_DCCM config ARC_DCCM_BASE hex "DCCM map address" default "0xA0000000" depends on ARC_HAS_DCCM choice prompt "MMU Version" default ARC_MMU_V3 if ARC_CPU_770 default ARC_MMU_V2 if ARC_CPU_750D default ARC_MMU_V4 if ARC_CPU_HS if ISA_ARCOMPACT config ARC_MMU_V1 bool "MMU v1" help Orig ARC700 MMU config ARC_MMU_V2 bool "MMU v2" help Fixed the deficiency of v1 - possible thrashing in memcpy scenario when 2 D-TLB and 1 I-TLB entries index into same 2way set. config ARC_MMU_V3 bool "MMU v3" depends on ARC_CPU_770 help Introduced with ARC700 4.10: New Features Variable Page size (1k-16k), var JTLB size 128 x (2 or 4) Shared Address Spaces (SASID) endif config ARC_MMU_V4 bool "MMU v4" depends on ISA_ARCV2 endchoice choice prompt "MMU Page Size" default ARC_PAGE_SIZE_8K config ARC_PAGE_SIZE_8K bool "8KB" help Choose between 8k vs 16k config ARC_PAGE_SIZE_16K bool "16KB" depends on ARC_MMU_V3 || ARC_MMU_V4 config ARC_PAGE_SIZE_4K bool "4KB" depends on ARC_MMU_V3 || ARC_MMU_V4 endchoice choice prompt "MMU Super Page Size" depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE default ARC_HUGEPAGE_2M config ARC_HUGEPAGE_2M bool "2MB" config ARC_HUGEPAGE_16M bool "16MB" endchoice config NODES_SHIFT int "Maximum NUMA Nodes (as a power of 2)" default "0" if !DISCONTIGMEM default "1" if DISCONTIGMEM depends on NEED_MULTIPLE_NODES ---help--- Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory zones. if ISA_ARCOMPACT config ARC_COMPACT_IRQ_LEVELS bool "Setup Timer IRQ as high Priority" # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy depends on !SMP config ARC_FPU_SAVE_RESTORE bool "Enable FPU state persistence across context switch" help Double Precision Floating Point unit had dedicated regs which need to be saved/restored across context-switch. Note that ARC FPU is overly simplistic, unlike say x86, which has hardware pieces to allow software to conditionally save/restore, based on actual usage of FPU by a task. Thus our implemn does this for all tasks in system. endif #ISA_ARCOMPACT config ARC_CANT_LLSC def_bool n config ARC_HAS_LLSC bool "Insn: LLOCK/SCOND (efficient atomic ops)" default y depends on !ARC_CANT_LLSC config ARC_HAS_SWAPE bool "Insn: SWAPE (endian-swap)" default y if ISA_ARCV2 config ARC_USE_UNALIGNED_MEM_ACCESS bool "Enable unaligned access in HW" default y select HAVE_EFFICIENT_UNALIGNED_ACCESS help The ARC HS architecture supports unaligned memory access which is disabled by default. Enable unaligned access in hardware and use software to use it config ARC_HAS_LL64 bool "Insn: 64bit LDD/STD" help Enable gcc to generate 64-bit load/store instructions ISA mandates even/odd registers to allow encoding of two dest operands with 2 possible source operands. default y config ARC_HAS_DIV_REM bool "Insn: div, divu, rem, remu" default y config ARC_HAS_ACCL_REGS bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)" default y help Depending on the configuration, CPU can contain accumulator reg-pair (also referred to as r58:r59). These can also be used by gcc as GPR so kernel needs to save/restore per process config ARC_IRQ_NO_AUTOSAVE bool "Disable hardware autosave regfile on interrupts" default n help On HS cores, taken interrupt auto saves the regfile on stack. This is programmable and can be optionally disabled in which case software INTERRUPT_PROLOGUE/EPILGUE do the needed work endif # ISA_ARCV2 endmenu # "ARC CPU Configuration" config LINUX_LINK_BASE hex "Kernel link address" default "0x80000000" help ARC700 divides the 32 bit phy address space into two equal halves -Lower 2G (0 - 0x7FFF_FFFF ) is user virtual, translated by MMU -Upper 2G (0x8000_0000 onwards) is untranslated, for kernel Typically Linux kernel is linked at the start of untransalted addr, hence the default value of 0x8zs. However some customers have peripherals mapped at this addr, so Linux needs to be scooted a bit. If you don't know what the above means, leave this setting alone. This needs to match memory start address specified in Device Tree config LINUX_RAM_BASE hex "RAM base address" default LINUX_LINK_BASE help By default Linux is linked at base of RAM. However in some special cases (such as HSDK), Linux can't be linked at start of DDR, hence this option. config HIGHMEM bool "High Memory Support" select ARCH_DISCONTIGMEM_ENABLE help With ARC 2G:2G address split, only upper 2G is directly addressable by kernel. Enable this to potentially allow access to rest of 2G and PAE in future config ARC_HAS_PAE40 bool "Support for the 40-bit Physical Address Extension" depends on ISA_ARCV2 select HIGHMEM select PHYS_ADDR_T_64BIT help Enable access to physical memory beyond 4G, only supported on ARC cores with 40 bit Physical Addressing support config ARC_KVADDR_SIZE int "Kernel Virtual Address Space size (MB)" range 0 512 default "256" help The kernel address space is carved out of 256MB of translated address space for catering to vmalloc, modules, pkmap, fixmap. This however may not suffice vmalloc requirements of a 4K CPU EZChip system. So allow this to be stretched to 512 MB (by extending into the reserved kernel-user gutter) config ARC_CURR_IN_REG bool "Dedicate Register r25 for current_task pointer" default y help This reserved Register R25 to point to Current Task in kernel mode. This saves memory access for each such access config ARC_EMUL_UNALIGNED bool "Emulate unaligned memory access (userspace only)" select SYSCTL_ARCH_UNALIGN_NO_WARN select SYSCTL_ARCH_UNALIGN_ALLOW depends on ISA_ARCOMPACT help This enables misaligned 16 & 32 bit memory access from user space. Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide potential bugs in code config HZ int "Timer Frequency" default 100 config ARC_METAWARE_HLINK bool "Support for Metaware debugger assisted Host access" help This options allows a Linux userland apps to directly access host file system (open/creat/read/write etc) with help from Metaware Debugger. This can come in handy for Linux-host communication when there is no real usable peripheral such as EMAC. menuconfig ARC_DBG bool "ARC debugging" default y if ARC_DBG config ARC_DW2_UNWIND bool "Enable DWARF specific kernel stack unwind" default y select KALLSYMS help Compiles the kernel with DWARF unwind information and can be used to get stack backtraces. If you say Y here the resulting kernel image will be slightly larger but not slower, and it will give very useful debugging information. If you don't debug the kernel, you can say N, but we may not be able to solve problems without frame unwind information config ARC_DBG_TLB_PARANOIA bool "Paranoia Checks in Low Level TLB Handlers" endif config ARC_BUILTIN_DTB_NAME string "Built in DTB" help Set the name of the DTB to embed in the vmlinux binary Leaving it blank selects the minimal "skeleton" dtb endmenu # "ARC Architecture Configuration" config FORCE_MAX_ZONEORDER int "Maximum zone order" default "12" if ARC_HUGEPAGE_16M default "11" source "kernel/power/Kconfig" mm/Makefile 0000644 00000000365 14722070650 0006624 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) # obj-y := extable.o ioremap.o dma.o fault.o init.o obj-y += tlb.o tlbex.o cache.o mmap.o obj-$(CONFIG_HIGHMEM) += highmem.o plat-tb10x/Kconfig 0000644 00000001101 14722070650 0007737 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # Abilis Systems TB10x platform kernel configuration file # # Author: Christian Ruppert <christian.ruppert@abilis.com> # menuconfig ARC_PLAT_TB10X bool "Abilis TB10x" select PINCTRL select PINCTRL_TB10X select PINMUX select GPIOLIB select GPIO_TB10X select TB10X_IRQC help Support for platforms based on the TB10x home media gateway SOC by Abilis Systems. TB10x is based on the ARC700 CPU architecture. Say Y if you are building a kernel for one of the SOCs in this series (e.g. TB100 or TB101). If in doubt say N. plat-tb10x/Makefile 0000644 00000000325 14722070650 0010103 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # Abilis Systems TB10x platform Makefile # # Author: Christian Ruppert <christian.ruppert@abilis.com> # KBUILD_CFLAGS += -Iarch/arc/plat-tb10x/include obj-y += tb10x.o plat-hsdk/Kconfig 0000644 00000000467 14722070650 0007750 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com) # menuconfig ARC_SOC_HSDK bool "ARC HS Development Kit SOC" depends on ISA_ARCV2 select ARC_HAS_ACCL_REGS select ARC_IRQ_NO_AUTOSAVE select CLK_HSDK select RESET_CONTROLLER select RESET_HSDK select HAVE_PCI plat-hsdk/Makefile 0000644 00000000170 14722070650 0010074 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com) # obj-y := platform.o plat-sim/Makefile 0000644 00000000175 14722070650 0007740 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com) # obj-y := platform.o lib/Makefile 0000644 00000000674 14722070650 0006764 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) # lib-y := strchr-700.o strcpy-700.o strlen.o memcmp.o lib-$(CONFIG_ISA_ARCOMPACT) += memcpy-700.o memset.o strcmp.o lib-$(CONFIG_ISA_ARCV2) += memset-archs.o strcmp-archs.o ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS lib-$(CONFIG_ISA_ARCV2) +=memcpy-archs-unaligned.o else lib-$(CONFIG_ISA_ARCV2) +=memcpy-archs.o endif Makefile 0000644 00000006070 14722070650 0006212 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) # KBUILD_DEFCONFIG := nsim_hs_defconfig ifeq ($(CROSS_COMPILE),) CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-) endif cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38 ifdef CONFIG_ARC_CURR_IN_REG # For a global register defintion, make sure it gets passed to every file # We had a customer reported bug where some code built in kernel was NOT using # any kernel headers, and missing the r25 global register # Can't do unconditionally because of recursive include issues # due to <linux/thread_info.h> LINUXINCLUDE += -include $(srctree)/arch/arc/include/asm/current.h endif cflags-y += -fsection-anchors cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape ifdef CONFIG_ISA_ARCV2 ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS cflags-y += -munaligned-access else cflags-y += -mno-unaligned-access endif ifndef CONFIG_ARC_HAS_LL64 cflags-y += -mno-ll64 endif ifndef CONFIG_ARC_HAS_DIV_REM cflags-y += -mno-div-rem endif endif cfi := $(call as-instr,.cfi_startproc\n.cfi_endproc,-DARC_DW2_UNWIND_AS_CFI) cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi) # small data is default for elf32 tool-chain. If not usable, disable it # This also allows repurposing GP as scratch reg to gcc reg allocator disable_small_data := y cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name) # Modules with short calls might break for calls into builtin-kernel KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode # Finally dump eveything into kernel build system KBUILD_CFLAGS += $(cflags-y) KBUILD_AFLAGS += $(KBUILD_CFLAGS) KBUILD_LDFLAGS += $(ldflags-y) head-y := arch/arc/kernel/head.o # See arch/arc/Kbuild for content of core part of the kernel core-y += arch/arc/ # w/o this dtb won't embed into kernel binary core-y += arch/arc/boot/dts/ core-y += arch/arc/plat-sim/ core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/ core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/ core-$(CONFIG_ARC_PLAT_EZNPS) += arch/arc/plat-eznps/ core-$(CONFIG_ARC_SOC_HSDK) += arch/arc/plat-hsdk/ ifdef CONFIG_ARC_PLAT_EZNPS KBUILD_CPPFLAGS += -I$(srctree)/arch/arc/plat-eznps/include endif drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/ libs-y += arch/arc/lib/ $(LIBGCC) boot := arch/arc/boot boot_targets := uImage.bin uImage.gz uImage.lzma PHONY += $(boot_targets) $(boot_targets): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ uimage-default-y := uImage.bin uimage-default-$(CONFIG_KERNEL_GZIP) := uImage.gz uimage-default-$(CONFIG_KERNEL_LZMA) := uImage.lzma PHONY += uImage uImage: $(uimage-default-y) @ln -sf $< $(boot)/uImage @$(kecho) ' Image $(boot)/uImage is ready' CLEAN_FILES += $(boot)/uImage archclean: $(Q)$(MAKE) $(clean)=$(boot) Kbuild 0000644 00000000101 14722070650 0005674 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 obj-y += kernel/ obj-y += mm/ oprofile/Makefile 0000644 00000000445 14722070650 0010031 0 ustar 00 # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_OPROFILE) += oprofile.o DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ oprof.o cpu_buffer.o buffer_sync.o \ event_buffer.o oprofile_files.o \ oprofilefs.o oprofile_stats.o \ timer_int.o ) oprofile-y := $(DRIVER_OBJS) common.o kernel/Makefile 0000644 00000002064 14722070650 0007471 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) # # Pass UTS_MACHINE for user_regset definition CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' obj-y := arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o obj-$(CONFIG_ISA_ARCV2) += entry-arcv2.o intc-arcv2.o obj-$(CONFIG_MODULES) += arcksyms.o module.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_ARC_MCIP) += mcip.o obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_ARC_EMUL_UNALIGNED) += unaligned.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o CFLAGS_fpu.o += -mdpfp ifdef CONFIG_ARC_DW2_UNWIND CFLAGS_ctx_sw.o += -fno-omit-frame-pointer obj-y += ctx_sw.o else obj-y += ctx_sw_asm.o endif extra-y := vmlinux.lds head.o plat-axs10x/Kconfig 0000644 00000002363 14722070650 0010140 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com) # menuconfig ARC_PLAT_AXS10X bool "Synopsys ARC AXS10x Software Development Platforms" select DW_APB_ICTL select GPIO_DWAPB select OF_GPIO select HAVE_PCI select GENERIC_IRQ_CHIP select GPIOLIB select AXS101 if ISA_ARCOMPACT select AXS103 if ISA_ARCV2 help Support for the ARC AXS10x Software Development Platforms. The AXS10x Platforms consist of a mainboard with peripherals, on which several daughter cards can be placed. The daughter cards typically contain a CPU and memory. if ARC_PLAT_AXS10X config AXS101 depends on ISA_ARCOMPACT bool "AXS101 with AXC001 CPU Card (ARC 770D/EM6/AS221)" help This adds support for the 770D/EM6/AS221 CPU Card. Only the ARC 770D is supported in Linux. The AXS101 Platform consists of an AXS10x mainboard with this daughtercard. Please use the axs101.dts device tree with this configuration. config AXS103 bool "AXS103 with AXC003 CPU Card (ARC HS38x)" depends on ISA_ARCV2 help This adds support for the HS38x CPU Card. The AXS103 Platform consists of an AXS10x mainboard with this daughtercard. Please use the axs103.dts device tree with this configuration. endif plat-axs10x/Makefile 0000644 00000000221 14722070650 0010264 0 ustar 00 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com) # obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x.o
| ver. 1.4 |
Github
|
.
| PHP 7.4.3-4ubuntu2.24 | Генерация страницы: 0.01 |
proxy
|
phpinfo
|
Настройка