---- include/uapi/asm/cachectl.h ----

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_CSKY_CACHECTL_H
#define __ASM_CSKY_CACHECTL_H

/*
 * See "man cacheflush"
 */
#define ICACHE  (1<<0)
#define DCACHE  (1<<1)
#define BCACHE  (ICACHE|DCACHE)

#endif /* __ASM_CSKY_CACHECTL_H */

---- include/uapi/asm/unistd.h ----

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#define __ARCH_WANT_STAT64
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYNC_FILE_RANGE2
#include <asm-generic/unistd.h>

#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)
__SYSCALL(__NR_set_thread_area, sys_set_thread_area)
#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
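---- example (not in the archive): using the cacheflush syscall ----

A minimal user-space sketch of how the two arch-specific syscalls above are
used, assuming a C-SKY libc that exposes __NR_cacheflush; the argument order
(addr, len, cache) follows the sys_cacheflush() prototype in
include/asm/syscalls.h further down this archive.

#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define ICACHE (1 << 0)
#define DCACHE (1 << 1)
#define BCACHE (ICACHE | DCACHE)

int main(void)
{
    size_t len = 4096;
    unsigned char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    /* ... emit instructions into buf here (e.g. a JIT) ... */

    /* Write dirty dcache lines back and invalidate stale icache lines
     * before jumping to the freshly written code. */
    if (syscall(__NR_cacheflush, buf, len, BCACHE) != 0)
        perror("cacheflush");

    munmap(buf, len);
    return 0;
}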
---- include/uapi/asm/sigcontext.h ----

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_SIGCONTEXT_H
#define __ASM_CSKY_SIGCONTEXT_H

#include <asm/ptrace.h>

struct sigcontext {
    struct pt_regs  sc_pt_regs;
    struct user_fp  sc_user_fp;
};

#endif /* __ASM_CSKY_SIGCONTEXT_H */

---- include/uapi/asm/byteorder.h ----

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_BYTEORDER_H
#define __ASM_CSKY_BYTEORDER_H

#include <linux/byteorder/little_endian.h>

#endif /* __ASM_CSKY_BYTEORDER_H */

---- include/uapi/asm/Kbuild ----

# SPDX-License-Identifier: GPL-2.0
generic-y += ucontext.h

---- include/uapi/asm/ptrace.h ----

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef _CSKY_PTRACE_H
#define _CSKY_PTRACE_H

#ifndef __ASSEMBLY__

struct pt_regs {
    unsigned long   tls;
    unsigned long   lr;
    unsigned long   pc;
    unsigned long   sr;
    unsigned long   usp;

    /*
     * a0, a1, a2, a3:
     * abiv1: r2, r3, r4, r5
     * abiv2: r0, r1, r2, r3
     */
    unsigned long   orig_a0;
    unsigned long   a0;
    unsigned long   a1;
    unsigned long   a2;
    unsigned long   a3;

    /*
     * ABIV2: r4 ~ r13
     * ABIV1: r6 ~ r14, r1
     */
    unsigned long   regs[10];

#if defined(__CSKYABIV2__)
    /* r16 ~ r30 */
    unsigned long   exregs[15];

    unsigned long   rhi;
    unsigned long   rlo;
    unsigned long   dcsr;
#endif
};

struct user_fp {
    unsigned long   vr[96];
    unsigned long   fcr;
    unsigned long   fesr;
    unsigned long   fid;
    unsigned long   reserved;
};

#endif /* __ASSEMBLY__ */
#endif /* _CSKY_PTRACE_H */

---- include/uapi/asm/perf_regs.h ----

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef _ASM_CSKY_PERF_REGS_H
#define _ASM_CSKY_PERF_REGS_H

/* Index of struct pt_regs */
enum perf_event_csky_regs {
    PERF_REG_CSKY_TLS,
    PERF_REG_CSKY_LR,
    PERF_REG_CSKY_PC,
    PERF_REG_CSKY_SR,
    PERF_REG_CSKY_SP,
    PERF_REG_CSKY_ORIG_A0,
    PERF_REG_CSKY_A0,
    PERF_REG_CSKY_A1,
    PERF_REG_CSKY_A2,
    PERF_REG_CSKY_A3,
    PERF_REG_CSKY_REGS0,
    PERF_REG_CSKY_REGS1,
    PERF_REG_CSKY_REGS2,
    PERF_REG_CSKY_REGS3,
    PERF_REG_CSKY_REGS4,
    PERF_REG_CSKY_REGS5,
    PERF_REG_CSKY_REGS6,
    PERF_REG_CSKY_REGS7,
    PERF_REG_CSKY_REGS8,
    PERF_REG_CSKY_REGS9,
#if defined(__CSKYABIV2__)
    PERF_REG_CSKY_EXREGS0,
    PERF_REG_CSKY_EXREGS1,
    PERF_REG_CSKY_EXREGS2,
    PERF_REG_CSKY_EXREGS3,
    PERF_REG_CSKY_EXREGS4,
    PERF_REG_CSKY_EXREGS5,
    PERF_REG_CSKY_EXREGS6,
    PERF_REG_CSKY_EXREGS7,
    PERF_REG_CSKY_EXREGS8,
    PERF_REG_CSKY_EXREGS9,
    PERF_REG_CSKY_EXREGS10,
    PERF_REG_CSKY_EXREGS11,
    PERF_REG_CSKY_EXREGS12,
    PERF_REG_CSKY_EXREGS13,
    PERF_REG_CSKY_EXREGS14,
    PERF_REG_CSKY_HI,
    PERF_REG_CSKY_LO,
    PERF_REG_CSKY_DCSR,
#endif
    PERF_REG_CSKY_MAX,
};
#endif /* _ASM_CSKY_PERF_REGS_H */

---- include/asm/tlbflush.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

extern void flush_tlb_one(unsigned long vaddr);

#endif
---- include/asm/spinlock.h ----

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_SPINLOCK_H
#define __ASM_CSKY_SPINLOCK_H

#include <linux/spinlock_types.h>
#include <asm/barrier.h>

#ifdef CONFIG_QUEUED_RWLOCKS

/*
 * Ticket-based spin-locking.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    arch_spinlock_t lockval;
    u32 ticket_next = 1 << TICKET_NEXT;
    u32 *p = &lock->lock;
    u32 tmp;

    asm volatile (
        "1: ldex.w   %0, (%2) \n"
        "   mov      %1, %0   \n"
        "   add      %0, %3   \n"
        "   stex.w   %0, (%2) \n"
        "   bez      %0, 1b   \n"
        : "=&r" (tmp), "=&r" (lockval)
        : "r"(p), "r"(ticket_next)
        : "cc");

    while (lockval.tickets.next != lockval.tickets.owner)
        lockval.tickets.owner = READ_ONCE(lock->tickets.owner);

    smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
    u32 tmp, contended, res;
    u32 ticket_next = 1 << TICKET_NEXT;
    u32 *p = &lock->lock;

    do {
        asm volatile (
            "   ldex.w   %0, (%3)   \n"
            "   movi     %2, 1      \n"
            "   rotli    %1, %0, 16 \n"
            "   cmpne    %1, %0     \n"
            "   bt       1f         \n"
            "   movi     %2, 0      \n"
            "   add      %0, %0, %4 \n"
            "   stex.w   %0, (%3)   \n"
            "1:                     \n"
            : "=&r" (res), "=&r" (tmp), "=&r" (contended)
            : "r"(p), "r"(ticket_next)
            : "cc");
    } while (!res);

    if (!contended)
        smp_mb();

    return !contended;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    smp_mb();
    WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
    return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
    return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
    struct __raw_tickets tickets = READ_ONCE(lock->tickets);

    return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended arch_spin_is_contended

#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()

#else /* CONFIG_QUEUED_RWLOCKS */

/*
 * Test-and-set spin-locking.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    u32 *p = &lock->lock;
    u32 tmp;

    asm volatile (
        "1: ldex.w   %0, (%1) \n"
        "   bnez     %0, 1b   \n"
        "   movi     %0, 1    \n"
        "   stex.w   %0, (%1) \n"
        "   bez      %0, 1b   \n"
        : "=&r" (tmp)
        : "r"(p)
        : "cc");
    smp_mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    smp_mb();
    WRITE_ONCE(lock->lock, 0);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
    u32 *p = &lock->lock;
    u32 tmp;

    asm volatile (
        "1: ldex.w   %0, (%1) \n"
        "   bnez     %0, 2f   \n"
        "   movi     %0, 1    \n"
        "   stex.w   %0, (%1) \n"
        "   bez      %0, 1b   \n"
        "   movi     %0, 0    \n"
        "2:                   \n"
        : "=&r" (tmp)
        : "r"(p)
        : "cc");

    if (!tmp)
        smp_mb();

    return !tmp;
}

#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)

/*
 * read lock/unlock/trylock
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
    u32 *p = &lock->lock;
    u32 tmp;

    asm volatile (
        "1: ldex.w   %0, (%1) \n"
        "   blz      %0, 1b   \n"
        "   addi     %0, 1    \n"
        "   stex.w   %0, (%1) \n"
        "   bez      %0, 1b   \n"
        : "=&r" (tmp)
        : "r"(p)
        : "cc");
    smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
    u32 *p = &lock->lock;
    u32 tmp;

    smp_mb();
    asm volatile (
        "1: ldex.w   %0, (%1) \n"
        "   subi     %0, 1    \n"
        "   stex.w   %0, (%1) \n"
        "   bez      %0, 1b   \n"
        : "=&r" (tmp)
        : "r"(p)
        : "cc");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
    u32 *p = &lock->lock;
    u32 tmp;

    asm volatile (
        "1: ldex.w   %0, (%1) \n"
        "   blz      %0, 2f   \n"
        "   addi     %0, 1    \n"
        "   stex.w   %0, (%1) \n"
        "   bez      %0, 1b   \n"
        "   movi     %0, 0    \n"
        "2:                   \n"
        : "=&r" (tmp)
        : "r"(p)
        : "cc");

    if (!tmp)
        smp_mb();

    return !tmp;
}

/*
 * write lock/unlock/trylock
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
    u32 *p = &lock->lock;
    u32 tmp;

    asm volatile (
        "1: ldex.w   %0, (%1) \n"
        "   bnez     %0, 1b   \n"
        "   subi     %0, 1    \n"
        "   stex.w   %0, (%1) \n"
        "   bez      %0, 1b   \n"
        : "=&r" (tmp)
        : "r"(p)
        : "cc");
    smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
    smp_mb();
    WRITE_ONCE(lock->lock, 0);
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
    u32 *p = &lock->lock;
    u32 tmp;

    asm volatile (
        "1: ldex.w   %0, (%1) \n"
        "   bnez     %0, 2f   \n"
        "   subi     %0, 1    \n"
        "   stex.w   %0, (%1) \n"
        "   bez      %0, 1b   \n"
        "   movi     %0, 0    \n"
        "2:                   \n"
        : "=&r" (tmp)
        : "r"(p)
        : "cc");

    if (!tmp)
        smp_mb();

    return !tmp;
}

#endif /* CONFIG_QUEUED_RWLOCKS */
#endif /* __ASM_CSKY_SPINLOCK_H */
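---- example (not in the archive): the ticket encoding modeled in user space ----

A single-threaded user-space model of the ticket layout the lock above relies
on: the 32-bit lock word holds two little-endian u16 halves, owner in the low
half and next in the high half (TICKET_NEXT == 16; see spinlock_types.h later
in this archive). Illustration only, without the atomicity of ldex.w/stex.w.

#include <stdint.h>
#include <stdio.h>

#define TICKET_NEXT 16

int main(void)
{
    uint32_t lock = 0;

    /* lock: take a ticket, i.e. lock += 1 << TICKET_NEXT */
    uint16_t my_ticket = (uint16_t)(lock >> TICKET_NEXT);
    lock += (uint32_t)1 << TICKET_NEXT;

    /* spin until owner (low half) catches up with our ticket;
     * the kernel re-reads owner with READ_ONCE() */
    while ((uint16_t)lock != my_ticket)
        ;

    printf("acquired: owner=%u next=%u\n",
           (unsigned)(uint16_t)lock, (unsigned)(lock >> TICKET_NEXT));

    /* unlock: advance owner in the low half */
    lock = (lock & 0xffff0000u) | (uint16_t)((uint16_t)lock + 1);
    return 0;
}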
---- include/asm/page.h ----

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PAGE_H
#define __ASM_CSKY_PAGE_H

#include <asm/setup.h>
#include <asm/cache.h>
#include <linux/const.h>

/*
 * PAGE_SHIFT determines the page size: 4KB
 */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define THREAD_SIZE     (PAGE_SIZE * 2)
#define THREAD_MASK     (~(THREAD_SIZE - 1))
#define THREAD_SHIFT    (PAGE_SHIFT + 1)

/*
 * On C-SKY the user-space:kernel-space split is fixed at 2GB:2GB by
 * hardware, and two segment registers (MSA0 + MSA1) each map a 512MB
 * physical address region. We use them to map the kernel's 1GB
 * direct-map area; memory beyond 1GB is reached through highmem.
 */
#define PAGE_OFFSET     0x80000000
#define SSEG_SIZE       0x20000000
#define LOWMEM_LIMIT    (SSEG_SIZE * 2)

#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))

#ifndef __ASSEMBLY__

#include <linux/pfn.h>

#define virt_to_pfn(kaddr)      (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(kaddr)  ((void *)(kaddr) >= (void *)PAGE_OFFSET && \
                        (void *)(kaddr) < high_memory)
#define pfn_valid(pfn)          ((pfn) >= ARCH_PFN_OFFSET && \
                        ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

extern void *memset(void *dest, int c, size_t l);
extern void *memcpy(void *to, const void *from, size_t l);

#define clear_page(page)        memset((page), 0, PAGE_SIZE)
#define copy_page(to, from)     memcpy((to), (from), PAGE_SIZE)

#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)
#define phys_to_page(paddr)     (pfn_to_page(PFN_DOWN(paddr)))

struct page;

#include <abi/page.h>

struct vm_area_struct;

typedef struct { unsigned long pte_low; } pte_t;
#define pte_val(x)      ((x).pte_low)

typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pgd_val(x)      ((x).pgd)
#define pgprot_val(x)   ((x).pgprot)

#define ptep_buddy(x)   ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

#define __pte(x)        ((pte_t) { (x) })
#define __pgd(x)        ((pgd_t) { (x) })
#define __pgprot(x)     ((pgprot_t) { (x) })

extern unsigned long va_pa_offset;

#define ARCH_PFN_OFFSET PFN_DOWN(va_pa_offset + PHYS_OFFSET_OFFSET)

#define __pa(x)         ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset)
#define __va(x)         ((void *)((unsigned long)(x) + PAGE_OFFSET - \
                        va_pa_offset))
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))

#define MAP_NR(x)       PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
                                 PHYS_OFFSET_OFFSET)
#define virt_to_page(x) (mem_map + MAP_NR(x))

#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \
                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define pfn_to_kaddr(x) __va(PFN_PHYS(x))

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_CSKY_PAGE_H */
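---- example (not in the archive): the lowmem linear-map arithmetic ----

A runnable user-space sketch of the __pa()/__va() arithmetic from page.h.
va_pa_offset is set at boot; the value 0 below is assumed purely for
illustration (i.e. DRAM physically based at 0x00000000).

#include <stdio.h>

#define PAGE_OFFSET 0x80000000UL
static unsigned long va_pa_offset = 0; /* assumed boot-time value */

static unsigned long pa_of(unsigned long va)
{
    return va - PAGE_OFFSET + va_pa_offset; /* __pa() */
}

static unsigned long va_of(unsigned long pa)
{
    return pa + PAGE_OFFSET - va_pa_offset; /* __va() */
}

int main(void)
{
    unsigned long va = 0x80100000UL; /* a lowmem kernel address */

    printf("va 0x%08lx -> pa 0x%08lx -> va 0x%08lx\n",
           va, pa_of(va), va_of(pa_of(va)));
    return 0;
}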
---- include/asm/cacheflush.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_CACHEFLUSH_H
#define __ASM_CSKY_CACHEFLUSH_H

#include <abi/cacheflush.h>

#endif /* __ASM_CSKY_CACHEFLUSH_H */

---- include/asm/barrier.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_BARRIER_H
#define __ASM_CSKY_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()   asm volatile ("nop\n":::"memory")

/*
 * sync:        completion barrier, all sync.xx instructions
 *              guarantee the last response received by bus transaction
 *              made by ld/st instructions before sync.s
 * sync.s:      inherit from sync, but also shareable to other cores
 * sync.i:      inherit from sync, but also flush cpu pipeline
 * sync.is:     the same with sync.i + sync.s
 *
 * bar.brwarw:  ordering barrier for all load/store instructions before it
 * bar.brwarws: ordering barrier for all load/store instructions before it
 *              and shareable to other cores
 * bar.brar:    ordering barrier for all load instructions before it
 * bar.brars:   ordering barrier for all load instructions before it
 *              and shareable to other cores
 * bar.bwaw:    ordering barrier for all store instructions before it
 * bar.bwaws:   ordering barrier for all store instructions before it
 *              and shareable to other cores
 */

#ifdef CONFIG_CPU_HAS_CACHEV2
#define mb()            asm volatile ("sync.s\n":::"memory")

#ifdef CONFIG_SMP
#define __smp_mb()      asm volatile ("bar.brwarws\n":::"memory")
#define __smp_rmb()     asm volatile ("bar.brars\n":::"memory")
#define __smp_wmb()     asm volatile ("bar.bwaws\n":::"memory")
#endif /* CONFIG_SMP */

#define sync_is()       asm volatile ("sync.is\n":::"memory")

#else /* !CONFIG_CPU_HAS_CACHEV2 */
#define mb()            asm volatile ("sync\n":::"memory")
#endif

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_BARRIER_H */
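---- example (not in the archive): pairing smp_wmb()/smp_rmb() ----

A hedged sketch of the classic producer/consumer pairing these barriers
exist for: on CACHEV2 SMP parts, smp_wmb() becomes bar.bwaws and smp_rmb()
becomes bar.brars per the table above. The function and variable names are
illustrative, not kernel API.

#include <linux/compiler.h>
#include <asm/barrier.h>

static int payload;
static int ready;

void producer(int v)
{
    payload = v;
    smp_wmb();              /* order payload store before ready store */
    WRITE_ONCE(ready, 1);
}

int consumer(void)
{
    while (!READ_ONCE(ready))
        ;                   /* spin until the flag is observed */
    smp_rmb();              /* order ready load before payload load */
    return payload;
}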
---- include/asm/syscall.h ----

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_SYSCALL_H
#define __ASM_SYSCALL_H

#include <linux/sched.h>
#include <linux/err.h>
#include <abi/regdef.h>
#include <uapi/linux/audit.h>

extern void *sys_call_table[];

static inline int
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
    return regs_syscallid(regs);
}

static inline void
syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int sysno)
{
    regs_syscallid(regs) = sysno;
}

static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
    regs->a0 = regs->orig_a0;
}

static inline long
syscall_get_error(struct task_struct *task, struct pt_regs *regs)
{
    unsigned long error = regs->a0;

    return IS_ERR_VALUE(error) ? error : 0;
}

static inline long
syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
{
    return regs->a0;
}

static inline void
syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
                         int error, long val)
{
    regs->a0 = (long) error ?: val;
}

static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
                      unsigned long *args)
{
    args[0] = regs->orig_a0;
    args++;
    memcpy(args, &regs->a1, 5 * sizeof(args[0]));
}

static inline void
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
                      const unsigned long *args)
{
    regs->orig_a0 = args[0];
    args++;
    memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
}

static inline int
syscall_get_arch(struct task_struct *task)
{
    return AUDIT_ARCH_CSKY;
}

#endif /* __ASM_SYSCALL_H */

---- include/asm/perf_event.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PERF_EVENT_H
#define __ASM_CSKY_PERF_EVENT_H

#include <abi/regdef.h>

#define perf_arch_fetch_caller_regs(regs, __ip) { \
    (regs)->pc = (__ip); \
    regs_fp(regs) = (unsigned long) __builtin_frame_address(0); \
    asm volatile("mov %0, sp\n":"=r"((regs)->usp)); \
}

#endif /* __ASM_PERF_EVENT_ELF_H */

---- include/asm/bitops.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_BITOPS_H
#define __ASM_CSKY_BITOPS_H

#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * asm-generic/bitops/ffs.h
 */
static inline int ffs(int x)
{
    if (!x)
        return 0;

    asm volatile (
        "brev %0\n"
        "ff1  %0\n"
        "addi %0, 1\n"
        : "=&r"(x)
        : "0"(x));
    return x;
}

/*
 * asm-generic/bitops/__ffs.h
 */
static __always_inline unsigned long __ffs(unsigned long x)
{
    asm volatile (
        "brev %0\n"
        "ff1  %0\n"
        : "=&r"(x)
        : "0"(x));
    return x;
}

/*
 * asm-generic/bitops/fls.h
 */
static __always_inline int fls(unsigned int x)
{
    asm volatile(
        "ff1 %0\n"
        : "=&r"(x)
        : "0"(x));

    return (32 - x);
}

/*
 * asm-generic/bitops/__fls.h
 */
static __always_inline unsigned long __fls(unsigned long x)
{
    return fls(x) - 1;
}

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/atomic.h>

/*
 * FIXME: why can only the atomic versions be used here?
 */
#include <asm-generic/bitops/non-atomic.h>
#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __ASM_CSKY_BITOPS_H */
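---- example (not in the archive): bit-search semantics check ----

A quick user-space check of the semantics the brev/ff1 sequences above
implement: ffs() is 1-based from the LSB and returns 0 for 0, while fls() is
1-based from the MSB. GCC builtins stand in here for comparison only; the
kernel versions are the inline asm ones.

#include <stdio.h>

static int ffs_ref(int x)          { return __builtin_ffs(x); }
static int fls_ref(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

int main(void)
{
    printf("ffs(0x08)=%d fls(0x08)=%d\n", ffs_ref(0x08), fls_ref(0x08)); /* 4 4 */
    printf("ffs(0)=%d    fls(0)=%d\n",    ffs_ref(0),    fls_ref(0));    /* 0 0 */
    printf("ffs(0x90)=%d fls(0x90)=%d\n", ffs_ref(0x90), fls_ref(0x90)); /* 5 8 */
    return 0;
}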
---- include/asm/spinlock_types.h ----

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
#define __ASM_CSKY_SPINLOCK_TYPES_H

#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif

#define TICKET_NEXT 16

typedef struct {
    union {
        u32 lock;
        struct __raw_tickets {
            /* little endian */
            u16 owner;
            u16 next;
        } tickets;
    };
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED   { { 0 } }

#ifdef CONFIG_QUEUED_RWLOCKS
#include <asm-generic/qrwlock_types.h>

#else /* CONFIG_NR_CPUS > 2 */

typedef struct {
    u32 lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED     { 0 }

#endif /* CONFIG_QUEUED_RWLOCKS */
#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */

---- include/asm/mmu.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_MMU_H
#define __ASM_CSKY_MMU_H

typedef struct {
    atomic64_t  asid;
    void        *vdso;
} mm_context_t;

#endif /* __ASM_CSKY_MMU_H */

---- include/asm/uaccess.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_UACCESS_H
#define __ASM_CSKY_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/version.h>
#include <asm/segment.h>

static inline int access_ok(const void *addr, unsigned long size)
{
    unsigned long limit = current_thread_info()->addr_limit.seg;

    return (((unsigned long)addr < limit) &&
            ((unsigned long)(addr + size) < limit));
}

#define __addr_ok(addr) (access_ok(addr, 0))

extern int __put_user_bad(void);

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * Ckcore, we can just do these as direct assignments. (Of course, the
 * exception handling means that it's no longer "just"...)
 */

#define put_user(x, ptr) \
    __put_user_check((x), (ptr), sizeof(*(ptr)))

#define __put_user(x, ptr) \
    __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

#define __ptr(x) ((unsigned long *)(x))

#define get_user(x, ptr) \
    __get_user_check((x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
    __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

#define __put_user_nocheck(x, ptr, size) \
({ \
    long __pu_err = 0; \
    typeof(*(ptr)) *__pu_addr = (ptr); \
    typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
    if (__pu_addr) \
        __put_user_size(__pu_val, (__pu_addr), (size), __pu_err); \
    __pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
    long __pu_err = -EFAULT; \
    typeof(*(ptr)) *__pu_addr = (ptr); \
    typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
    if (access_ok(__pu_addr, size) && __pu_addr) \
        __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \
    __pu_err; \
})

#define __put_user_size(x, ptr, size, retval) \
do { \
    retval = 0; \
    switch (size) { \
    case 1: \
        __put_user_asm_b(x, ptr, retval); \
        break; \
    case 2: \
        __put_user_asm_h(x, ptr, retval); \
        break; \
    case 4: \
        __put_user_asm_w(x, ptr, retval); \
        break; \
    case 8: \
        __put_user_asm_64(x, ptr, retval); \
        break; \
    default: \
        __put_user_bad(); \
    } \
} while (0)

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * Note that PC at a fault is the address *after* the faulting
 * instruction.
 */
#define __put_user_asm_b(x, ptr, err) \
do { \
    int errcode; \
    asm volatile( \
    "1:     stb   %1, (%2,0) \n" \
    "       br    3f         \n" \
    "2:     mov   %0, %3     \n" \
    "       br    3f         \n" \
    ".section __ex_table, \"a\" \n" \
    ".align 2                \n" \
    ".long  1b,2b            \n" \
    ".previous               \n" \
    "3:                      \n" \
    : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
    : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
    : "memory"); \
} while (0)

#define __put_user_asm_h(x, ptr, err) \
do { \
    int errcode; \
    asm volatile( \
    "1:     sth   %1, (%2,0) \n" \
    "       br    3f         \n" \
    "2:     mov   %0, %3     \n" \
    "       br    3f         \n" \
    ".section __ex_table, \"a\" \n" \
    ".align 2                \n" \
    ".long  1b,2b            \n" \
    ".previous               \n" \
    "3:                      \n" \
    : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
    : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
    : "memory"); \
} while (0)

#define __put_user_asm_w(x, ptr, err) \
do { \
    int errcode; \
    asm volatile( \
    "1:     stw   %1, (%2,0) \n" \
    "       br    3f         \n" \
    "2:     mov   %0, %3     \n" \
    "       br    3f         \n" \
    ".section __ex_table,\"a\" \n" \
    ".align 2                \n" \
    ".long  1b, 2b           \n" \
    ".previous               \n" \
    "3:                      \n" \
    : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
    : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
    : "memory"); \
} while (0)

#define __put_user_asm_64(x, ptr, err) \
do { \
    int tmp; \
    int errcode; \
    typeof(*(ptr))src = (typeof(*(ptr)))x; \
    typeof(*(ptr))*psrc = &src; \
    \
    asm volatile( \
    "       ldw   %3, (%1, 0) \n" \
    "1:     stw   %3, (%2, 0) \n" \
    "       ldw   %3, (%1, 4) \n" \
    "2:     stw   %3, (%2, 4) \n" \
    "       br    4f          \n" \
    "3:     mov   %0, %4      \n" \
    "       br    4f          \n" \
    ".section __ex_table, \"a\" \n" \
    ".align 2                 \n" \
    ".long  1b, 3b            \n" \
    ".long  2b, 3b            \n" \
    ".previous                \n" \
    "4:                       \n" \
    : "=r"(err), "=r"(psrc), "=r"(ptr), \
      "=r"(tmp), "=r"(errcode) \
    : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \
    : "memory"); \
} while (0)

#define __get_user_nocheck(x, ptr, size) \
({ \
    long __gu_err; \
    __get_user_size(x, (ptr), (size), __gu_err); \
    __gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
    int __gu_err = -EFAULT; \
    const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
    if (access_ok(__gu_ptr, size) && __gu_ptr) \
        __get_user_size(x, __gu_ptr, size, __gu_err); \
    __gu_err; \
})

#define __get_user_size(x, ptr, size, retval) \
do { \
    switch (size) { \
    case 1: \
        __get_user_asm_common((x), ptr, "ldb", retval); \
        break; \
    case 2: \
        __get_user_asm_common((x), ptr, "ldh", retval); \
        break; \
    case 4: \
        __get_user_asm_common((x), ptr, "ldw", retval); \
        break; \
    default: \
        x = 0; \
        (retval) = __get_user_bad(); \
    } \
} while (0)

#define __get_user_asm_common(x, ptr, ins, err) \
do { \
    int errcode; \
    asm volatile( \
    "1:     " ins " %1, (%4,0) \n" \
    "       br    3f           \n" \
    /* Fix up codes */ \
    "2:     mov   %0, %2       \n" \
    "       movi  %1, 0        \n" \
    "       br    3f           \n" \
    ".section __ex_table,\"a\" \n" \
    ".align 2                  \n" \
    ".long  1b, 2b             \n" \
    ".previous                 \n" \
    "3:                        \n" \
    : "=r"(err), "=r"(x), "=r"(errcode) \
    : "0"(0), "r"(ptr), "2"(-EFAULT) \
    : "memory"); \
} while (0)

extern int __get_user_bad(void);

#define ___copy_to_user(to, from, n) \
do { \
    int w0, w1, w2, w3; \
    asm volatile( \
    "0:     cmpnei  %1, 0           \n" \
    "       bf      8f              \n" \
    "       mov     %3, %1          \n" \
    "       or      %3, %2          \n" \
    "       andi    %3, 3           \n" \
    "       cmpnei  %3, 0           \n" \
    "       bf      1f              \n" \
    "       br      5f              \n" \
    "1:     cmplti  %0, 16          \n" /* 4W */ \
    "       bt      3f              \n" \
    "       ldw     %3, (%2, 0)     \n" \
    "       ldw     %4, (%2, 4)     \n" \
    "       ldw     %5, (%2, 8)     \n" \
    "       ldw     %6, (%2, 12)    \n" \
    "2:     stw     %3, (%1, 0)     \n" \
    "9:     stw     %4, (%1, 4)     \n" \
    "10:    stw     %5, (%1, 8)     \n" \
    "11:    stw     %6, (%1, 12)    \n" \
    "       addi    %2, 16          \n" \
    "       addi    %1, 16          \n" \
    "       subi    %0, 16          \n" \
    "       br      1b              \n" \
    "3:     cmplti  %0, 4           \n" /* 1W */ \
    "       bt      5f              \n" \
    "       ldw     %3, (%2, 0)     \n" \
    "4:     stw     %3, (%1, 0)     \n" \
    "       addi    %2, 4           \n" \
    "       addi    %1, 4           \n" \
    "       subi    %0, 4           \n" \
    "       br      3b              \n" \
    "5:     cmpnei  %0, 0           \n" /* 1B */ \
    "       bf      13f             \n" \
    "       ldb     %3, (%2, 0)     \n" \
    "6:     stb     %3, (%1, 0)     \n" \
    "       addi    %2, 1           \n" \
    "       addi    %1, 1           \n" \
    "       subi    %0, 1           \n" \
    "       br      5b              \n" \
    "7:     subi    %0, 4           \n" \
    "8:     subi    %0, 4           \n" \
    "12:    subi    %0, 4           \n" \
    "       br      13f             \n" \
    ".section __ex_table, \"a\"     \n" \
    ".align 2                       \n" \
    ".long  2b, 13f                 \n" \
    ".long  4b, 13f                 \n" \
    ".long  6b, 13f                 \n" \
    ".long  9b, 12b                 \n" \
    ".long  10b, 8b                 \n" \
    ".long  11b, 7b                 \n" \
    ".previous                      \n" \
    "13:                            \n" \
    : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \
      "=r"(w1), "=r"(w2), "=r"(w3) \
    : "0"(n), "1"(to), "2"(from) \
    : "memory"); \
} while (0)

#define ___copy_from_user(to, from, n) \
do { \
    int tmp; \
    int nsave; \
    asm volatile( \
    "0:     cmpnei  %1, 0           \n" \
    "       bf      7f              \n" \
    "       mov     %3, %1          \n" \
    "       or      %3, %2          \n" \
    "       andi    %3, 3           \n" \
    "       cmpnei  %3, 0           \n" \
    "       bf      1f              \n" \
    "       br      5f              \n" \
    "1:     cmplti  %0, 16          \n" \
    "       bt      3f              \n" \
    "2:     ldw     %3, (%2, 0)     \n" \
    "10:    ldw     %4, (%2, 4)     \n" \
    "       stw     %3, (%1, 0)     \n" \
    "       stw     %4, (%1, 4)     \n" \
    "11:    ldw     %3, (%2, 8)     \n" \
    "12:    ldw     %4, (%2, 12)    \n" \
    "       stw     %3, (%1, 8)     \n" \
    "       stw     %4, (%1, 12)    \n" \
    "       addi    %2, 16          \n" \
    "       addi    %1, 16          \n" \
    "       subi    %0, 16          \n" \
    "       br      1b              \n" \
    "3:     cmplti  %0, 4           \n" \
    "       bt      5f              \n" \
    "4:     ldw     %3, (%2, 0)     \n" \
    "       stw     %3, (%1, 0)     \n" \
    "       addi    %2, 4           \n" \
    "       addi    %1, 4           \n" \
    "       subi    %0, 4           \n" \
    "       br      3b              \n" \
    "5:     cmpnei  %0, 0           \n" \
    "       bf      7f              \n" \
    "6:     ldb     %3, (%2, 0)     \n" \
    "       stb     %3, (%1, 0)     \n" \
    "       addi    %2, 1           \n" \
    "       addi    %1, 1           \n" \
    "       subi    %0, 1           \n" \
    "       br      5b              \n" \
    "8:     stw     %3, (%1, 0)     \n" \
    "       subi    %0, 4           \n" \
    "       bf      7f              \n" \
    "9:     subi    %0, 8           \n" \
    "       bf      7f              \n" \
    "13:    stw     %3, (%1, 8)     \n" \
    "       subi    %0, 12          \n" \
    "       bf      7f              \n" \
    ".section __ex_table, \"a\"     \n" \
    ".align 2                       \n" \
    ".long  2b, 7f                  \n" \
    ".long  4b, 7f                  \n" \
    ".long  6b, 7f                  \n" \
    ".long  10b, 8b                 \n" \
    ".long  11b, 9b                 \n" \
    ".long  12b,13b                 \n" \
    ".previous                      \n" \
    "7:                             \n" \
    : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \
      "=r"(tmp) \
    : "0"(n), "1"(to), "2"(from) \
    : "memory"); \
} while (0)

unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);

unsigned long clear_user(void *to, unsigned long n);
unsigned long __clear_user(void __user *to, unsigned long n);

long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
long strnlen_user(const char *src, long n);

#define strlen_user(str) strnlen_user(str, 32767)

struct exception_table_entry {
    unsigned long insn;
    unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* __ASM_CSKY_UACCESS_H */
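---- example (not in the archive): the access_ok()/__get_user() split ----

A hedged sketch of the pattern the comment in uaccess.h describes: validate
the user range once, then use the unchecked accessors for the individual
fields. csky_fetch_pair() and struct pair are illustrative names, not kernel
API.

#include <linux/types.h>
#include <linux/uaccess.h>

struct pair { u32 a; u32 b; };

static long csky_fetch_pair(const struct pair __user *up, u32 *a, u32 *b)
{
    if (!access_ok(up, sizeof(*up)))
        return -EFAULT;

    /* address space already checked, so __get_user() is sufficient */
    if (__get_user(*a, &up->a) || __get_user(*b, &up->b))
        return -EFAULT;
    return 0;
}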
---- include/asm/bug.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_BUG_H
#define __ASM_CSKY_BUG_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>

#define BUG() \
do { \
    asm volatile ("bkpt\n"); \
    unreachable(); \
} while (0)

#define HAVE_ARCH_BUG

#include <asm-generic/bug.h>

struct pt_regs;

void die_if_kernel(char *str, struct pt_regs *regs, int nr);
void show_regs(struct pt_regs *regs);

#endif /* __ASM_CSKY_BUG_H */

---- include/asm/cache.h ----

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_CACHE_H
#define __ASM_CSKY_CACHE_H

/* bytes per L1 cache line */
#define L1_CACHE_SHIFT  CONFIG_L1_CACHE_SHIFT

#define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)

#define ARCH_DMA_MINALIGN  L1_CACHE_BYTES

#ifndef __ASSEMBLY__

void dcache_wb_line(unsigned long start);

void icache_inv_range(unsigned long start, unsigned long end);
void icache_inv_all(void);

void dcache_wb_range(unsigned long start, unsigned long end);
void dcache_wbinv_all(void);

void cache_wbinv_range(unsigned long start, unsigned long end);
void cache_wbinv_all(void);

void dma_wbinv_range(unsigned long start, unsigned long end);
void dma_inv_range(unsigned long start, unsigned long end);
void dma_wb_range(unsigned long start, unsigned long end);

#endif
#endif /* __ASM_CSKY_CACHE_H */

---- include/asm/highmem.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_HIGHMEM_H
#define __ASM_CSKY_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/kmap_types.h>
#include <asm/cache.h>

/* undef for production */
#define HIGHMEM_DEBUG 1

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define LAST_PKMAP 1024
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);

#define flush_cache_kmaps() do {} while (0)

extern void kmap_init(void);

#define kmap_prot PAGE_KERNEL

#endif /* __KERNEL__ */
#endif /* __ASM_CSKY_HIGHMEM_H */
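---- example (not in the archive): touching a possibly-highmem page ----

A hedged sketch of the atomic kmap API declared above: with CONFIG_HIGHMEM,
pages beyond the 1GB direct map have no permanent kernel address, so a
temporary fixmap slot is borrowed. kunmap_atomic() is the generic
linux/highmem.h wrapper around the __kunmap_atomic() declared here;
zero_any_page() is an illustrative name.

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_any_page(struct page *page)
{
    void *vaddr = kmap_atomic(page);   /* temporary per-CPU mapping */

    memset(vaddr, 0, PAGE_SIZE);
    kunmap_atomic(vaddr);              /* release the fixmap slot */
}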
---- include/asm/elf.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_ELF_H
#define __ASM_CSKY_ELF_H

#include <asm/ptrace.h>
#include <abi/regdef.h>

#define ELF_ARCH EM_CSKY
#define EM_CSKY_OLD 39

/* CSKY Relocations */
#define R_CSKY_NONE               0
#define R_CSKY_32                 1
#define R_CSKY_PCIMM8BY4          2
#define R_CSKY_PCIMM11BY2         3
#define R_CSKY_PCIMM4BY2          4
#define R_CSKY_PC32               5
#define R_CSKY_PCRELJSR_IMM11BY2  6
#define R_CSKY_GNU_VTINHERIT      7
#define R_CSKY_GNU_VTENTRY        8
#define R_CSKY_RELATIVE           9
#define R_CSKY_COPY               10
#define R_CSKY_GLOB_DAT           11
#define R_CSKY_JUMP_SLOT          12
#define R_CSKY_ADDR_HI16          24
#define R_CSKY_ADDR_LO16          25
#define R_CSKY_PCRELJSR_IMM26BY2  40

typedef unsigned long elf_greg_t;

typedef struct user_fp elf_fpregset_t;

/*
 * In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() uses a fixed size of
 * elf_prstatus: 148 for abiv1 and 220 for abiv2. That size is enough for
 * a coredump, so the full sizeof(struct pt_regs) is not needed.
 */
#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2)
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) || \
                           ((x)->e_machine == EM_CSKY_OLD))

/*
 * These are used to set parameters in the core dumps.
 */
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   4096
#define ELF_CLASS           ELFCLASS32
#define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; }

#ifdef __cskyBE__
#define ELF_DATA ELFDATA2MSB
#else
#define ELF_DATA ELFDATA2LSB
#endif

/*
 * This is the location that an ET_DYN program is loaded if exec'ed. Typical
 * use of this is to invoke "./ld.so someprog" to test out a new version of
 * the loader. We need to make sure that it is out of the way of the program
 * that it will "exec", and that there is sufficient room for the brk.
 */
#define ELF_ET_DYN_BASE 0x0UL
#include <abi/elf.h>

/* Similar, but for a thread other than current. */
struct task_struct;
extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)

#define ELF_HWCAP (0)

/*
 * This yields a string that ld.so will use to load implementation specific
 * libraries for optimization. This is more specific in intent than poking
 * at uname or /proc/cpuinfo.
 */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp);
#endif /* __ASM_CSKY_ELF_H */
---- include/asm/irqflags.h ----

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_IRQFLAGS_H
#define __ASM_CSKY_IRQFLAGS_H

#include <abi/reg_ops.h>

static inline unsigned long arch_local_irq_save(void)
{
    unsigned long flags;

    flags = mfcr("psr");
    asm volatile("psrclr ie\n":::"memory");
    return flags;
}
#define arch_local_irq_save arch_local_irq_save

static inline void arch_local_irq_enable(void)
{
    asm volatile("psrset ee, ie\n":::"memory");
}
#define arch_local_irq_enable arch_local_irq_enable

static inline void arch_local_irq_disable(void)
{
    asm volatile("psrclr ie\n":::"memory");
}
#define arch_local_irq_disable arch_local_irq_disable

static inline unsigned long arch_local_save_flags(void)
{
    return mfcr("psr");
}
#define arch_local_save_flags arch_local_save_flags

static inline void arch_local_irq_restore(unsigned long flags)
{
    mtcr("psr", flags);
}
#define arch_local_irq_restore arch_local_irq_restore

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
    return !(flags & (1<<6));
}
#define arch_irqs_disabled_flags arch_irqs_disabled_flags

#include <asm-generic/irqflags.h>

#endif /* __ASM_CSKY_IRQFLAGS_H */
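---- example (not in the archive): the save/restore pattern ----

A hedged sketch of the pattern these primitives back: local_irq_save() maps
down to arch_local_irq_save() above (psrclr ie, after stashing psr), and
local_irq_restore() rewrites the saved psr. bump_event_count() and
event_count are illustrative names.

#include <linux/irqflags.h>

static unsigned int event_count;

static void bump_event_count(void)
{
    unsigned long flags;

    local_irq_save(flags);  /* psr.ie cleared, old psr kept in flags */
    event_count++;          /* short non-reentrant critical section */
    local_irq_restore(flags);
}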
---- include/asm/fixmap.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_FIXMAP_H
#define __ASM_CSKY_FIXMAP_H

#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif

enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
    FIX_KMAP_BEGIN,
    FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
#endif
    __end_of_fixed_addresses
};

#define FIXADDR_TOP     0xffffc000
#define FIXADDR_SIZE    (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START   (FIXADDR_TOP - FIXADDR_SIZE)

#include <asm-generic/fixmap.h>

#endif /* __ASM_CSKY_FIXMAP_H */

---- include/asm/smp.h ----

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_SMP_H
#define __ASM_CSKY_SMP_H

#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/threads.h>

#ifdef CONFIG_SMP

void __init setup_smp(void);

void __init setup_smp_ipi(void);

void arch_send_call_function_ipi_mask(struct cpumask *mask);

void arch_send_call_function_single_ipi(int cpu);

void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq);

#define raw_smp_processor_id() (current_thread_info()->cpu)

int __cpu_disable(void);

void __cpu_die(unsigned int cpu);

#endif /* CONFIG_SMP */
#endif /* __ASM_CSKY_SMP_H */

---- include/asm/pgtable.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define PGDIR_SHIFT     22
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD   (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS  0UL

#define PKMAP_BASE      (0xff800000)

#define VMALLOC_START   (0xc0008000)
#define VMALLOC_END     (PKMAP_BASE - 2*PAGE_SIZE)

/*
 * C-SKY uses a two-level paging structure:
 */
#define PGD_ORDER   0
#define PTE_ORDER   0

#define PTRS_PER_PGD    ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD    1
#define PTRS_PER_PTE    ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define pte_ERROR(e) \
    pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
    pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Find an entry in the third-level page table.. */
#define __pte_offset_t(address) \
    (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
    (pmd_page_vaddr(*(dir)) + __pte_offset_t(address))
#define pte_offset_map(dir, address) \
    ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
#define pmd_page(pmd)   (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep)   set_pte((ptep), \
    (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
#define pte_none(pte)       (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)    (pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)      ((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)  __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
                    | pgprot_val(prot))

#define __READABLE  (_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
#define __WRITEABLE (_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
                         _CACHE_MASK)

#define pte_unmap(pte)  ((void)(pte))

#define __swp_type(x)               (((x).val >> 4) & 0xff)
#define __swp_offset(x)             ((x).val >> 12)
#define __swp_entry(type, offset)   ((swp_entry_t) {((type) << 4) | \
                                    ((offset) << 12) })
#define __pte_to_swp_entry(pte)     ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)       ((pte_t) { (x).val })

#define pte_page(x)                 pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)   __pte(((page_nr) << PAGE_SHIFT) | \
                                    pgprot_val(pgprot))

/*
 * C-SKY can't do page protection for execute and treats it the same as
 * read. Write permission also implies read permission. This is the
 * closest we can get by reasonable means..
 */
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _CACHE_CACHED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                        _CACHE_CACHED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                        _PAGE_GLOBAL | _CACHE_CACHED)
#define PAGE_USERIO     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                        _CACHE_CACHED)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)    (virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline void set_pte(pte_t *p, pte_t pte)
{
    *p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
    dcache_wb_line((u32)p);
#endif
    /* prevent out-of-order execution */
    smp_mb();
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
    unsigned long ptr;

    ptr = pmd_val(pmd);

    return __va(ptr);
}

#define pmd_phys(pmd) pmd_val(pmd)

static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
    *p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
    dcache_wb_line((u32)p);
#endif
    /* prevent speculative execution */
    smp_mb();
}

static inline int pmd_none(pmd_t pmd)
{
    return pmd_val(pmd) == __pa(invalid_pte_table);
}

#define pmd_bad(pmd)    (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
    return (pmd_val(pmd) != __pa(invalid_pte_table));
}

static inline void pmd_clear(pmd_t *p)
{
    pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
    dcache_wb_line((u32)p);
#endif
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
    return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
    return (pte).pte_low & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
    return (pte).pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
    return (pte).pte_low & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
    pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
    return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
    pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
    return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
    pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
    return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
    pte_val(pte) |= _PAGE_WRITE;
    if (pte_val(pte) & _PAGE_MODIFIED)
        pte_val(pte) |= _PAGE_DIRTY;
    return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
    pte_val(pte) |= _PAGE_MODIFIED;
    if (pte_val(pte) & _PAGE_WRITE)
        pte_val(pte) |= _PAGE_DIRTY;
    return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
    pte_val(pte) |= _PAGE_ACCESSED;
    if (pte_val(pte) & _PAGE_READ)
        pte_val(pte) |= _PAGE_VALID;
    return pte;
}

#define __pgd_offset(address)   pgd_index(address)
#define __pud_offset(address)   (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)   (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)

#define pgd_index(address)      ((address) >> PGDIR_SHIFT)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);

/*
 * Macro to mark a page protection value as "uncacheable". Note that
 * "protection" is really a misnomer here, as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
    unsigned long prot = pgprot_val(_prot);

    prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;

    return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
    unsigned long prot = pgprot_val(_prot);

    prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

    return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
    return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
                 (pgprot_val(newprot)));
}

/* to find an entry in a page-table-directory */
static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
    return mm->pgd + pgd_index(address);
}

/* Find an entry in the third-level page table.. */
static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
{
    return (pte_t *) (pmd_page_vaddr(*dir)) +
           ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *pte);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)   (1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
    remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

#endif /* __ASM_CSKY_PGTABLE_H */
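---- example (not in the archive): swap-entry packing checked in user space ----

A runnable check of the swap-entry layout defined in pgtable.h above: the
type occupies bits 4..11 and the offset starts at bit 12, leaving the low
PTE flag bits (e.g. _PAGE_PRESENT/_PAGE_GLOBAL) clear for swapped-out PTEs.

#include <stdio.h>

static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
    return (type << 4) | (offset << 12);   /* __swp_entry() */
}

int main(void)
{
    unsigned long val = swp_entry(3, 0x1234);

    printf("val=0x%08lx type=%lu offset=0x%lx\n",
           val,
           (val >> 4) & 0xff,   /* __swp_type()   -> 3      */
           val >> 12);          /* __swp_offset() -> 0x1234 */
    return 0;
}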
---- include/asm/atomic.h ----

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#include <linux/version.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifdef CONFIG_CPU_HAS_LDSTEX

#define __atomic_add_unless __atomic_add_unless
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
    unsigned long tmp, ret;

    smp_mb();

    asm volatile (
    "1: ldex.w   %0, (%3) \n"
    "   mov      %1, %0   \n"
    "   cmpne    %0, %4   \n"
    "   bf       2f       \n"
    "   add      %0, %2   \n"
    "   stex.w   %0, (%3) \n"
    "   bez      %0, 1b   \n"
    "2:                   \n"
    : "=&r" (tmp), "=&r" (ret)
    : "r" (a), "r"(&v->counter), "r"(u)
    : "memory");

    if (ret != u)
        smp_mb();

    return ret;
}

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
    unsigned long tmp; \
    \
    asm volatile ( \
    "1: ldex.w   %0, (%2) \n" \
    "   " #op "  %0, %1   \n" \
    "   stex.w   %0, (%2) \n" \
    "   bez      %0, 1b   \n" \
    : "=&r" (tmp) \
    : "r" (i), "r"(&v->counter) \
    : "memory"); \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
    unsigned long tmp, ret; \
    \
    smp_mb(); \
    asm volatile ( \
    "1: ldex.w   %0, (%3) \n" \
    "   " #op "  %0, %2   \n" \
    "   mov      %1, %0   \n" \
    "   stex.w   %0, (%3) \n" \
    "   bez      %0, 1b   \n" \
    : "=&r" (tmp), "=&r" (ret) \
    : "r" (i), "r"(&v->counter) \
    : "memory"); \
    smp_mb(); \
    \
    return ret; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
    unsigned long tmp, ret; \
    \
    smp_mb(); \
    asm volatile ( \
    "1: ldex.w   %0, (%3) \n" \
    "   mov      %1, %0   \n" \
    "   " #op "  %0, %2   \n" \
    "   stex.w   %0, (%3) \n" \
    "   bez      %0, 1b   \n" \
    : "=&r" (tmp), "=&r" (ret) \
    : "r" (i), "r"(&v->counter) \
    : "memory"); \
    smp_mb(); \
    \
    return ret; \
}

#else /* CONFIG_CPU_HAS_LDSTEX */

#include <linux/irqflags.h>

#define __atomic_add_unless __atomic_add_unless
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
    unsigned long tmp, ret, flags;

    raw_local_irq_save(flags);

    asm volatile (
    "   ldw      %0, (%3) \n"
    "   mov      %1, %0   \n"
    "   cmpne    %0, %4   \n"
    "   bf       2f       \n"
    "   add      %0, %2   \n"
    "   stw      %0, (%3) \n"
    "2:                   \n"
    : "=&r" (tmp), "=&r" (ret)
    : "r" (a), "r"(&v->counter), "r"(u)
    : "memory");

    raw_local_irq_restore(flags);

    return ret;
}

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
    unsigned long tmp, flags; \
    \
    raw_local_irq_save(flags); \
    \
    asm volatile ( \
    "   ldw      %0, (%2) \n" \
    "   " #op "  %0, %1   \n" \
    "   stw      %0, (%2) \n" \
    : "=&r" (tmp) \
    : "r" (i), "r"(&v->counter) \
    : "memory"); \
    \
    raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
    unsigned long tmp, ret, flags; \
    \
    raw_local_irq_save(flags); \
    \
    asm volatile ( \
    "   ldw      %0, (%3) \n" \
    "   " #op "  %0, %2   \n" \
    "   stw      %0, (%3) \n" \
    "   mov      %1, %0   \n" \
    : "=&r" (tmp), "=&r" (ret) \
    : "r" (i), "r"(&v->counter) \
    : "memory"); \
    \
    raw_local_irq_restore(flags); \
    \
    return ret; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
    unsigned long tmp, ret, flags; \
    \
    raw_local_irq_save(flags); \
    \
    asm volatile ( \
    "   ldw      %0, (%3) \n" \
    "   mov      %1, %0   \n" \
    "   " #op "  %0, %2   \n" \
    "   stw      %0, (%3) \n" \
    : "=&r" (tmp), "=&r" (ret) \
    : "r" (i), "r"(&v->counter) \
    : "memory"); \
    \
    raw_local_irq_restore(flags); \
    \
    return ret; \
}

#endif /* CONFIG_CPU_HAS_LDSTEX */

#define atomic_add_return atomic_add_return
ATOMIC_OP_RETURN(add, +)
#define atomic_sub_return atomic_sub_return
ATOMIC_OP_RETURN(sub, -)

#define atomic_fetch_add atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
#define atomic_fetch_sub atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
#define atomic_fetch_and atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
#define atomic_fetch_or atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
#define atomic_fetch_xor atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and atomic_and
ATOMIC_OP(and, &)
#define atomic_or atomic_or
ATOMIC_OP(or, |)
#define atomic_xor atomic_xor
ATOMIC_OP(xor, ^)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#include <asm-generic/atomic.h>

#endif /* __ASM_CSKY_ATOMIC_H */
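---- example (not in the archive): using the atomic ops ----

A hedged sketch using the operations defined above: on LDSTEX parts they
compile to ldex.w/stex.w retry loops, otherwise to irq-save sections.
take_ref()/drop_ref() and refcnt are illustrative names.

#include <linux/atomic.h>

static atomic_t refcnt = ATOMIC_INIT(1);

static void take_ref(void)
{
    atomic_add(1, &refcnt);
}

static int drop_ref(void)
{
    /* atomic_sub_return() is fully ordered: smp_mb() before and after */
    return atomic_sub_return(1, &refcnt) == 0;  /* true on the last ref */
}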
---- include/asm/pgalloc.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#include <asm-generic/pgalloc.h>    /* for pte_{alloc,free}_one */

static inline void pmd_populate_kernel(struct mm_struct *mm,
                                       pmd_t *pmd, pte_t *pte)
{
    set_pmd(pmd, __pmd(__pa(pte)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte)
{
    set_pmd(pmd, __pmd(__pa(page_address(pte))));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

extern void pgd_init(unsigned long *p);

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
    pte_t *pte;
    unsigned long i;

    pte = (pte_t *) __get_free_page(GFP_KERNEL);
    if (!pte)
        return NULL;

    for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
        (pte + i)->pte_low = _PAGE_GLOBAL;

    return pte;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
    free_pages((unsigned long)pgd, PGD_ORDER);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
    pgd_t *ret;
    pgd_t *init;

    ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
    if (ret) {
        init = pgd_offset(&init_mm, 0UL);
        pgd_init((unsigned long *)ret);
        memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        /* prevent out-of-order execution */
        smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
        dcache_wb_range((unsigned int)ret,
                        (unsigned int)(ret + PTRS_PER_PGD));
#endif
    }

    return ret;
}

#define __pte_free_tlb(tlb, pte, address) \
do { \
    pgtable_pte_page_dtor(pte); \
    tlb_remove_page(tlb, pte); \
} while (0)

extern void pagetable_init(void);
extern void pre_mmu_init(void);
extern void pre_trap_init(void);

#endif /* __ASM_CSKY_PGALLOC_H */

---- include/asm/mmu_context.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_MMU_CONTEXT_H
#define __ASM_CSKY_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <abi/ckmmu.h>

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
    setup_pgd(__pa(pgd), false)

#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
    setup_pgd(__pa(pgd), true)

#define ASID_MASK       ((1 << CONFIG_CPU_ASID_BITS) - 1)
#define cpu_asid(mm)    (atomic64_read(&mm->context.asid) & ASID_MASK)

#define init_new_context(tsk,mm) \
    ({ atomic64_set(&(mm)->context.asid, 0); 0; })
#define activate_mm(prev,next) switch_mm(prev, next, current)

#define destroy_context(mm)     do {} while (0)
#define enter_lazy_tlb(mm, tsk) do {} while (0)
#define deactivate_mm(tsk, mm)  do {} while (0)

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
    unsigned int cpu = smp_processor_id();

    if (prev != next)
        check_and_switch_context(next, cpu);

    TLBMISS_HANDLER_SETUP_PGD(next->pgd);
    write_mmu_entryhi(next->context.asid.counter);
}
#endif /* __ASM_CSKY_MMU_CONTEXT_H */

---- include/asm/unistd.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <uapi/asm/unistd.h>

#define NR_syscalls (__NR_syscalls)

---- include/asm/ftrace.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_FTRACE_H
#define __ASM_CSKY_FTRACE_H

#define MCOUNT_INSN_SIZE 14

#define HAVE_FUNCTION_GRAPH_FP_TEST

#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

#define MCOUNT_ADDR ((unsigned long)_mcount)

#ifndef __ASSEMBLY__

extern void _mcount(unsigned long);

extern void ftrace_graph_call(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
    return addr;
}

struct dyn_arch_ftrace {
};
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_CSKY_FTRACE_H */

---- include/asm/switch_to.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_SWITCH_TO_H
#define __ASM_CSKY_SWITCH_TO_H

#include <linux/thread_info.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
static inline void __switch_to_fpu(struct task_struct *prev,
                                   struct task_struct *next)
{
    save_to_user_fp(&prev->thread.user_fp);
    restore_from_user_fp(&next->thread.user_fp);
}
#else
static inline void __switch_to_fpu(struct task_struct *prev,
                                   struct task_struct *next)
{}
#endif

/*
 * Context switching is now performed out-of-line in switch_to.S
 */
extern struct task_struct *__switch_to(struct task_struct *,
                                       struct task_struct *);

#define switch_to(prev, next, last) \
do { \
    struct task_struct *__prev = (prev); \
    struct task_struct *__next = (next); \
    __switch_to_fpu(__prev, __next); \
    ((last) = __switch_to((prev), (next))); \
} while (0)

#endif /* __ASM_CSKY_SWITCH_TO_H */
---- include/asm/io.h ----

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_IO_H
#define __ASM_CSKY_IO_H

#include <asm/pgtable.h>
#include <linux/types.h>
#include <linux/version.h>

/*
 * I/O memory access primitives. Reads are ordered relative to any
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access.
 *
 * For CACHEV1 (807, 810), a store instruction may retire early ("fast
 * retire"), so an extra mb() is needed after each write to prevent that.
 *
 * For CACHEV2 (860), stores to pages with PAGE_ATTR_NO_BUFFERABLE won't
 * fast-retire, so no trailing mb() is required.
 */
#define readb(c)    ({ u8  __v = readb_relaxed(c); rmb(); __v; })
#define readw(c)    ({ u16 __v = readw_relaxed(c); rmb(); __v; })
#define readl(c)    ({ u32 __v = readl_relaxed(c); rmb(); __v; })

#ifdef CONFIG_CPU_HAS_CACHEV2
#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
#else
#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
#endif

/*
 * I/O memory mapping functions.
 */
extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
extern void iounmap(void *addr);

#define ioremap(addr, size)         __ioremap((addr), (size), \
                                    pgprot_noncached(PAGE_KERNEL))
#define ioremap_wc(addr, size)      __ioremap((addr), (size), \
                                    pgprot_writecombine(PAGE_KERNEL))
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_cache               ioremap_cache

#include <asm-generic/io.h>

#endif /* __ASM_CSKY_IO_H */
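---- example (not in the archive): a driver-style MMIO sketch ----

A hedged sketch of the ioremap()/readl()/writel() API above. The register
offsets, base address, and poke_device() name are invented for the example;
readl() adds the rmb() and writel() the wmb() exactly as documented in the
comment block.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define DEV_CTRL   0x00   /* illustrative register layout */
#define DEV_STATUS 0x04

static int poke_device(phys_addr_t base)
{
    void __iomem *regs = ioremap(base, 0x100); /* uncached mapping */

    if (!regs)
        return -ENOMEM;

    writel(0x1, regs + DEV_CTRL);              /* start the device */
    while (!(readl(regs + DEV_STATUS) & 0x1))
        udelay(1);                             /* wait for ready bit */

    iounmap(regs);
    return 0;
}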
#ifndef _ASM_CSKY_THREAD_INFO_H
#define _ASM_CSKY_THREAD_INFO_H

#ifndef __ASSEMBLY__

#include <linux/version.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <abi/switch_context.h>

struct thread_info {
    struct task_struct      *task;
    void                    *dump_exec_domain;
    unsigned long           flags;
    int                     preempt_count;
    unsigned long           tp_value;
    mm_segment_t            addr_limit;
    struct restart_block    restart_block;
    struct pt_regs          *regs;
    unsigned int            cpu;
};

#define INIT_THREAD_INFO(tsk)                   \
{                                               \
    .task           = &tsk,                     \
    .preempt_count  = INIT_PREEMPT_COUNT,       \
    .addr_limit     = KERNEL_DS,                \
    .cpu            = 0,                        \
    .restart_block = {                          \
        .fn = do_no_restart_syscall,            \
    },                                          \
}

#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)

#define thread_saved_fp(tsk) \
    ((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r8))

static inline struct thread_info *current_thread_info(void)
{
    unsigned long sp;

    asm volatile("mov %0, sp\n":"=r"(sp));

    return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

#endif /* !__ASSEMBLY__ */

#define TIF_SIGPENDING          0   /* signal pending */
#define TIF_NOTIFY_RESUME       1   /* callback before returning to user */
#define TIF_NEED_RESCHED        2   /* rescheduling necessary */
#define TIF_SYSCALL_TRACE       3   /* syscall trace active */
#define TIF_SYSCALL_TRACEPOINT  4   /* syscall tracepoint instrumentation */
#define TIF_SYSCALL_AUDIT       5   /* syscall auditing */
#define TIF_POLLING_NRFLAG      16  /* poll_idle() is TIF_NEED_RESCHED */
#define TIF_MEMDIE              18  /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK     20  /* restore signal mask in do_signal() */
#define TIF_SECCOMP             21  /* secure computing */

#define _TIF_SIGPENDING         (1 << TIF_SIGPENDING)
#define _TIF_NOTIFY_RESUME      (1 << TIF_NOTIFY_RESUME)
#define _TIF_NEED_RESCHED       (1 << TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
#define _TIF_POLLING_NRFLAG     (1 << TIF_POLLING_NRFLAG)
#define _TIF_MEMDIE             (1 << TIF_MEMDIE)
#define _TIF_RESTORE_SIGMASK    (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP            (1 << TIF_SECCOMP)

#endif /* _ASM_CSKY_THREAD_INFO_H */

include/asm/tlb.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_TLB_H
#define __ASM_CSKY_TLB_H

#include <asm/cacheflush.h>

#define tlb_start_vma(tlb, vma) \
    do { \
        if (!(tlb)->fullmm) \
            flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
    } while (0)

#define tlb_end_vma(tlb, vma) \
    do { \
        if (!(tlb)->fullmm) \
            flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
    } while (0)

#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>

#endif /* __ASM_CSKY_TLB_H */

include/asm/processor.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PROCESSOR_H
#define __ASM_CSKY_PROCESSOR_H

#include <linux/bitops.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/cache.h>
#include <abi/reg_ops.h>
#include <abi/regdef.h>
#include <abi/switch_context.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

struct cpuinfo_csky {
    unsigned long asid_cache;
} __aligned(SMP_CACHE_BYTES);

extern struct cpuinfo_csky cpu_data[];

/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 * TASK_SIZE for a 64 bit kernel expandable to 8192EB, of which the current
 * CSKY implementations will "only" be able to use 1TB ...
 */
#define TASK_SIZE       0x7fff8000UL

#ifdef __KERNEL__
#define STACK_TOP       TASK_SIZE
#define STACK_TOP_MAX   STACK_TOP
#endif

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE  (TASK_SIZE / 3)

struct thread_struct {
    unsigned long  ksp;       /* kernel stack pointer */
    unsigned long  sr;        /* saved status register */
    unsigned long  trap_no;   /* saved trap number */

    /* FPU regs */
    struct user_fp __aligned(16) user_fp;
};

#define INIT_THREAD  { \
    .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
    .sr = DEFAULT_PSR_VALUE, \
}

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * pass the data segment into user programs if it exists,
 * it can't hurt anything as far as I can tell
 */
#define start_thread(_regs, _pc, _usp)                                  \
do {                                                                    \
    set_fs(USER_DS); /* reads from user space */                        \
    (_regs)->pc = (_pc);                                                \
    (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */       \
    (_regs)->regs[2] = 0;                                               \
    (_regs)->regs[3] = 0; /* ABIV2 is R7, use it? */                    \
    (_regs)->sr &= ~PS_S;                                               \
    (_regs)->usp = (_usp);                                              \
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)    do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

#define copy_segments(tsk, mm)  do { } while (0)
#define release_segments(mm)    do { } while (0)
#define forget_segments()       do { } while (0)

extern unsigned long thread_saved_pc(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->usp)

#define task_pt_regs(p) \
    ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define cpu_relax() barrier()

#endif /* __ASM_CSKY_PROCESSOR_H */

include/asm/Kbuild

# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
generic-y += bugs.h
generic-y += compat.h
generic-y += current.h
generic-y += delay.h
generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += dma-contiguous.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += fb.h
generic-y += futex.h
generic-y += gpio.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
generic-y += module.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += sections.h
generic-y += serial.h
generic-y += timex.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += vmlinux.lds.h
generic-y += word-at-a-time.h
generic-y += xor.h

include/asm/addrspace.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
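/*
 * Illustration (a hedged sketch, not part of the original header): the
 * KSEG0ADDR() macro defined below keeps the low 29 bits of an address and
 * ORs in the KSEG0 base, mapping a physical address into the kernel's
 * direct-mapped segment at 0x80000000. Worked example in plain C:
 */
#if 0 /* example only, not compiled */
static unsigned long demo_kseg0addr(unsigned long phys)
{
    return (phys & 0x1fffffff) | 0x80000000ul;  /* == KSEG0ADDR(phys) */
}
/* demo_kseg0addr(0x00200000) == 0x80200000 */
#endif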
#ifndef __ASM_CSKY_ADDRSPACE_H
#define __ASM_CSKY_ADDRSPACE_H

#define KSEG0           0x80000000ul
#define KSEG0ADDR(a)    (((unsigned long)a & 0x1fffffff) | KSEG0)

#endif /* __ASM_CSKY_ADDRSPACE_H */

include/asm/ptrace.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PTRACE_H
#define __ASM_CSKY_PTRACE_H

#include <uapi/asm/ptrace.h>
#include <asm/traps.h>
#include <linux/types.h>

#ifndef __ASSEMBLY__

#define PS_S    0x80000000 /* Supervisor Mode */

#define arch_has_single_step() (1)
#define current_pt_regs() \
({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })

#define user_stack_pointer(regs) ((regs)->usp)

#define user_mode(regs) (!((regs)->sr & PS_S))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)

static inline bool in_syscall(struct pt_regs const *regs)
{
    return ((regs->sr >> 16) & 0xff) == VEC_TRAP0;
}

static inline void forget_syscall(struct pt_regs *regs)
{
    regs->sr &= ~(0xff << 16);
}

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
    return regs->a0;
}

#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_PTRACE_H */

include/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_CMPXCHG_H
#define __ASM_CSKY_CMPXCHG_H

#ifdef CONFIG_CPU_HAS_LDSTEX
#include <asm/barrier.h>

extern void __bad_xchg(void);

#define __xchg(new, ptr, size)                      \
({                                                  \
    __typeof__(ptr) __ptr = (ptr);                  \
    __typeof__(new) __new = (new);                  \
    __typeof__(*(ptr)) __ret;                       \
    unsigned long tmp;                              \
    switch (size) {                                 \
    case 4:                                         \
        smp_mb();                                   \
        asm volatile (                              \
        "1: ldex.w  %0, (%3) \n"                    \
        "   mov     %1, %2   \n"                    \
        "   stex.w  %1, (%3) \n"                    \
        "   bez     %1, 1b   \n"                    \
            : "=&r" (__ret), "=&r" (tmp)            \
            : "r" (__new), "r"(__ptr)               \
            :);                                     \
        smp_mb();                                   \
        break;                                      \
    default:                                        \
        __bad_xchg();                               \
    }                                               \
    __ret;                                          \
})

#define xchg(ptr, x)    (__xchg((x), (ptr), sizeof(*(ptr))))

#define __cmpxchg(ptr, old, new, size)              \
({                                                  \
    __typeof__(ptr) __ptr = (ptr);                  \
    __typeof__(new) __new = (new);                  \
    __typeof__(new) __tmp;                          \
    __typeof__(old) __old = (old);                  \
    __typeof__(*(ptr)) __ret;                       \
    switch (size) {                                 \
    case 4:                                         \
        smp_mb();                                   \
        asm volatile (                              \
        "1: ldex.w  %0, (%3) \n"                    \
        "   cmpne   %0, %4   \n"                    \
        "   bt      2f       \n"                    \
        "   mov     %1, %2   \n"                    \
        "   stex.w  %1, (%3) \n"                    \
        "   bez     %1, 1b   \n"                    \
        "2:                  \n"                    \
            : "=&r" (__ret), "=&r" (__tmp)          \
            : "r" (__new), "r"(__ptr), "r"(__old)   \
            :);                                     \
        smp_mb();                                   \
        break;                                      \
    default:                                        \
        __bad_xchg();                               \
    }                                               \
    __ret;                                          \
})

#define cmpxchg(ptr, o, n) \
    (__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
#else
#include <asm-generic/cmpxchg.h>
#endif

#endif /* __ASM_CSKY_CMPXCHG_H */

include/asm/reg_ops.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_REGS_OPS_H
#define __ASM_REGS_OPS_H

#define mfcr(reg)               \
({                              \
    unsigned int tmp;           \
    asm volatile(               \
    "mfcr %0, "reg"\n"          \
    : "=r"(tmp)                 \
    :                           \
    : "memory");                \
    tmp;                        \
})

#define mtcr(reg, val)          \
({                              \
    asm volatile(               \
    "mtcr %0, "reg"\n"          \
    :                           \
    : "r"(val)                  \
    : "memory");                \
})

#endif /* __ASM_REGS_OPS_H */

include/asm/vdso.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_VDSO_H
#define __ASM_CSKY_VDSO_H

#include <abi/vdso.h>

struct csky_vdso {
    unsigned short rt_signal_retcode[4];
};

#endif /* __ASM_CSKY_VDSO_H */

include/asm/asid.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_ASID_H
#define __ASM_ASM_ASID_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct asid_info {
    atomic64_t              generation;
    unsigned long           *map;
    atomic64_t __percpu     *active;
    u64 __percpu            *reserved;
    u32                     bits;
    /* Lock protecting the structure */
    raw_spinlock_t          lock;
    /* Which CPU requires context flush on next call */
    cpumask_t               flush_pending;
    /* Number of ASID allocated by context (shift value) */
    unsigned int            ctxt_shift;
    /* Callback to locally flush the context. */
    void                    (*flush_cpu_ctxt_cb)(void);
};

#define NUM_ASIDS(info)         (1UL << ((info)->bits))
#define NUM_CTXT_ASIDS(info)    (NUM_ASIDS(info) >> (info)->ctxt_shift)

#define active_asid(info, cpu)  *per_cpu_ptr((info)->active, cpu)

void asid_new_context(struct asid_info *info, atomic64_t *pasid,
                      unsigned int cpu, struct mm_struct *mm);

/*
 * Check the ASID is still valid for the context. If not generate a new ASID.
 *
 * @pasid: Pointer to the current ASID batch
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 */
static inline void asid_check_context(struct asid_info *info,
                                      atomic64_t *pasid, unsigned int cpu,
                                      struct mm_struct *mm)
{
    u64 asid, old_active_asid;

    asid = atomic64_read(pasid);

    /*
     * The memory ordering here is subtle.
     * If our active_asid is non-zero and the ASID matches the current
     * generation, then we update the active_asid entry with a relaxed
     * cmpxchg. Racing with a concurrent rollover means that either:
     *
     * - We get a zero back from the cmpxchg and end up waiting on the
     *   lock. Taking the lock synchronises with the rollover and so
     *   we are forced to see the updated generation.
     *
     * - We get a valid ASID back from the cmpxchg, which means the
     *   relaxed xchg in flush_context will treat us as reserved
     *   because atomic RmWs are totally ordered for a given location.
     */
    old_active_asid = atomic64_read(&active_asid(info, cpu));
    if (old_active_asid &&
        !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
        atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
                                 old_active_asid, asid))
        return;

    asid_new_context(info, pasid, cpu, mm);
}

int asid_allocator_init(struct asid_info *info,
                        u32 bits, unsigned int asid_per_ctxt,
                        void (*flush_cpu_ctxt_cb)(void));

#endif

include/asm/checksum.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_CHECKSUM_H
#define __ASM_CSKY_CHECKSUM_H

#include <linux/in6.h>
#include <asm/byteorder.h>

static inline __sum16 csum_fold(__wsum csum)
{
    u32 tmp;

    asm volatile(
    "mov    %1, %0\n"
    "rori   %0, 16\n"
    "addu   %0, %1\n"
    "lsri   %0, 16\n"
    : "=r"(csum), "=r"(tmp)
    : "0"(csum));

    return (__force __sum16) ~csum;
}
#define csum_fold csum_fold

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
        unsigned short len, unsigned short proto, __wsum sum)
{
    asm volatile(
    "clrc\n"
    "addc   %0, %1\n"
    "addc   %0, %2\n"
    "addc   %0, %3\n"
    "inct   %0\n"
    : "=r"(sum)
    : "r"((__force u32)saddr), "r"((__force u32)daddr),
#ifdef __BIG_ENDIAN
      "r"(proto + len),
#else
      "r"((proto + len) << 8),
#endif
      "0" ((__force unsigned long)sum)
    : "cc");
    return sum;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold

#include <asm-generic/checksum.h>

#endif /* __ASM_CSKY_CHECKSUM_H */

include/asm/traps.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
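/*
 * Illustration (a hedged sketch, not part of the original header): the
 * VEC_INIT() macro defined below installs a handler into the vec_base[]
 * exception vector table at the given vector number. A hypothetical
 * handler for the alignment vector would be hooked up roughly like this
 * (my_align_handler is an assumed name):
 */
#if 0 /* example only, not compiled */
extern void my_align_handler(void);     /* hypothetical handler */

static void demo_install_vector(void)
{
    VEC_INIT(VEC_ALIGN, my_align_handler);  /* vec_base[1] = handler */
}
#endif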
#ifndef __ASM_CSKY_TRAPS_H
#define __ASM_CSKY_TRAPS_H

#define VEC_RESET       0
#define VEC_ALIGN       1
#define VEC_ACCESS      2
#define VEC_ZERODIV     3
#define VEC_ILLEGAL     4
#define VEC_PRIV        5
#define VEC_TRACE       6
#define VEC_BREAKPOINT  7
#define VEC_UNRECOVER   8
#define VEC_SOFTRESET   9
#define VEC_AUTOVEC     10
#define VEC_FAUTOVEC    11
#define VEC_HWACCEL     12

#define VEC_TLBMISS     14
#define VEC_TLBMODIFIED 15

#define VEC_TRAP0       16
#define VEC_TRAP1       17
#define VEC_TRAP2       18
#define VEC_TRAP3       19

#define VEC_TLBINVALIDL 20
#define VEC_TLBINVALIDS 21

#define VEC_PRFL        29
#define VEC_FPE         30

extern void *vec_base[];

#define VEC_INIT(i, func) \
do { \
    vec_base[i] = (void *)func; \
} while (0)

void csky_alignment(struct pt_regs *regs);

#endif /* __ASM_CSKY_TRAPS_H */

include/asm/string.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef _CSKY_STRING_MM_H_
#define _CSKY_STRING_MM_H_

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/compiler.h>
#include <abi/string.h>
#endif

#endif /* _CSKY_STRING_MM_H_ */

Kconfig.debug

# SPDX-License-Identifier: GPL-2.0-only
# dummy file, do not delete

boot/dts/Makefile

# SPDX-License-Identifier: GPL-2.0-only
dtstree := $(srctree)/$(src)

dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))

boot/Makefile

# SPDX-License-Identifier: GPL-2.0-only
targets := Image zImage uImage
targets += $(dtb-y)

$(obj)/Image: vmlinux FORCE
	$(call if_changed,objcopy)
	@echo '  Kernel: $@ is ready'

compress-$(CONFIG_KERNEL_GZIP) = gzip
compress-$(CONFIG_KERNEL_LZO)  = lzo
compress-$(CONFIG_KERNEL_LZMA) = lzma
compress-$(CONFIG_KERNEL_XZ)   = xzkern
compress-$(CONFIG_KERNEL_LZ4)  = lz4

$(obj)/zImage: $(obj)/Image FORCE
	$(call if_changed,$(compress-y))
	@echo '  Kernel: $@ is ready'

UIMAGE_ARCH        = sandbox
UIMAGE_COMPRESSION = $(compress-y)
UIMAGE_LOADADDR    = $(shell $(NM) vmlinux | awk '$$NF == "_start" {print $$1}')

$(obj)/uImage: $(obj)/zImage
	$(call if_changed,uimage)
	@echo 'Image: $@ is ready'

Kconfig

# SPDX-License-Identifier: GPL-2.0-only
config CSKY
	def_bool y
	select ARCH_32BIT_OFF_T
	select ARCH_HAS_DMA_PREP_COHERENT
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_USE_BUILTIN_BSWAP
	select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
	select COMMON_CLK
	select CLKSRC_MMIO
	select CLKSRC_OF
	select CSKY_MPINTC if CPU_CK860
	select CSKY_MP_TIMER if CPU_CK860
	select CSKY_APB_INTC
	select DMA_DIRECT_REMAP
	select IRQ_DOMAIN
	select HANDLE_DOMAIN_IRQ
	select DW_APB_TIMER_OF
	select GENERIC_LIB_ASHLDI3
	select GENERIC_LIB_ASHRDI3
	select GENERIC_LIB_LSHRDI3
	select GENERIC_LIB_MULDI3
	select GENERIC_LIB_CMPDI2
	select GENERIC_LIB_UCMPDI2
	select GENERIC_ALLOCATOR
	select GENERIC_ATOMIC64
	select GENERIC_CLOCKEVENTS
	select GENERIC_CPU_DEVICES
	select GENERIC_IRQ_CHIP
	select GENERIC_IRQ_PROBE
	select GENERIC_IRQ_SHOW
	select GENERIC_IRQ_MULTI_HANDLER
	select GENERIC_SCHED_CLOCK
	select GENERIC_SMP_IDLE_THREAD
	select GX6605S_TIMER if CPU_CK610
	select HAVE_ARCH_TRACEHOOK
	select HAVE_ARCH_AUDITSYSCALL
	select HAVE_COPY_THREAD_TLS
	select HAVE_DYNAMIC_FTRACE
	select HAVE_FUNCTION_TRACER
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_LZO
	select HAVE_KERNEL_LZMA
	select HAVE_PERF_EVENTS
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_DMA_API_DEBUG
	select HAVE_DMA_CONTIGUOUS
	select HAVE_SYSCALL_TRACEPOINTS
	select MAY_HAVE_SPARSE_IRQ
	select MODULES_USE_ELF_RELA if MODULES
	select OF
	select OF_EARLY_FLATTREE
	select PERF_USE_VMALLOC if CPU_CK610
	select RTC_LIB
	select TIMER_OF
	select USB_ARCH_HAS_EHCI
	select USB_ARCH_HAS_OHCI

config CPU_HAS_CACHEV2
	bool

config CPU_HAS_FPUV2
	bool

config CPU_HAS_HILO
	bool

config CPU_HAS_TLBI
	bool

config CPU_HAS_LDSTEX
	bool
	help
	  For SMP, the CPU needs the "ldex & stex" instructions for atomic
	  operations.

config CPU_NEED_TLBSYNC
	bool

config CPU_NEED_SOFTALIGN
	bool

config CPU_NO_USER_BKPT
	bool
	help
	  For abiv2 we couldn't use "trap 1" as the user-space bkpt in
	  gdbserver, because abiv2 is a 16/32-bit instruction set and
	  "trap 1" is 32-bit. So we need a 16-bit instruction as the
	  user-space bkpt, one that causes an illegal instruction
	  exception. In the kernel we parse *regs->pc to determine
	  whether to send SIGTRAP or not.

config GENERIC_CALIBRATE_DELAY
	def_bool y

config GENERIC_CSUM
	def_bool y

config GENERIC_HWEIGHT
	def_bool y

config MMU
	def_bool y

config STACKTRACE_SUPPORT
	def_bool y

config TIME_LOW_RES
	def_bool y

config TRACE_IRQFLAGS_SUPPORT
	def_bool y

config CPU_TLB_SIZE
	int
	default "128" if (CPU_CK610 || CPU_CK807 || CPU_CK810)
	default "1024" if (CPU_CK860)

config CPU_ASID_BITS
	int
	default "8" if (CPU_CK610 || CPU_CK807 || CPU_CK810)
	default "12" if (CPU_CK860)

config L1_CACHE_SHIFT
	int
	default "4" if (CPU_CK610)
	default "5" if (CPU_CK807 || CPU_CK810)
	default "6" if (CPU_CK860)

menu "Processor type and features"

choice
	prompt "CPU MODEL"
	default CPU_CK807

config CPU_CK610
	bool "CSKY CPU ck610"
	select CPU_NEED_TLBSYNC
	select CPU_NEED_SOFTALIGN
	select CPU_NO_USER_BKPT

config CPU_CK810
	bool "CSKY CPU ck810"
	select CPU_HAS_HILO
	select CPU_NEED_TLBSYNC

config CPU_CK807
	bool "CSKY CPU ck807"
	select CPU_HAS_HILO

config CPU_CK860
	bool "CSKY CPU ck860"
	select CPU_HAS_TLBI
	select CPU_HAS_CACHEV2
	select CPU_HAS_LDSTEX
	select CPU_HAS_FPUV2
endchoice

choice
	prompt "C-SKY PMU type"
	depends on PERF_EVENTS
	depends on CPU_CK807 || CPU_CK810 || CPU_CK860

config CPU_PMU_NONE
	bool "None"

config CSKY_PMU_V1
	bool "Performance Monitoring Unit Ver.1"
endchoice

choice
	prompt "Power Manager Instruction (wait/doze/stop)"
	default CPU_PM_NONE

config CPU_PM_NONE
	bool "None"

config CPU_PM_WAIT
	bool "wait"

config CPU_PM_DOZE
	bool "doze"

config CPU_PM_STOP
	bool "stop"
endchoice

config CPU_HAS_VDSP
	bool "CPU has VDSP coprocessor"
	depends on CPU_HAS_FPU && CPU_HAS_FPUV2

config CPU_HAS_FPU
	bool "CPU has FPU coprocessor"
	depends on CPU_CK807 || CPU_CK810 || CPU_CK860

config CPU_HAS_TEE
	bool "CPU has Trusted Execution Environment"
	depends on CPU_CK810

config SMP
	bool "Symmetric Multi-Processing (SMP) support for C-SKY"
	depends on CPU_CK860
	default n

config NR_CPUS
	int "Maximum number of CPUs (2-32)"
	range 2 32
	depends on SMP
	default "2"

config HIGHMEM
	bool "High Memory Support"
	depends on !CPU_CK610
	default y

config FORCE_MAX_ZONEORDER
	int "Maximum zone order"
	default "11"

config DRAM_BASE
	hex "DRAM start addr (the same with memory-section in dts)"
	default 0x0

config HOTPLUG_CPU
	bool "Support for hot-pluggable CPUs"
	select GENERIC_IRQ_MIGRATION
	depends on SMP
	help
	  Say Y here to allow turning CPUs off and on. CPUs can be
	  controlled through /sys/devices/system/cpu/cpu1/hotplug/target.

	  Say N if you want to disable CPU hotplug.
endmenu

source "kernel/Kconfig.hz"

mm/Makefile

# SPDX-License-Identifier: GPL-2.0-only
ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
obj-y += cachev2.o
CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
else
obj-y += cachev1.o
CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
endif

obj-y += dma-mapping.o
obj-y += fault.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-y += init.o
obj-y += ioremap.o
obj-y += syscache.o
obj-y += tlb.o
obj-y += asid.o
obj-y += context.o

abiv1/Makefile

# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CPU_NEED_SOFTALIGN) += alignment.o
obj-y += bswapdi.o
obj-y += bswapsi.o
obj-y += cacheflush.o
obj-y += mmap.o
obj-y += memcpy.o
obj-y += strksyms.o

abiv2/Makefile

# SPDX-License-Identifier: GPL-2.0-only
obj-y += cacheflush.o
obj-$(CONFIG_CPU_HAS_FPU) += fpu.o
obj-y += memcmp.o
obj-y += memcpy.o
obj-y += memmove.o
obj-y += memset.o
obj-y += strcmp.o
obj-y += strcpy.o
obj-y += strlen.o
obj-y += strksyms.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o

lib/Makefile

# SPDX-License-Identifier: GPL-2.0-only
lib-y := usercopy.o delay.o

Makefile

# SPDX-License-Identifier: GPL-2.0-only
OBJCOPYFLAGS := -O binary
GZFLAGS := -9

ifdef CONFIG_CPU_HAS_FPU
FPUEXT = f
endif

ifdef CONFIG_CPU_HAS_VDSP
VDSPEXT = v
endif

ifdef CONFIG_CPU_HAS_TEE
TEEEXT = t
endif

ifdef CONFIG_CPU_CK610
CPUTYPE = ck610
CSKYABI = abiv1
endif

ifdef CONFIG_CPU_CK810
CPUTYPE = ck810
CSKYABI = abiv2
endif

ifdef CONFIG_CPU_CK807
CPUTYPE = ck807
CSKYABI = abiv2
endif

ifdef CONFIG_CPU_CK860
CPUTYPE = ck860
CSKYABI = abiv2
endif

ifneq ($(CSKYABI),)
MCPU_STR = $(CPUTYPE)$(FPUEXT)$(VDSPEXT)$(TEEEXT)
KBUILD_CFLAGS += -mcpu=$(CPUTYPE) -Wa,-mcpu=$(MCPU_STR)
KBUILD_CFLAGS += -DCSKYCPU_DEF_NAME=\"$(MCPU_STR)\"
KBUILD_CFLAGS += -msoft-float -mdiv
KBUILD_CFLAGS += -fno-tree-vectorize
endif

KBUILD_CFLAGS += -pipe

ifeq ($(CSKYABI),abiv2)
KBUILD_CFLAGS += -mno-stack-size
endif

ifdef CONFIG_STACKTRACE
KBUILD_CFLAGS += -mbacktrace
endif

abidirs := $(patsubst %,arch/csky/%/,$(CSKYABI))
KBUILD_CFLAGS += $(patsubst %,-I$(srctree)/%inc,$(abidirs))

KBUILD_CPPFLAGS += -mlittle-endian
LDFLAGS += -EL

KBUILD_AFLAGS += $(KBUILD_CFLAGS)

head-y := arch/csky/kernel/head.o

core-y += arch/csky/kernel/
core-y += arch/csky/mm/
core-y += arch/csky/$(CSKYABI)/

libs-y += arch/csky/lib/ \
	$(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)

boot := arch/csky/boot
core-y += $(boot)/dts/

all: zImage

zImage Image uImage: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

archclean:
	$(Q)$(MAKE) $(clean)=$(boot)

define archhelp
  echo  '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
  echo  '  Image  - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '  uImage - U-Boot wrapped zImage'
endef

kernel/Makefile

# SPDX-License-Identifier: GPL-2.0-only
extra-y := head.o vmlinux.lds

obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
obj-y += power.o syscall.o syscall_table.o setup.o
obj-y += process.o cpu-probe.o ptrace.o dumpstack.o

obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o

ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif
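/*
 * Appendix illustration (a hedged sketch, not part of the tree above): a
 * portable-C rendering of what the csum_fold() asm in
 * include/asm/checksum.h computes. "rori 16" plus "addu" add the checksum
 * to itself rotated by 16 bits; the upper half then holds the
 * end-around-carry fold of the two 16-bit halves, which "lsri 16" extracts
 * before the final bitwise NOT.
 */
#if 0 /* example only, not compiled */
#include <stdint.h>

static uint16_t demo_csum_fold(uint32_t csum)
{
    uint32_t tmp = csum;                            /* mov  tmp, csum */

    csum = ((csum >> 16) | (csum << 16)) + tmp;     /* rori 16; addu  */
    return (uint16_t)~(csum >> 16);                 /* lsri 16; ~     */
}
#endif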