From 3e9f6a005c8df720dbdd42bba92df8081ee722c2 Mon Sep 17 00:00:00 2001
From: Claudiu Zissulescu
Date: Tue, 20 Aug 2024 15:10:39 +0300
Subject: arc: Use __ARC_UNALIGNED__ compiler macro

Replace __ARC_ALIGNED_ACCESS__ macro with the compiler defined macro
__ARC_UNALIGNED__ and improve file comments.

Signed-off-by: Claudiu Zissulescu
---
 newlib/libc/machine/arc/memcpy-archs.S | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/newlib/libc/machine/arc/memcpy-archs.S b/newlib/libc/machine/arc/memcpy-archs.S
index f30dafd..3c477a7 100644
--- a/newlib/libc/machine/arc/memcpy-archs.S
+++ b/newlib/libc/machine/arc/memcpy-archs.S
@@ -70,7 +70,21 @@
 # define ZOLAND	0xF
 #endif
 
-#ifdef __ARC_ALIGNED_ACCESS__
+
+;;; MEMCPY copy memory regions
+;;; Input arguments:
+;;;  r0 - output memory region
+;;;  r1 - input memory region
+;;;  r2 - size in bytes
+;;; Returns:
+;;;  r0 - pointer to the first byte of the output region
+;;; Clobber:
+;;;  r1, r2, r3, r4, r5, r6, r8r9, r10r11, lp_count
+
+#if !defined (__ARC_UNALIGNED__)
+
+;;; MEMCPY routine for the case when the CPU only accepts ALIGNED
+;;; accesses to memory.
 ENTRY (memcpy)
 	prefetch [r1]	; Prefetch the read location
 	prefetchw [r0]	; Prefetch the write location
@@ -268,6 +282,8 @@ ENDFUNC (memcpy)
 
 #else
 
+;;; MEMCPY routine which is used by systems with unaligned memory
+;;; accesses. This is the case for most of ARCHS CPU family.
 ENTRY(memcpy)
 	prefetch [r1]	; Prefetch the read location
 	prefetchw [r0]	; Prefetch the write location
-- 
cgit v1.1