ARCv2: lib: memcpy: fix doing prefetchw outside of buffer [Linux 5.0]

This Linux kernel change "ARCv2: lib: memcpy: fix doing prefetchw outside of buffer" is included in the Linux 5.0 release. The change was authored by Eugeniy Paltsev <eugeniy.paltsev@synopsys.com> on Wed Jan 30 19:32:40 2019 +0300. The commit for this change in the Linux stable tree is f8a15f9 (patch).

ARCv2: lib: memcpy: fix doing prefetchw outside of buffer

The ARCv2 optimized memcpy uses the PREFETCHW instruction to prefetch the
next cache line, but doesn't ensure that the line is not past the end of
the buffer. PREFETCHW changes the line ownership and marks it dirty,
which can cause data corruption if this area is used for DMA IO.
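
To illustrate the failure mode, here is a minimal C sketch of the address
arithmetic only (not the kernel code; the 64-byte stride, buffer size and
names are illustrative assumptions): with a fixed write-prefetch stride,
the final iterations of a copy loop compute prefetch targets past the end
of the destination buffer, and PREFETCHW on such an address takes
ownership of, and dirties, a cache line that may belong to an adjacent
DMA buffer.

 #include <stdio.h>
 #include <stdint.h>
 #include <stddef.h>

 #define PREFETCHW_STRIDE 64  /* offset used by the old PREFETCH_WRITE macro */

 /* Report every loop iteration whose write-prefetch target lies past dst + len. */
 static void show_overshoot(uintptr_t dst, size_t len)
 {
     for (size_t off = 0; off < len; off += 4) {
         uintptr_t target = dst + off + PREFETCHW_STRIDE;

         if (target >= dst + len)
             printf("offset %3zu: prefetchw target %#lx is %zu bytes past the buffer end\n",
                    off, (unsigned long)target,
                    (size_t)(target - (dst + len)));
     }
 }

 int main(void)
 {
     /* A 128-byte destination buffer: the last 16 iterations overshoot. */
     show_overshoot(0x1000, 128);
     return 0;
 }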

Fix the issue by avoiding the PREFETCHW. This leads to performance
degradation, but it is OK as we'll introduce a new memcpy implementation
optimized for unaligned memory access.

We also drop all PREFETCH instructions, as they are quite useless
here:
 * we call PREFETCH right before the LOAD instruction.
 * we copy 16 or 32 bytes of data (depending on CONFIG_ARC_HAS_LL64)
   in the main loop, so we call PREFETCH 4 times (or 2 times) for
   each L1 cache line (with the default 64-byte L1 cache line).
   Obviously this is not optimal; a quick arithmetic check follows
   below.
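
The quick arithmetic check of the "4 times (or 2 times)" figure, assuming
the default 64-byte L1 cache line (an illustrative sketch, not kernel
code):

 #include <stdio.h>

 int main(void)
 {
     const int line_bytes = 64;  /* default ARC L1 cache line size */
     const int iter_ll64  = 32;  /* bytes per loop iteration with CONFIG_ARC_HAS_LL64 (4 x ldd/std) */
     const int iter_plain = 16;  /* bytes per loop iteration without it (4 x ld/st) */

     /* Each main-loop iteration issues one PREFETCH_READ and one PREFETCH_WRITE,
      * so prefetches of each kind per cache line equal iterations per line. */
     printf("LL64:    %d prefetches of each kind per %d-byte line\n",
            line_bytes / iter_ll64, line_bytes);
     printf("no LL64: %d prefetches of each kind per %d-byte line\n",
            line_bytes / iter_plain, line_bytes);
     return 0;
 }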

Signed-off-by: Eugeniy Paltsev <eugeniy.paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>

This change deletes 14 lines of Linux kernel source code. The code changes to the Linux kernel are as follows.

 arch/arc/lib/memcpy-archs.S | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044d..ea14b0b 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -25,15 +25,11 @@
 #endif

 #ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX) prefetch    [RX, 56]
-# define PREFETCH_WRITE(RX)    prefetchw   [RX, 64]
 # define LOADX(DST,RX)     ldd.ab  DST, [RX, 8]
 # define STOREX(SRC,RX)        std.ab  SRC, [RX, 8]
 # define ZOLSHFT       5
 # define ZOLAND            0x1F
 #else
-# define PREFETCH_READ(RX) prefetch    [RX, 28]
-# define PREFETCH_WRITE(RX)    prefetchw   [RX, 32]
 # define LOADX(DST,RX)     ld.ab   DST, [RX, 4]
 # define STOREX(SRC,RX)        st.ab   SRC, [RX, 4]
 # define ZOLSHFT       4
@@ -41,8 +37,6 @@
 #endif

 ENTRY_CFI(memcpy)
-   prefetch [r1]       ; Prefetch the read location
-   prefetchw [r0]      ; Prefetch the write location
    mov.f   0, r2
 ;;; if size is zero
    jz.d    [blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
    lpnz    @.Lcopy32_64bytes
    ;; LOOP START
    LOADX (r6, r1)
-   PREFETCH_READ (r1)
-   PREFETCH_WRITE (r3)
    LOADX (r8, r1)
    LOADX (r10, r1)
    LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
    lpnz    @.Lcopy8bytes_1
    ;; LOOP START
    ld.ab   r6, [r1, 4]
-   prefetch [r1, 28]   ;Prefetch the next read location
    ld.ab   r8, [r1,4]
-   prefetchw [r3, 32]  ;Prefetch the next write location

    SHIFT_1 (r7, r6, 24)
    or  r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
    lpnz    @.Lcopy8bytes_2
    ;; LOOP START
    ld.ab   r6, [r1, 4]
-   prefetch [r1, 28]   ;Prefetch the next read location
    ld.ab   r8, [r1,4]
-   prefetchw [r3, 32]  ;Prefetch the next write location

    SHIFT_1 (r7, r6, 16)
    or  r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
    lpnz    @.Lcopy8bytes_3
    ;; LOOP START
    ld.ab   r6, [r1, 4]
-   prefetch [r1, 28]   ;Prefetch the next read location
    ld.ab   r8, [r1,4]
-   prefetchw [r3, 32]  ;Prefetch the next write location

    SHIFT_1 (r7, r6, 8)
    or  r7, r7, r5
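
For reference, a rough C equivalent of the aligned LL64 main copy loop
after this patch (an illustrative sketch only; the real implementation is
the ARC assembly above): each iteration moves 32 bytes with four 64-bit
load/store pairs and no prefetch instructions.

 #include <stdint.h>
 #include <stddef.h>

 static void copy32_chunks(uint64_t *dst, const uint64_t *src, size_t bytes)
 {
     size_t iters = bytes >> 5;   /* ZOLSHFT == 5: 32 bytes per iteration */

     while (iters--) {
         uint64_t a = *src++;     /* LOADX (ldd.ab) */
         uint64_t b = *src++;
         uint64_t c = *src++;
         uint64_t d = *src++;
         *dst++ = a;              /* STOREX (std.ab) */
         *dst++ = b;
         *dst++ = c;
         *dst++ = d;
     }
     /* The remaining (bytes & ZOLAND) == (bytes & 0x1F) bytes are handled
      * by the tail loops in the assembly. */
 }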
