aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorCraig Blackmore <craig.blackmore@embecosm.com>2024-10-18 09:01:35 -0600
committerJeff Law <jlaw@ventanamicro.com>2024-10-18 09:01:35 -0600
commitf244492ec258d84ab253bd58ad57f31c65a2312d (patch)
treeaf1311b0d4350ed4adff3a2afba8340592a13dc5 /gcc
parent3a12ac403251e0a1542609d7a4d8a464a5e1dc86 (diff)
downloadgcc-f244492ec258d84ab253bd58ad57f31c65a2312d.zip
gcc-f244492ec258d84ab253bd58ad57f31c65a2312d.tar.gz
gcc-f244492ec258d84ab253bd58ad57f31c65a2312d.tar.bz2
[PATCH 1/7] RISC-V: Fix indentation in riscv_vector::expand_block_move [NFC]
gcc/ChangeLog: * config/riscv/riscv-string.cc (expand_block_move): Fix indentation.
Diffstat (limited to 'gcc')
-rw-r--r--gcc/config/riscv/riscv-string.cc32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/gcc/config/riscv/riscv-string.cc b/gcc/config/riscv/riscv-string.cc
index 4bb8bce..0c5ffd7 100644
--- a/gcc/config/riscv/riscv-string.cc
+++ b/gcc/config/riscv/riscv-string.cc
@@ -1086,22 +1086,22 @@ expand_block_move (rtx dst_in, rtx src_in, rtx length_in)
{
HOST_WIDE_INT length = INTVAL (length_in);
- /* By using LMUL=8, we can copy as many bytes in one go as there
- are bits in a vector register. If the entire block thus fits,
- we don't need a loop. */
- if (length <= TARGET_MIN_VLEN)
- {
- need_loop = false;
-
- /* If a single scalar load / store pair can do the job, leave it
- to the scalar code to do that. */
- /* ??? If fast unaligned access is supported, the scalar code could
- use suitably sized scalars irrespective of alignment. If that
- gets fixed, we have to adjust the test here. */
-
- if (pow2p_hwi (length) && length <= potential_ew)
- return false;
- }
+ /* By using LMUL=8, we can copy as many bytes in one go as there
+ are bits in a vector register. If the entire block thus fits,
+ we don't need a loop. */
+ if (length <= TARGET_MIN_VLEN)
+ {
+ need_loop = false;
+
+ /* If a single scalar load / store pair can do the job, leave it
+ to the scalar code to do that. */
+ /* ??? If fast unaligned access is supported, the scalar code could
+ use suitably sized scalars irrespective of alignment. If that
+ gets fixed, we have to adjust the test here. */
+
+ if (pow2p_hwi (length) && length <= potential_ew)
+ return false;
+ }
/* Find the vector mode to use. Using the largest possible element
size is likely to give smaller constants, and thus potentially