/* Extra machine modes for RISC-V target.
   Copyright (C) 2011-2023 Free Software Foundation, Inc.
   Contributed by Andrew Waterman (andrew@sifive.com).
   Based on MIPS target for GNU compiler.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

FLOAT_MODE (HF, 2, ieee_half_format);
FLOAT_MODE (TF, 16, ieee_quad_format);

/* Vector modes.  */

/* Encode the ratio of SEW/LMUL into the mask modes.  The following mask
   modes are defined.  */

/* | Mode     | MIN_VLEN = 32 | MIN_VLEN = 64 | MIN_VLEN = 128 |
   |          | SEW/LMUL      | SEW/LMUL      | SEW/LMUL       |
   | VNx1BI   | 32            | 64            | 128            |
   | VNx2BI   | 16            | 32            | 64             |
   | VNx4BI   | 8             | 16            | 32             |
   | VNx8BI   | 4             | 8             | 16             |
   | VNx16BI  | 2             | 4             | 8              |
   | VNx32BI  | 1             | 2             | 4              |
   | VNx64BI  | N/A           | 1             | 2              |
   | VNx128BI | N/A           | N/A           | 1              |  */
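
/* For example (reading the table above), with MIN_VLEN = 64 a comparison of
   SEW = 16 elements at LMUL = 1 produces 64 * 1 / 16 = 4 mask bits per
   minimal-length register, i.e. SEW/LMUL = 16, so the result uses VNx4BImode.
   Every other combination with the same ratio (SEW = 8 at LMUL = 1/2,
   SEW = 32 at LMUL = 2, ...) shares that mask mode.  */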

/* For RVV mask modes, each boolean value occupies 1 bit.
   The fourth argument specifies the minimal possible byte size of the vector
   mode; the actual size is adjusted by ADJUST_BYTESIZE.  */
VECTOR_BOOL_MODE (VNx1BI, 1, BI, 1);
VECTOR_BOOL_MODE (VNx2BI, 2, BI, 1);
VECTOR_BOOL_MODE (VNx4BI, 4, BI, 1);
VECTOR_BOOL_MODE (VNx8BI, 8, BI, 1);
VECTOR_BOOL_MODE (VNx16BI, 16, BI, 2);
VECTOR_BOOL_MODE (VNx32BI, 32, BI, 4);
VECTOR_BOOL_MODE (VNx64BI, 64, BI, 8);
VECTOR_BOOL_MODE (VNx128BI, 128, BI, 16);
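
/* Since each mask element is 1 bit, the minimal byte size above is
   ceil (nunits / 8) with a floor of one byte: VNx1BI through VNx8BI fit in a
   single byte, VNx16BI needs 2 bytes, and so on up to 16 bytes for
   VNx128BI.  */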

ADJUST_NUNITS (VNx1BI, riscv_v_adjust_nunits (VNx1BImode, 1));
ADJUST_NUNITS (VNx2BI, riscv_v_adjust_nunits (VNx2BImode, 2));
ADJUST_NUNITS (VNx4BI, riscv_v_adjust_nunits (VNx4BImode, 4));
ADJUST_NUNITS (VNx8BI, riscv_v_adjust_nunits (VNx8BImode, 8));
ADJUST_NUNITS (VNx16BI, riscv_v_adjust_nunits (VNx16BImode, 16));
ADJUST_NUNITS (VNx32BI, riscv_v_adjust_nunits (VNx32BImode, 32));
ADJUST_NUNITS (VNx64BI, riscv_v_adjust_nunits (VNx64BImode, 64));
ADJUST_NUNITS (VNx128BI, riscv_v_adjust_nunits (VNx128BImode, 128));

ADJUST_ALIGNMENT (VNx1BI, 1);
ADJUST_ALIGNMENT (VNx2BI, 1);
ADJUST_ALIGNMENT (VNx4BI, 1);
ADJUST_ALIGNMENT (VNx8BI, 1);
ADJUST_ALIGNMENT (VNx16BI, 1);
ADJUST_ALIGNMENT (VNx32BI, 1);
ADJUST_ALIGNMENT (VNx64BI, 1);
ADJUST_ALIGNMENT (VNx128BI, 1);

ADJUST_BYTESIZE (VNx1BI, riscv_v_adjust_bytesize (VNx1BImode, 1));
ADJUST_BYTESIZE (VNx2BI, riscv_v_adjust_bytesize (VNx2BImode, 1));
ADJUST_BYTESIZE (VNx4BI, riscv_v_adjust_bytesize (VNx4BImode, 1));
ADJUST_BYTESIZE (VNx8BI, riscv_v_adjust_bytesize (VNx8BImode, 1));
ADJUST_BYTESIZE (VNx16BI, riscv_v_adjust_bytesize (VNx16BImode, 2));
ADJUST_BYTESIZE (VNx32BI, riscv_v_adjust_bytesize (VNx32BImode, 4));
ADJUST_BYTESIZE (VNx64BI, riscv_v_adjust_bytesize (VNx64BImode, 8));
ADJUST_BYTESIZE (VNx128BI, riscv_v_adjust_bytesize (VNx128BImode, 16));

ADJUST_PRECISION (VNx1BI, riscv_v_adjust_precision (VNx1BImode, 1));
ADJUST_PRECISION (VNx2BI, riscv_v_adjust_precision (VNx2BImode, 2));
ADJUST_PRECISION (VNx4BI, riscv_v_adjust_precision (VNx4BImode, 4));
ADJUST_PRECISION (VNx8BI, riscv_v_adjust_precision (VNx8BImode, 8));
ADJUST_PRECISION (VNx16BI, riscv_v_adjust_precision (VNx16BImode, 16));
ADJUST_PRECISION (VNx32BI, riscv_v_adjust_precision (VNx32BImode, 32));
ADJUST_PRECISION (VNx64BI, riscv_v_adjust_precision (VNx64BImode, 64));
ADJUST_PRECISION (VNx128BI, riscv_v_adjust_precision (VNx128BImode, 128));

/*
   | Mode        | MIN_VLEN=32 | MIN_VLEN=32 | MIN_VLEN=64 | MIN_VLEN=64 | MIN_VLEN=128 | MIN_VLEN=128 |
   |             | LMUL        | SEW/LMUL    | LMUL        | SEW/LMUL    | LMUL         | SEW/LMUL     |
   | VNx1QI      | MF4         | 32          | MF8         | 64          | N/A          | N/A          |
   | VNx2QI      | MF2         | 16          | MF4         | 32          | MF8          | 64           |
   | VNx4QI      | M1          | 8           | MF2         | 16          | MF4          | 32           |
   | VNx8QI      | M2          | 4           | M1          | 8           | MF2          | 16           |
   | VNx16QI     | M4          | 2           | M2          | 4           | M1           | 8            |
   | VNx32QI     | M8          | 1           | M4          | 2           | M2           | 4            |
   | VNx64QI     | N/A         | N/A         | M8          | 1           | M4           | 2            |
   | VNx128QI    | N/A         | N/A         | N/A         | N/A         | M8           | 1            |
   | VNx1(HI|HF) | MF2         | 32          | MF4         | 64          | N/A          | N/A          |
   | VNx2(HI|HF) | M1          | 16          | MF2         | 32          | MF4          | 64           |
   | VNx4(HI|HF) | M2          | 8           | M1          | 16          | MF2          | 32           |
   | VNx8(HI|HF) | M4          | 4           | M2          | 8           | M1           | 16           |
   | VNx16(HI|HF)| M8          | 2           | M4          | 4           | M2           | 8            |
   | VNx32(HI|HF)| N/A         | N/A         | M8          | 2           | M4           | 4            |
   | VNx64(HI|HF)| N/A         | N/A         | N/A         | N/A         | M8           | 2            |
   | VNx1(SI|SF) | M1          | 32          | MF2         | 64          | MF2          | 64           |
   | VNx2(SI|SF) | M2          | 16          | M1          | 32          | M1           | 32           |
   | VNx4(SI|SF) | M4          | 8           | M2          | 16          | M2           | 16           |
   | VNx8(SI|SF) | M8          | 4           | M4          | 8           | M4           | 8            |
   | VNx16(SI|SF)| N/A         | N/A         | M8          | 4           | M8           | 4            |
   | VNx1(DI|DF) | N/A         | N/A         | M1          | 64          | N/A          | N/A          |
   | VNx2(DI|DF) | N/A         | N/A         | M2          | 32          | M1           | 64           |
   | VNx4(DI|DF) | N/A         | N/A         | M4          | 16          | M2           | 32           |
   | VNx8(DI|DF) | N/A         | N/A         | M8          | 8           | M4           | 16           |
   | VNx16(DI|DF)| N/A         | N/A         | N/A         | N/A         | M8           | 8            |
*/
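
/* For example, with MIN_VLEN = 64 the minimal VNx4SImode vector is
   4 * 32 = 128 bits, i.e. two 64-bit vector registers, hence LMUL = M2 and
   SEW/LMUL = 32 / 2 = 16 in the table above.  */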

/* Define RVV modes whose sizes are multiples of 64-bit chunks.  */
#define RVV_MODES(NVECS, VB, VH, VS, VD)                                       \
  VECTOR_MODE_WITH_PREFIX (VNx, INT, QI, 8 * NVECS, 0);                        \
  VECTOR_MODE_WITH_PREFIX (VNx, INT, HI, 4 * NVECS, 0);                        \
  VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, HF, 4 * NVECS, 0);                      \
  VECTOR_MODE_WITH_PREFIX (VNx, INT, SI, 2 * NVECS, 0);                        \
  VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, SF, 2 * NVECS, 0);                      \
  VECTOR_MODE_WITH_PREFIX (VNx, INT, DI, NVECS, 0);                            \
  VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, DF, NVECS, 0);                          \
                                                                               \
  ADJUST_NUNITS (VB##QI, riscv_v_adjust_nunits (VB##QI##mode, NVECS * 8));     \
  ADJUST_NUNITS (VH##HI, riscv_v_adjust_nunits (VH##HI##mode, NVECS * 4));     \
  ADJUST_NUNITS (VS##SI, riscv_v_adjust_nunits (VS##SI##mode, NVECS * 2));     \
  ADJUST_NUNITS (VD##DI, riscv_v_adjust_nunits (VD##DI##mode, NVECS));         \
  ADJUST_NUNITS (VH##HF, riscv_v_adjust_nunits (VH##HF##mode, NVECS * 4));     \
  ADJUST_NUNITS (VS##SF, riscv_v_adjust_nunits (VS##SF##mode, NVECS * 2));     \
  ADJUST_NUNITS (VD##DF, riscv_v_adjust_nunits (VD##DF##mode, NVECS));         \
                                                                               \
  ADJUST_ALIGNMENT (VB##QI, 1);                                                \
  ADJUST_ALIGNMENT (VH##HI, 2);                                                \
  ADJUST_ALIGNMENT (VS##SI, 4);                                                \
  ADJUST_ALIGNMENT (VD##DI, 8);                                                \
  ADJUST_ALIGNMENT (VH##HF, 2);                                                \
  ADJUST_ALIGNMENT (VS##SF, 4);                                                \
  ADJUST_ALIGNMENT (VD##DF, 8);

RVV_MODES (1, VNx8, VNx4, VNx2, VNx1)
RVV_MODES (2, VNx16, VNx8, VNx4, VNx2)
RVV_MODES (4, VNx32, VNx16, VNx8, VNx4)
RVV_MODES (8, VNx64, VNx32, VNx16, VNx8)
RVV_MODES (16, VNx128, VNx64, VNx32, VNx16)
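
/* For example, RVV_MODES (1, VNx8, VNx4, VNx2, VNx1) above expands to
   VNx8QImode, VNx4HImode, VNx4HFmode, VNx2SImode, VNx2SFmode, VNx1DImode and
   VNx1DFmode, each holding one 64-bit chunk's worth of elements, while
   RVV_MODES (16, ...) defines the modes that span sixteen chunks.  */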

VECTOR_MODES_WITH_PREFIX (VNx, INT, 4, 0);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 4, 0);
ADJUST_NUNITS (VNx4QI, riscv_v_adjust_nunits (VNx4QImode, 4));
ADJUST_NUNITS (VNx2HI, riscv_v_adjust_nunits (VNx2HImode, 2));
ADJUST_NUNITS (VNx2HF, riscv_v_adjust_nunits (VNx2HFmode, 2));
ADJUST_ALIGNMENT (VNx4QI, 1);
ADJUST_ALIGNMENT (VNx2HI, 2);
ADJUST_ALIGNMENT (VNx2HF, 2);

/* 'VECTOR_MODES_WITH_PREFIX' does not allow ncomponents < 2.
   So we use 'VECTOR_MODE_WITH_PREFIX' to define VNx1SImode and VNx1SFmode.  */
VECTOR_MODE_WITH_PREFIX (VNx, INT, SI, 1, 0);
VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, SF, 1, 0);
ADJUST_NUNITS (VNx1SI, riscv_v_adjust_nunits (VNx1SImode, 1));
ADJUST_NUNITS (VNx1SF, riscv_v_adjust_nunits (VNx1SFmode, 1));
ADJUST_ALIGNMENT (VNx1SI, 4);
ADJUST_ALIGNMENT (VNx1SF, 4);

VECTOR_MODES_WITH_PREFIX (VNx, INT, 2, 0);
ADJUST_NUNITS (VNx2QI, riscv_v_adjust_nunits (VNx2QImode, 2));
ADJUST_ALIGNMENT (VNx2QI, 1);

/* 'VECTOR_MODES_WITH_PREFIX' does not allow ncomponents < 2.
   So we use 'VECTOR_MODE_WITH_PREFIX' to define VNx1HImode and VNx1HFmode.  */
VECTOR_MODE_WITH_PREFIX (VNx, INT, HI, 1, 0);
VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, HF, 1, 0);
ADJUST_NUNITS (VNx1HI, riscv_v_adjust_nunits (VNx1HImode, 1));
ADJUST_NUNITS (VNx1HF, riscv_v_adjust_nunits (VNx1HFmode, 1));
ADJUST_ALIGNMENT (VNx1HI, 2);
ADJUST_ALIGNMENT (VNx1HF, 2);

/* 'VECTOR_MODES_WITH_PREFIX' does not allow ncomponents < 2.
   So we use 'VECTOR_MODE_WITH_PREFIX' to define VNx1QImode.  */
VECTOR_MODE_WITH_PREFIX (VNx, INT, QI, 1, 0);
ADJUST_NUNITS (VNx1QI, riscv_v_adjust_nunits (VNx1QImode, 1));
ADJUST_ALIGNMENT (VNx1QI, 1);

/* Tuple modes for segment loads/stores.  The number of fields (NF) can range
   from 2 to 8.  */

/*
   | Mode           | MIN_VLEN=32 | MIN_VLEN=32 | MIN_VLEN=64 | MIN_VLEN=64 | MIN_VLEN=128 | MIN_VLEN=128 |
   |                | LMUL        | SEW/LMUL    | LMUL        | SEW/LMUL    | LMUL         | SEW/LMUL     |
   | VNxNFx1QI      | MF4         | 32          | MF8         | 64          | N/A          | N/A          |
   | VNxNFx2QI      | MF2         | 16          | MF4         | 32          | MF8          | 64           |
   | VNxNFx4QI      | M1          | 8           | MF2         | 16          | MF4          | 32           |
   | VNxNFx8QI      | M2          | 4           | M1          | 8           | MF2          | 16           |
   | VNxNFx16QI     | M4          | 2           | M2          | 4           | M1           | 8            |
   | VNxNFx32QI     | M8          | 1           | M4          | 2           | M2           | 4            |
   | VNxNFx64QI     | N/A         | N/A         | M8          | 1           | M4           | 2            |
   | VNxNFx128QI    | N/A         | N/A         | N/A         | N/A         | M8           | 1            |
   | VNxNFx1(HI|HF) | MF2         | 32          | MF4         | 64          | N/A          | N/A          |
   | VNxNFx2(HI|HF) | M1          | 16          | MF2         | 32          | MF4          | 64           |
   | VNxNFx4(HI|HF) | M2          | 8           | M1          | 16          | MF2          | 32           |
   | VNxNFx8(HI|HF) | M4          | 4           | M2          | 8           | M1           | 16           |
   | VNxNFx16(HI|HF)| M8          | 2           | M4          | 4           | M2           | 8            |
   | VNxNFx32(HI|HF)| N/A         | N/A         | M8          | 2           | M4           | 4            |
   | VNxNFx64(HI|HF)| N/A         | N/A         | N/A         | N/A         | M8           | 2            |
   | VNxNFx1(SI|SF) | M1          | 32          | MF2         | 64          | MF2          | 64           |
   | VNxNFx2(SI|SF) | M2          | 16          | M1          | 32          | M1           | 32           |
   | VNxNFx4(SI|SF) | M4          | 8           | M2          | 16          | M2           | 16           |
   | VNxNFx8(SI|SF) | M8          | 4           | M4          | 8           | M4           | 8            |
   | VNxNFx16(SI|SF)| N/A         | N/A         | M8          | 4           | M8           | 4            |
   | VNxNFx1(DI|DF) | N/A         | N/A         | M1          | 64          | N/A          | N/A          |
   | VNxNFx2(DI|DF) | N/A         | N/A         | M2          | 32          | M1           | 64           |
   | VNxNFx4(DI|DF) | N/A         | N/A         | M4          | 16          | M2           | 32           |
   | VNxNFx8(DI|DF) | N/A         | N/A         | M8          | 8           | M4           | 16           |
   | VNxNFx16(DI|DF)| N/A         | N/A         | N/A         | N/A         | M8           | 8            |
*/

#define RVV_TUPLE_MODES(NBYTES, NSUBPARTS, VB, VH, VS, VD)                     \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, QI, NBYTES, 1);             \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, HI, NBYTES / 2, 1);         \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, SI, NBYTES / 4, 1);         \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, FLOAT, SF, NBYTES / 4, 1);       \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, DI, NBYTES / 8, 1);         \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, FLOAT, DF, NBYTES / 8, 1);       \
  ADJUST_NUNITS (VNx##NSUBPARTS##x##VB##QI,                                    \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x##VB##QI##mode,       \
					VB * NSUBPARTS));                      \
  ADJUST_NUNITS (VNx##NSUBPARTS##x##VH##HI,                                    \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x##VH##HI##mode,       \
					VH * NSUBPARTS));                      \
  ADJUST_NUNITS (VNx##NSUBPARTS##x##VS##SI,                                    \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x##VS##SI##mode,       \
					VS * NSUBPARTS));                      \
  ADJUST_NUNITS (VNx##NSUBPARTS##x##VD##DI,                                    \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x##VD##DI##mode,       \
					VD * NSUBPARTS));                      \
  ADJUST_NUNITS (VNx##NSUBPARTS##x##VS##SF,                                    \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x##VS##SF##mode,       \
					VS * NSUBPARTS));                      \
  ADJUST_NUNITS (VNx##NSUBPARTS##x##VD##DF,                                    \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x##VD##DF##mode,       \
					VD * NSUBPARTS));                      \
                                                                               \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x##VB##QI, 1);                             \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x##VH##HI, 2);                             \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x##VS##SI, 4);                             \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x##VD##DI, 8);                             \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x##VS##SF, 4);                             \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x##VD##DF, 8);

RVV_TUPLE_MODES (8, 2, 8, 4, 2, 1)
RVV_TUPLE_MODES (8, 3, 8, 4, 2, 1)
RVV_TUPLE_MODES (8, 4, 8, 4, 2, 1)
RVV_TUPLE_MODES (8, 5, 8, 4, 2, 1)
RVV_TUPLE_MODES (8, 6, 8, 4, 2, 1)
RVV_TUPLE_MODES (8, 7, 8, 4, 2, 1)
RVV_TUPLE_MODES (8, 8, 8, 4, 2, 1)

RVV_TUPLE_MODES (16, 2, 16, 8, 4, 2)
RVV_TUPLE_MODES (16, 3, 16, 8, 4, 2)
RVV_TUPLE_MODES (16, 4, 16, 8, 4, 2)
RVV_TUPLE_MODES (16, 5, 16, 8, 4, 2)
RVV_TUPLE_MODES (16, 6, 16, 8, 4, 2)
RVV_TUPLE_MODES (16, 7, 16, 8, 4, 2)
RVV_TUPLE_MODES (16, 8, 16, 8, 4, 2)

RVV_TUPLE_MODES (32, 2, 32, 16, 8, 4)
RVV_TUPLE_MODES (32, 3, 32, 16, 8, 4)
RVV_TUPLE_MODES (32, 4, 32, 16, 8, 4)

RVV_TUPLE_MODES (64, 2, 64, 32, 16, 8)

#define RVV_TUPLE_PARTIAL_MODES(NSUBPARTS)                                     \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, QI, 1, 1);                  \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, HI, 1, 1);                  \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, SI, 1, 1);                  \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, FLOAT, SF, 1, 1);                \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, QI, 2, 1);                  \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, HI, 2, 1);                  \
  VECTOR_MODE_WITH_PREFIX (VNx##NSUBPARTS##x, INT, QI, 4, 1);                  \
                                                                               \
  ADJUST_NUNITS (VNx##NSUBPARTS##x1QI,                                         \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x1QI##mode,            \
					NSUBPARTS));                           \
  ADJUST_NUNITS (VNx##NSUBPARTS##x1HI,                                         \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x1HI##mode,            \
					NSUBPARTS));                           \
  ADJUST_NUNITS (VNx##NSUBPARTS##x1SI,                                         \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x1SI##mode,            \
					NSUBPARTS));                           \
  ADJUST_NUNITS (VNx##NSUBPARTS##x1SF,                                         \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x1SF##mode,            \
					NSUBPARTS));                           \
  ADJUST_NUNITS (VNx##NSUBPARTS##x2QI,                                         \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x2QI##mode,            \
					2 * NSUBPARTS));                       \
  ADJUST_NUNITS (VNx##NSUBPARTS##x2HI,                                         \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x2HI##mode,            \
					2 * NSUBPARTS));                       \
  ADJUST_NUNITS (VNx##NSUBPARTS##x4QI,                                         \
		 riscv_v_adjust_nunits (VNx##NSUBPARTS##x4QI##mode,            \
					4 * NSUBPARTS));                       \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x1QI, 1);                                  \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x1HI, 2);                                  \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x1SI, 4);                                  \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x1SF, 4);                                  \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x2QI, 1);                                  \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x2HI, 2);                                  \
  ADJUST_ALIGNMENT (VNx##NSUBPARTS##x4QI, 1);

RVV_TUPLE_PARTIAL_MODES (2)
RVV_TUPLE_PARTIAL_MODES (3)
RVV_TUPLE_PARTIAL_MODES (4)
RVV_TUPLE_PARTIAL_MODES (5)
RVV_TUPLE_PARTIAL_MODES (6)
RVV_TUPLE_PARTIAL_MODES (7)
RVV_TUPLE_PARTIAL_MODES (8)
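
/* For example, RVV_TUPLE_PARTIAL_MODES (2) above defines VNx2x1QImode,
   VNx2x1HImode, VNx2x1SImode, VNx2x1SFmode, VNx2x2QImode, VNx2x2HImode and
   VNx2x4QImode, the tuple modes whose individual vectors are smaller than a
   64-bit chunk.  */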

/* TODO: According to the RISC-V 'V' ISA spec, the maximum vector length can
   be 65536 bits for a single vector register, so the largest vector mode in
   GCC could be 65536 * 8 bits (LMUL = 8).
   However, 'GET_MODE_SIZE' uses poly_uint16/unsigned short, which would
   overflow if we specified a vector length of 65536.  Supporting this
   requires changes outside the RISC-V port; we will support it in the
   future.  */
#define MAX_BITSIZE_MODE_ANY_MODE (4096 * 8)
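
/* 4096 * 8 bits = 4096 bytes, i.e. a 4096-bit vector register at LMUL = 8,
   which keeps GET_MODE_SIZE within the range of an unsigned short.  */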

/* Coefficient 1 is multiplied by the number of 64-bit/32-bit chunks in a vector
   minus one.  */
#define NUM_POLY_INT_COEFFS 2
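
/* A sketch of the encoding, assuming MIN_VLEN = 64: VNx8QImode occupies one
   64-bit chunk at minimum, so its size is the poly_int (8, 8), i.e.
   8 + 8 * x bytes, where x is the number of 64-bit chunks beyond the
   first.  */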