path: root/libffi/src
author    Lulu Cheng <chenglulu@loongson.cn>  2023-08-22 19:56:21 +0800
committer Lulu Cheng <chenglulu@loongson.cn>  2023-08-23 14:11:37 +0800
commit    4beacf3cb8d128b3b9c8c2d3824693b895414428 (patch)
tree      c02d32775797e323bc201f8ce551de340e808f19 /libffi/src
parent    af3820d10d706458633e9e232a989965de24aa49 (diff)
download  gcc-4beacf3cb8d128b3b9c8c2d3824693b895414428.zip
          gcc-4beacf3cb8d128b3b9c8c2d3824693b895414428.tar.gz
          gcc-4beacf3cb8d128b3b9c8c2d3824693b895414428.tar.bz2
libffi: Backport of LoongArch support for libffi.
This is a backport of <https://github.com/libffi/libffi/commit/f259a6f6de>,
and contains modifications to commit 5a4774cd4d, as well as the LoongArch
schema portion of commit ee22ecbd11.  This is needed for libgo.

libffi/ChangeLog:

        PR libffi/108682
        * configure.host: Add LoongArch support.
        * Makefile.am: Likewise.
        * Makefile.in: Regenerate.
        * src/loongarch64/ffi.c: New file.
        * src/loongarch64/ffitarget.h: New file.
        * src/loongarch64/sysv.S: New file.
Diffstat (limited to 'libffi/src')
-rw-r--r--  libffi/src/loongarch64/ffi.c         621
-rw-r--r--  libffi/src/loongarch64/ffitarget.h    82
-rw-r--r--  libffi/src/loongarch64/sysv.S        327
3 files changed, 1030 insertions(+), 0 deletions(-)
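
[Editorial note: for context, the code added below is driven through libffi's
public call interface.  The following is a minimal sketch of a caller; it uses
only the standard libffi API (ffi_prep_cif, ffi_call, FFI_FN) and is not part
of this commit.]

#include <ffi.h>
#include <stdio.h>

/* Minimal sketch of the call path the new port implements.  Standard
   libffi API only; nothing here is part of this commit.  */
static double add (double a, double b) { return a + b; }

int
main (void)
{
  ffi_cif cif;
  ffi_type *args[2] = { &ffi_type_double, &ffi_type_double };
  double a = 1.5, b = 2.25, result;
  void *values[2] = { &a, &b };

  /* On an LP64D LoongArch toolchain, FFI_DEFAULT_ABI resolves to FFI_LP64D
     (see ffitarget.h below); both doubles are marshalled into fa0/fa1.  */
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_double, args)
      != FFI_OK)
    return 1;
  ffi_call (&cif, FFI_FN (add), &result, values);
  printf ("%g\n", result); /* 3.75 */
  return 0;
}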
diff --git a/libffi/src/loongarch64/ffi.c b/libffi/src/loongarch64/ffi.c
new file mode 100644
index 0000000..140be3b
--- /dev/null
+++ b/libffi/src/loongarch64/ffi.c
@@ -0,0 +1,621 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2022 Xu Chenghua <xuchenghua@loongson.cn>
+ 2022 Cheng Lulu <chenglulu@loongson.cn>
+ Based on RISC-V port
+
+ LoongArch Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#if defined(__loongarch_soft_float)
+# define ABI_FRLEN 0
+#elif defined(__loongarch_single_float)
+# define ABI_FRLEN 32
+# define ABI_FLOAT float
+#elif defined(__loongarch_double_float)
+# define ABI_FRLEN 64
+# define ABI_FLOAT double
+#else
+#error unsupported LoongArch floating-point ABI
+#endif
+
+#define NARGREG 8
+#define STKALIGN 16
+#define MAXCOPYARG (2 * sizeof (double))
+
+/* call_context registers:
+   - 8 floating-point parameter/result registers.
+   - 8 integer parameter/result registers.
+   - 2 slots used by the assembly code to construct its own stack frame
+     in place:
+     - frame pointer
+     - return address
+*/
+typedef struct call_context
+{
+ ABI_FLOAT fa[8];
+ size_t a[10];
+} call_context;
+
+typedef struct call_builder
+{
+ call_context *aregs;
+ int used_integer;
+ int used_float;
+ size_t *used_stack;
+ size_t *stack;
+ size_t next_struct_area;
+} call_builder;
+
+/* Integer (not pointer) types no wider than the ABI GRLEN.  */
+/* FFI_TYPE_INT does not appear to be used. */
+#if __SIZEOF_POINTER__ == 8
+# define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64)
+#else
+# define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32)
+#endif
+
+#if ABI_FRLEN
+typedef struct float_struct_info
+{
+ char as_elements;
+ char type1;
+ char offset2;
+ char type2;
+} float_struct_info;
+
+#if ABI_FRLEN >= 64
+# define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE)
+#else
+# define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT)
+#endif
+
+static ffi_type **
+flatten_struct (ffi_type *in, ffi_type **out, ffi_type **out_end)
+{
+ int i;
+
+ if (out == out_end)
+ return out;
+ if (in->type != FFI_TYPE_STRUCT)
+ *(out++) = in;
+ else
+ for (i = 0; in->elements[i]; i++)
+ out = flatten_struct (in->elements[i], out, out_end);
+ return out;
+}
+
+/* Structs with at most two fields after flattening, one of which is of
+ floating point type, are passed in multiple registers if sufficient
+ registers are available. */
+static float_struct_info
+struct_passed_as_elements (call_builder *cb, ffi_type *top)
+{
+ float_struct_info ret = {0, 0, 0, 0};
+ ffi_type *fields[3];
+ int num_floats, num_ints;
+ int num_fields = flatten_struct (top, fields, fields + 3) - fields;
+
+ if (num_fields == 1)
+ {
+ if (IS_FLOAT (fields[0]->type))
+ {
+ ret.as_elements = 1;
+ ret.type1 = fields[0]->type;
+ }
+ }
+ else if (num_fields == 2)
+ {
+ num_floats = IS_FLOAT (fields[0]->type) + IS_FLOAT (fields[1]->type);
+ num_ints = IS_INT (fields[0]->type) + IS_INT (fields[1]->type);
+ if (num_floats == 0 || num_floats + num_ints != 2)
+ return ret;
+ if (cb->used_float + num_floats > NARGREG
+ || cb->used_integer + (2 - num_floats) > NARGREG)
+ return ret;
+ if (!IS_FLOAT (fields[0]->type) && !IS_FLOAT (fields[1]->type))
+ return ret;
+
+ ret.type1 = fields[0]->type;
+ ret.type2 = fields[1]->type;
+ ret.offset2 = FFI_ALIGN (fields[0]->size, fields[1]->alignment);
+ ret.as_elements = 1;
+ }
+ return ret;
+}
+#endif
+
+/* Allocates a single register, float register, or GRLEN-sized stack slot to a
+ datum. */
+static void
+marshal_atom (call_builder *cb, int type, void *data)
+{
+ size_t value = 0;
+ switch (type)
+ {
+ case FFI_TYPE_UINT8:
+ value = *(uint8_t *) data;
+ break;
+ case FFI_TYPE_SINT8:
+ value = *(int8_t *) data;
+ break;
+ case FFI_TYPE_UINT16:
+ value = *(uint16_t *) data;
+ break;
+ case FFI_TYPE_SINT16:
+ value = *(int16_t *) data;
+ break;
+ /* 32-bit quantities are always sign-extended in the ABI. */
+ case FFI_TYPE_UINT32:
+ value = *(int32_t *) data;
+ break;
+ case FFI_TYPE_SINT32:
+ value = *(int32_t *) data;
+ break;
+#if __SIZEOF_POINTER__ == 8
+ case FFI_TYPE_UINT64:
+ value = *(uint64_t *) data;
+ break;
+ case FFI_TYPE_SINT64:
+ value = *(int64_t *) data;
+ break;
+#endif
+ case FFI_TYPE_POINTER:
+ value = *(size_t *) data;
+ break;
+
+#if ABI_FRLEN >= 32
+ case FFI_TYPE_FLOAT:
+ *(float *)(cb->aregs->fa + cb->used_float++) = *(float *) data;
+ return;
+#endif
+#if ABI_FRLEN >= 64
+ case FFI_TYPE_DOUBLE:
+ (cb->aregs->fa[cb->used_float++]) = *(double *) data;
+ return;
+#endif
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+
+ if (cb->used_integer == NARGREG)
+ *cb->used_stack++ = value;
+ else
+ cb->aregs->a[cb->used_integer++] = value;
+}
+
+static void
+unmarshal_atom (call_builder *cb, int type, void *data)
+{
+ size_t value;
+ switch (type)
+ {
+#if ABI_FRLEN >= 32
+ case FFI_TYPE_FLOAT:
+ *(float *) data = *(float *)(cb->aregs->fa + cb->used_float++);
+ return;
+#endif
+#if ABI_FRLEN >= 64
+ case FFI_TYPE_DOUBLE:
+ *(double *) data = cb->aregs->fa[cb->used_float++];
+ return;
+#endif
+ }
+
+ if (cb->used_integer == NARGREG)
+ value = *cb->used_stack++;
+ else
+ value = cb->aregs->a[cb->used_integer++];
+
+ switch (type)
+ {
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+#if __SIZEOF_POINTER__ == 8
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+#endif
+ case FFI_TYPE_POINTER:
+ *(ffi_arg *)data = value;
+ break;
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+}
+
+/* Allocate and copy a structure that is passed by value on the stack and
+ return a pointer to it. */
+static void *
+allocate_and_copy_struct_to_stack (call_builder *cb, void *data,
+ ffi_type *type)
+{
+ size_t dest = cb->next_struct_area - type->size;
+
+ dest = FFI_ALIGN_DOWN (dest, type->alignment);
+ cb->next_struct_area = dest;
+
+ return memcpy ((char *)cb->stack + dest, data, type->size);
+}
+
+/* Adds an argument to a call, or a return value that is not passed by
+   reference.  */
+static void
+marshal (call_builder *cb, ffi_type *type, int var, void *data)
+{
+ size_t realign[2];
+
+#if ABI_FRLEN
+ if (!var && type->type == FFI_TYPE_STRUCT)
+ {
+ float_struct_info fsi = struct_passed_as_elements (cb, type);
+ if (fsi.as_elements)
+ {
+ marshal_atom (cb, fsi.type1, data);
+ if (fsi.offset2)
+ marshal_atom (cb, fsi.type2, ((char *) data) + fsi.offset2);
+ return;
+ }
+ }
+
+ if (!var && cb->used_float < NARGREG
+ && IS_FLOAT (type->type))
+ {
+ marshal_atom (cb, type->type, data);
+ return;
+ }
+
+ double promoted;
+ if (var && type->type == FFI_TYPE_FLOAT)
+ {
+ /* C standard requires promoting float -> double for variable arg. */
+ promoted = *(float *) data;
+ type = &ffi_type_double;
+ data = &promoted;
+ }
+#endif
+
+ if (type->size > 2 * __SIZEOF_POINTER__)
+ /* Pass by reference. */
+ {
+ allocate_and_copy_struct_to_stack (cb, data, type);
+ data = (char *)cb->stack + cb->next_struct_area;
+ marshal_atom (cb, FFI_TYPE_POINTER, &data);
+ }
+ else if (IS_INT (type->type) || type->type == FFI_TYPE_POINTER)
+ marshal_atom (cb, type->type, data);
+ else
+ {
+ /* Overlong integers, soft-float floats, and structs without special
+ float handling are treated identically from this point on. */
+
+ /* Variadics are aligned even in registers. */
+ if (type->alignment > __SIZEOF_POINTER__)
+ {
+ if (var)
+ cb->used_integer = FFI_ALIGN (cb->used_integer, 2);
+ cb->used_stack
+ = (size_t *) FFI_ALIGN (cb->used_stack, 2 * __SIZEOF_POINTER__);
+ }
+
+ memcpy (realign, data, type->size);
+ if (type->size > 0)
+ marshal_atom (cb, FFI_TYPE_POINTER, realign);
+ if (type->size > __SIZEOF_POINTER__)
+ marshal_atom (cb, FFI_TYPE_POINTER, realign + 1);
+ }
+}
+
+/* For arguments passed by reference, returns the pointer; otherwise the
+   argument is copied (up to MAXCOPYARG bytes).  */
+static void *
+unmarshal (call_builder *cb, ffi_type *type, int var, void *data)
+{
+ size_t realign[2];
+ void *pointer;
+
+#if ABI_FRLEN
+ if (!var && type->type == FFI_TYPE_STRUCT)
+ {
+ float_struct_info fsi = struct_passed_as_elements (cb, type);
+ if (fsi.as_elements)
+ {
+ unmarshal_atom (cb, fsi.type1, data);
+ if (fsi.offset2)
+ unmarshal_atom (cb, fsi.type2, ((char *) data) + fsi.offset2);
+ return data;
+ }
+ }
+
+ if (!var && cb->used_float < NARGREG
+ && IS_FLOAT (type->type))
+ {
+ unmarshal_atom (cb, type->type, data);
+ return data;
+ }
+
+ if (var && type->type == FFI_TYPE_FLOAT)
+ {
+ int m = cb->used_integer;
+ void *promoted
+ = m < NARGREG ? cb->aregs->a + m : cb->used_stack + m - NARGREG + 1;
+ *(float *) promoted = *(double *) promoted;
+ }
+#endif
+
+ if (type->size > 2 * __SIZEOF_POINTER__)
+ {
+ /* Pass by reference. */
+ unmarshal_atom (cb, FFI_TYPE_POINTER, (char *) &pointer);
+ return pointer;
+ }
+ else if (IS_INT (type->type) || type->type == FFI_TYPE_POINTER)
+ {
+ unmarshal_atom (cb, type->type, data);
+ return data;
+ }
+ else
+ {
+ /* Overlong integers, soft-float floats, and structs without special
+ float handling are treated identically from this point on. */
+
+ /* Variadics are aligned even in registers. */
+ if (type->alignment > __SIZEOF_POINTER__)
+ {
+ if (var)
+ cb->used_integer = FFI_ALIGN (cb->used_integer, 2);
+ cb->used_stack
+ = (size_t *) FFI_ALIGN (cb->used_stack, 2 * __SIZEOF_POINTER__);
+ }
+
+ if (type->size > 0)
+ unmarshal_atom (cb, FFI_TYPE_POINTER, realign);
+ if (type->size > __SIZEOF_POINTER__)
+ unmarshal_atom (cb, FFI_TYPE_POINTER, realign + 1);
+ memcpy (data, realign, type->size);
+ return data;
+ }
+}
+
+static int
+passed_by_ref (call_builder *cb, ffi_type *type, int var)
+{
+#if ABI_FRLEN
+ if (!var && type->type == FFI_TYPE_STRUCT)
+ {
+ float_struct_info fsi = struct_passed_as_elements (cb, type);
+ if (fsi.as_elements)
+ return 0;
+ }
+#endif
+
+ return type->size > 2 * __SIZEOF_POINTER__;
+}
+
+/* Perform machine dependent cif processing. */
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ cif->loongarch_nfixedargs = cif->nargs;
+ return FFI_OK;
+}
+
+/* Perform machine dependent cif processing when we have a variadic
+ function. */
+ffi_status
+ffi_prep_cif_machdep_var (ffi_cif *cif, unsigned int nfixedargs,
+ unsigned int ntotalargs)
+{
+ cif->loongarch_nfixedargs = nfixedargs;
+ return FFI_OK;
+}
+
+/* Low level routine for calling functions. */
+extern void ffi_call_asm (void *stack, struct call_context *regs,
+ void (*fn) (void), void *closure) FFI_HIDDEN;
+
+static void
+ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue,
+ void *closure)
+{
+ /* This is a conservative estimate, assuming a complex return value and
+ that all remaining arguments are long long / __int128.  */
+ size_t arg_bytes = cif->bytes;
+ size_t rval_bytes = 0;
+ if (rvalue == NULL && cif->rtype->size > 2 * __SIZEOF_POINTER__)
+ rval_bytes = FFI_ALIGN (cif->rtype->size, STKALIGN);
+ size_t alloc_size = arg_bytes + rval_bytes + sizeof (call_context);
+
+ /* The assembly code will deallocate all stack data at lower addresses
+ than the argument region, so we need to allocate the frame and the
+ return value after the arguments in a single allocation. */
+ size_t alloc_base;
+ /* Argument region must be 16-byte aligned in LP64 ABIs. */
+ if (_Alignof(max_align_t) >= STKALIGN)
+ /* Since sizeof long double is normally 16, the compiler will
+ guarantee alloca alignment to at least that much. */
+ alloc_base = (size_t) alloca (alloc_size);
+ else
+ alloc_base = FFI_ALIGN (alloca (alloc_size + STKALIGN - 1), STKALIGN);
+
+ if (rval_bytes)
+ rvalue = (void *) (alloc_base + arg_bytes);
+
+ call_builder cb;
+ cb.used_float = cb.used_integer = 0;
+ cb.aregs = (call_context *) (alloc_base + arg_bytes + rval_bytes);
+ cb.used_stack = (void *) alloc_base;
+ cb.stack = (void *) alloc_base;
+ cb.next_struct_area = arg_bytes;
+
+ int return_by_ref = passed_by_ref (&cb, cif->rtype, 0);
+ if (return_by_ref)
+ cb.aregs->a[cb.used_integer++] = (size_t)rvalue;
+
+ int i;
+ for (i = 0; i < cif->nargs; i++)
+ marshal (&cb, cif->arg_types[i], i >= cif->loongarch_nfixedargs,
+ avalue[i]);
+
+ ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure);
+
+ cb.used_float = cb.used_integer = 0;
+ if (!return_by_ref && rvalue)
+ unmarshal (&cb, cif->rtype, 0, rvalue);
+}
+
+void
+ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, NULL);
+}
+
+void
+ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue,
+ void *closure)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, closure);
+}
+
+extern void ffi_closure_asm (void) FFI_HIDDEN;
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure *closure, ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *),
+ void *user_data, void *codeloc)
+{
+ uint32_t *tramp = (uint32_t *) &closure->tramp[0];
+ uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm;
+
+ if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI)
+ return FFI_BAD_ABI;
+
+#if defined(FFI_EXEC_STATIC_TRAMP)
+ if (ffi_tramp_is_present(closure))
+ {
+ ffi_tramp_set_parms (closure->ftramp, ffi_closure_asm, closure);
+ goto out;
+ }
+#endif
+
+ /* Fill the dynamic trampoline. We will call ffi_closure_inner with codeloc,
+ not closure, but as long as the memory is readable it should work. */
+ tramp[0] = 0x1800000c; /* pcaddi $t0, 0 (i.e. $t0 <- tramp) */
+ tramp[1] = 0x28c0418d; /* ld.d $t1, $t0, 16 */
+ tramp[2] = 0x4c0001a0; /* jirl $zero, $t1, 0 */
+ tramp[3] = 0x03400000; /* nop */
+ tramp[4] = fn;
+ tramp[5] = fn >> 32;
+
+ __builtin___clear_cache (codeloc, codeloc + FFI_TRAMPOLINE_SIZE);
+
+#if defined(FFI_EXEC_STATIC_TRAMP)
+out:
+#endif
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ return FFI_OK;
+}
+
+extern void ffi_go_closure_asm (void) FFI_HIDDEN;
+
+ffi_status
+ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *))
+{
+ if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI)
+ return FFI_BAD_ABI;
+
+ closure->tramp = (void *) ffi_go_closure_asm;
+ closure->cif = cif;
+ closure->fun = fun;
+ return FFI_OK;
+}
+
+/* Called by the assembly code with aregs pointing to saved argument registers
+ and stack pointing to the stacked arguments. Return values passed in
+ registers will be reloaded from aregs. */
+void FFI_HIDDEN
+ffi_closure_inner (ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *),
+ void *user_data, size_t *stack, call_context *aregs)
+{
+ void **avalue = alloca (cif->nargs * sizeof (void *));
+ /* Storage for arguments which will be copied by unmarshal(). We could
+ theoretically avoid the copies in many cases and use at most 128 bytes
+ of memory, but allocating disjoint storage for each argument is
+ simpler. */
+ char *astorage = alloca (cif->nargs * MAXCOPYARG);
+ void *rvalue;
+ call_builder cb;
+ int return_by_ref;
+ int i;
+
+ cb.aregs = aregs;
+ cb.used_integer = cb.used_float = 0;
+ cb.used_stack = stack;
+
+ return_by_ref = passed_by_ref (&cb, cif->rtype, 0);
+ if (return_by_ref)
+ unmarshal (&cb, &ffi_type_pointer, 0, &rvalue);
+ else
+ rvalue = alloca (cif->rtype->size);
+
+ for (i = 0; i < cif->nargs; i++)
+ avalue[i]
+ = unmarshal (&cb, cif->arg_types[i], i >= cif->loongarch_nfixedargs,
+ astorage + i * MAXCOPYARG);
+
+ fun (cif, rvalue, avalue, user_data);
+
+ if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID)
+ {
+ cb.used_integer = cb.used_float = 0;
+ marshal (&cb, cif->rtype, 0, rvalue);
+ }
+}
+
+#if defined(FFI_EXEC_STATIC_TRAMP)
+void *
+ffi_tramp_arch (size_t *tramp_size, size_t *map_size)
+{
+ extern void *trampoline_code_table;
+
+ *tramp_size = 16;
+ /* A mapping size of 64K is chosen to cover the page sizes of 4K, 16K, and
+ 64K. */
+ *map_size = 1 << 16;
+ return &trampoline_code_table;
+}
+#endif
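
[Editorial note: a brief illustration of the struct_passed_as_elements() rule
above.  The struct types are hypothetical; the register assignments follow the
comments in ffi.c for the LP64D ABI, assuming enough argument registers are
free.]

/* Hypothetical example types; register assignment per the flattening rule
   in struct_passed_as_elements() under LP64D.  */
struct s1 { float f; };           /* one float field    -> fa0            */
struct s2 { float f; int i; };    /* one float, one int -> fa0 + a0       */
struct s3 { double d; float f; }; /* two float fields   -> fa0 + fa1      */
struct s4 { int i; int j; };      /* no float field     -> packed into a0
                                     like a plain 8-byte integer          */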
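[Editorial note: a worked view of the 24-byte dynamic trampoline that
ffi_prep_closure_loc() emits; offsets follow the tramp[] stores above, and
FFI_TRAMPOLINE_SIZE is defined in ffitarget.h below.]

/* Dynamic trampoline layout, as written by ffi_prep_closure_loc():

   offset  0: pcaddi $t0, 0         t0 = address of the trampoline itself
   offset  4: ld.d   $t1, $t0, 16   t1 = &ffi_closure_asm, stored below
   offset  8: jirl   $zero, $t1, 0  jump without linking
   offset 12: nop                   pad the pointer to an 8-byte boundary
   offset 16: low/high words of &ffi_closure_asm (tramp[4], tramp[5])

   ffi_closure_asm then loads cif, fun, and user_data from
   t0 + FFI_TRAMPOLINE_SIZE, i.e. the ffi_closure fields that follow the
   trampoline buffer (see the "Enter C" sequence in sysv.S below).  */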
diff --git a/libffi/src/loongarch64/ffitarget.h b/libffi/src/loongarch64/ffitarget.h
new file mode 100644
index 0000000..5a4698a
--- /dev/null
+++ b/libffi/src/loongarch64/ffitarget.h
@@ -0,0 +1,82 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2022 Xu Chenghua <xuchenghua@loongson.cn>
+ 2022 Cheng Lulu <chenglulu@loongson.cn>
+
+ Target configuration macros for LoongArch.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error \
+ "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef __loongarch__
+#error \
+ "libffi was configured for a LoongArch target but this does not appear to be a LoongArch compiler."
+#endif
+
+#ifndef LIBFFI_ASM
+
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi
+{
+ FFI_FIRST_ABI = 0,
+ FFI_LP64S,
+ FFI_LP64F,
+ FFI_LP64D,
+ FFI_LAST_ABI,
+
+#if defined(__loongarch64)
+#if defined(__loongarch_soft_float)
+ FFI_DEFAULT_ABI = FFI_LP64S
+#elif defined(__loongarch_single_float)
+ FFI_DEFAULT_ABI = FFI_LP64F
+#elif defined(__loongarch_double_float)
+ FFI_DEFAULT_ABI = FFI_LP64D
+#else
+#error unsupported LoongArch floating-point ABI
+#endif
+#else
+#error unsupported LoongArch base architecture
+#endif
+} ffi_abi;
+
+#endif /* LIBFFI_ASM */
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_GO_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 24
+#define FFI_NATIVE_RAW_API 0
+#define FFI_EXTRA_CIF_FIELDS \
+ unsigned loongarch_nfixedargs; \
+ unsigned loongarch_unused;
+#define FFI_TARGET_SPECIFIC_VARIADIC
+#endif
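
[Editorial note: the closure support declared here (FFI_CLOSURES,
FFI_TRAMPOLINE_SIZE) is exercised through libffi's standard closure API.  A
hedged sketch follows; it is not part of this commit.]

#include <ffi.h>
#include <stdio.h>

/* Sketch of the closure path; standard libffi API only.  */
static void
handler (ffi_cif *cif, void *ret, void **args, void *user_data)
{
  *(ffi_arg *) ret = *(int *) args[0] + *(int *) user_data;
}

int
main (void)
{
  void *code;
  ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
  ffi_type *args[1] = { &ffi_type_sint };
  ffi_cif cif;
  int bias = 40;

  if (closure
      && ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint,
                       args) == FFI_OK
      && ffi_prep_closure_loc (closure, &cif, handler, &bias,
                               code) == FFI_OK)
    {
      int (*fn) (int) = (int (*)(int)) code;
      /* The call enters through the trampoline at `code', which jumps to
         ffi_closure_asm and on to ffi_closure_inner.  */
      printf ("%d\n", fn (2)); /* 42 */
    }
  ffi_closure_free (closure);
  return 0;
}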
diff --git a/libffi/src/loongarch64/sysv.S b/libffi/src/loongarch64/sysv.S
new file mode 100644
index 0000000..aa7bde2
--- /dev/null
+++ b/libffi/src/loongarch64/sysv.S
@@ -0,0 +1,327 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 2022 Xu Chenghua <xuchenghua@loongson.cn>
+ 2022 Cheng Lulu <chenglulu@loongson.cn>
+
+ LoongArch Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+/* Define aliases so that we can handle all ABIs uniformly. */
+
+#if __SIZEOF_POINTER__ == 8
+# define PTRS 8
+# define LARG ld.d
+# define SARG st.d
+#else
+# define PTRS 4
+# define LARG ld.w
+# define SARG st.w
+#endif
+
+#if defined(__loongarch_single_float)
+# define FLTS 4
+# define FLARG fld.s
+# define FSARG fst.s
+#elif defined(__loongarch_double_float)
+# define FLTS 8
+# define FLARG fld.d
+# define FSARG fst.d
+#elif defined(__loongarch_soft_float)
+# define FLTS 0
+#else
+#error unsupported LoongArch floating-point ABI
+#endif
+
+ .text
+ .globl ffi_call_asm
+ .type ffi_call_asm, @function
+ .hidden ffi_call_asm
+/* struct call_context
+ {
+ ABI_FLOAT fa[8];
+ size_t a[10];
+ }
+
+ - 8 floating point parameter/result registers (fa[0] - fa[7])
+ - 8 integer parameter/result registers (a[0] - a[7])
+ - 2 slots used by the assembly code to construct its own stack frame
+ in place:
+ - frame pointer (a[8])
+ - return address (a[9])
+
+ void ffi_call_asm (size_t *stackargs, struct call_context *regargs,
+ void (*fn)(void), void *closure); */
+
+#define FRAME_LEN (8 * FLTS + 10 * PTRS)
+
+ffi_call_asm:
+ .cfi_startproc
+
+ /* We are NOT going to set up an ordinary stack frame. In order to pass
+ the stacked args to the called function, we adjust our stack pointer
+ to a0, which is in the _caller's_ alloca area. We establish our own
+ stack frame at the end of the call_context.
+
+ Anything below the arguments will be freed at this point, although
+ we preserve the call_context so that it can be read back in the
+ caller. */
+
+ .cfi_def_cfa 5, FRAME_LEN # Interim CFA based on a1.
+ SARG $fp, $a1, FRAME_LEN - 2*PTRS
+ .cfi_offset 22, -2*PTRS
+ SARG $ra, $a1, FRAME_LEN - 1*PTRS
+ .cfi_offset 1, -1*PTRS
+
+ addi.d $fp, $a1, FRAME_LEN
+ move $sp, $a0
+ .cfi_def_cfa 22, 0 # Our frame is fully set up.
+
+ # Load arguments.
+ move $t1, $a2
+ move $t2, $a3
+
+#if FLTS
+ FLARG $fa0, $fp, -FRAME_LEN+0*FLTS
+ FLARG $fa1, $fp, -FRAME_LEN+1*FLTS
+ FLARG $fa2, $fp, -FRAME_LEN+2*FLTS
+ FLARG $fa3, $fp, -FRAME_LEN+3*FLTS
+ FLARG $fa4, $fp, -FRAME_LEN+4*FLTS
+ FLARG $fa5, $fp, -FRAME_LEN+5*FLTS
+ FLARG $fa6, $fp, -FRAME_LEN+6*FLTS
+ FLARG $fa7, $fp, -FRAME_LEN+7*FLTS
+#endif
+
+ LARG $a0, $fp, -FRAME_LEN+8*FLTS+0*PTRS
+ LARG $a1, $fp, -FRAME_LEN+8*FLTS+1*PTRS
+ LARG $a2, $fp, -FRAME_LEN+8*FLTS+2*PTRS
+ LARG $a3, $fp, -FRAME_LEN+8*FLTS+3*PTRS
+ LARG $a4, $fp, -FRAME_LEN+8*FLTS+4*PTRS
+ LARG $a5, $fp, -FRAME_LEN+8*FLTS+5*PTRS
+ LARG $a6, $fp, -FRAME_LEN+8*FLTS+6*PTRS
+ LARG $a7, $fp, -FRAME_LEN+8*FLTS+7*PTRS
+
+ /* Call */
+ jirl $ra, $t1, 0
+
+#if FLTS
+ /* Save return values - only a0/a1 (fa0/fa1) are used. */
+ FSARG $fa0, $fp, -FRAME_LEN+0*FLTS
+ FSARG $fa1, $fp, -FRAME_LEN+1*FLTS
+#endif
+
+ SARG $a0, $fp, -FRAME_LEN+8*FLTS+0*PTRS
+ SARG $a1, $fp, -FRAME_LEN+8*FLTS+1*PTRS
+
+ /* Restore and return. */
+ addi.d $sp, $fp, -FRAME_LEN
+ .cfi_def_cfa 3, FRAME_LEN
+ LARG $ra, $fp, -1*PTRS
+ .cfi_restore 1
+ LARG $fp, $fp, -2*PTRS
+ .cfi_restore 22
+ jr $ra
+ .cfi_endproc
+ .size ffi_call_asm, .-ffi_call_asm
+
+
+/* ffi_closure_asm. Expects address of the passed-in ffi_closure in t0.
+ void ffi_closure_inner (ffi_cif *cif,
+ void (*fun)(ffi_cif *, void *, void **, void *),
+ void *user_data,
+ size_t *stackargs, struct call_context *regargs) */
+
+ .globl ffi_closure_asm
+ .hidden ffi_closure_asm
+ .type ffi_closure_asm, @function
+
+ffi_closure_asm:
+ .cfi_startproc
+ addi.d $sp, $sp, -FRAME_LEN
+ .cfi_def_cfa_offset FRAME_LEN
+
+ /* Make a frame. */
+ SARG $fp, $sp, FRAME_LEN - 2*PTRS
+ .cfi_offset 22, -2*PTRS
+ SARG $ra, $sp, FRAME_LEN - 1*PTRS
+ .cfi_offset 1, -1*PTRS
+ addi.d $fp, $sp, FRAME_LEN
+
+ /* Save arguments. */
+#if FLTS
+ FSARG $fa0, $sp, 0*FLTS
+ FSARG $fa1, $sp, 1*FLTS
+ FSARG $fa2, $sp, 2*FLTS
+ FSARG $fa3, $sp, 3*FLTS
+ FSARG $fa4, $sp, 4*FLTS
+ FSARG $fa5, $sp, 5*FLTS
+ FSARG $fa6, $sp, 6*FLTS
+ FSARG $fa7, $sp, 7*FLTS
+#endif
+
+ SARG $a0, $sp, 8*FLTS+0*PTRS
+ SARG $a1, $sp, 8*FLTS+1*PTRS
+ SARG $a2, $sp, 8*FLTS+2*PTRS
+ SARG $a3, $sp, 8*FLTS+3*PTRS
+ SARG $a4, $sp, 8*FLTS+4*PTRS
+ SARG $a5, $sp, 8*FLTS+5*PTRS
+ SARG $a6, $sp, 8*FLTS+6*PTRS
+ SARG $a7, $sp, 8*FLTS+7*PTRS
+
+ /* Enter C */
+ LARG $a0, $t0, FFI_TRAMPOLINE_SIZE+0*PTRS
+ LARG $a1, $t0, FFI_TRAMPOLINE_SIZE+1*PTRS
+ LARG $a2, $t0, FFI_TRAMPOLINE_SIZE+2*PTRS
+ addi.d $a3, $sp, FRAME_LEN
+ move $a4, $sp
+
+ bl ffi_closure_inner
+
+ /* Return values. */
+#if FLTS
+ FLARG $fa0, $sp, 0*FLTS
+ FLARG $fa1, $sp, 1*FLTS
+#endif
+
+ LARG $a0, $sp, 8*FLTS+0*PTRS
+ LARG $a1, $sp, 8*FLTS+1*PTRS
+
+ /* Restore and return. */
+ LARG $ra, $sp, FRAME_LEN-1*PTRS
+ .cfi_restore 1
+ LARG $fp, $sp, FRAME_LEN-2*PTRS
+ .cfi_restore 22
+ addi.d $sp, $sp, FRAME_LEN
+ .cfi_def_cfa_offset 0
+ jr $ra
+ .cfi_endproc
+ .size ffi_closure_asm, .-ffi_closure_asm
+
+/* Static trampoline code table, in which each element is a trampoline.
+
+ The trampoline clobbers t0 and t1, but we don't save them on the stack
+ because our psABI explicitly says they are scratch registers, at least for
+ ELF. Our dynamic trampoline is already clobbering them anyway.
+
+ The trampoline has two parameters - target code to jump to and data for
+ the target code. The trampoline extracts the parameters from its parameter
+ block (see tramp_table_map()). The trampoline saves the data address in
+ t0 and jumps to the target code. As ffi_closure_asm() already expects the
+ data address to be in t0, we don't need a "ffi_closure_asm_alt". */
+
+#if defined(FFI_EXEC_STATIC_TRAMP)
+ .align 16
+ .globl trampoline_code_table
+ .hidden trampoline_code_table
+ .type trampoline_code_table, @function
+
+trampoline_code_table:
+
+ .rept 65536 / 16
+ pcaddu12i $t1, 16 # 65536 >> 12
+ ld.d $t0, $t1, 0
+ ld.d $t1, $t1, 8
+ jirl $zero, $t1, 0
+ .endr
+ .size trampoline_code_table, .-trampoline_code_table
+
+ .align 2
+#endif
+
+/* ffi_go_closure_asm. Expects address of the passed-in ffi_go_closure in t2.
+ void ffi_closure_inner (ffi_cif *cif,
+ void (*fun)(ffi_cif *, void *, void **, void *),
+ void *user_data,
+ size_t *stackargs, struct call_context *regargs) */
+
+ .globl ffi_go_closure_asm
+ .hidden ffi_go_closure_asm
+ .type ffi_go_closure_asm, @function
+
+ffi_go_closure_asm:
+ .cfi_startproc
+ addi.d $sp, $sp, -FRAME_LEN
+ .cfi_def_cfa_offset FRAME_LEN
+
+ /* Make a frame. */
+ SARG $fp, $sp, FRAME_LEN - 2*PTRS
+ .cfi_offset 22, -2*PTRS
+ SARG $ra, $sp, FRAME_LEN - 1*PTRS
+ .cfi_offset 1, -1*PTRS
+ addi.d $fp, $sp, FRAME_LEN
+
+ /* Save arguments. */
+#if FLTS
+ FSARG $fa0, $sp, 0*FLTS
+ FSARG $fa1, $sp, 1*FLTS
+ FSARG $fa2, $sp, 2*FLTS
+ FSARG $fa3, $sp, 3*FLTS
+ FSARG $fa4, $sp, 4*FLTS
+ FSARG $fa5, $sp, 5*FLTS
+ FSARG $fa6, $sp, 6*FLTS
+ FSARG $fa7, $sp, 7*FLTS
+#endif
+
+ SARG $a0, $sp, 8*FLTS+0*PTRS
+ SARG $a1, $sp, 8*FLTS+1*PTRS
+ SARG $a2, $sp, 8*FLTS+2*PTRS
+ SARG $a3, $sp, 8*FLTS+3*PTRS
+ SARG $a4, $sp, 8*FLTS+4*PTRS
+ SARG $a5, $sp, 8*FLTS+5*PTRS
+ SARG $a6, $sp, 8*FLTS+6*PTRS
+ SARG $a7, $sp, 8*FLTS+7*PTRS
+
+ /* Enter C */
+ LARG $a0, $t2, 1*PTRS
+ LARG $a1, $t2, 2*PTRS
+ move $a2, $t2
+ addi.d $a3, $sp, FRAME_LEN
+ move $a4, $sp
+
+ bl ffi_closure_inner
+
+ /* Return values. */
+#if FLTS
+ FLARG $fa0, $sp, 0*FLTS
+ FLARG $fa1, $sp, 1*FLTS
+#endif
+
+ LARG $a0, $sp, 8*FLTS+0*PTRS
+ LARG $a1, $sp, 8*FLTS+1*PTRS
+
+ /* Restore and return. */
+ LARG $ra, $sp, FRAME_LEN-1*PTRS
+ .cfi_restore 1
+ LARG $fp, $sp, FRAME_LEN-2*PTRS
+ .cfi_restore 22
+ addi.d $sp, $sp, FRAME_LEN
+ .cfi_def_cfa_offset 0
+ jr $ra
+ .cfi_endproc
+ .size ffi_go_closure_asm, .-ffi_go_closure_asm
+
+#if defined __ELF__ && defined __linux__
+ .section .note.GNU-stack,"",%progbits
+#endif
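
[Editorial note: for reference, the frame built by the three assembly entry
points above, worked out for the LP64D ABI (FLTS = 8, PTRS = 8).  All offsets
follow directly from the definitions in sysv.S and ffi.c.]

/* call_context / stack frame layout under LP64D:

   offset   0..63   fa0-fa7                     (8 * FLTS bytes)
   offset  64..127  a0-a7                       (8 * PTRS bytes)
   offset 128       saved $fp  = FRAME_LEN - 2*PTRS
   offset 136       saved $ra  = FRAME_LEN - 1*PTRS

   FRAME_LEN = 8*FLTS + 10*PTRS = 64 + 80 = 144 bytes, a multiple of the
   16-byte stack alignment (STKALIGN in ffi.c).  */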