| author | Anton Samokhvalov <pg83@yandex.ru> | 2022-02-10 16:45:15 +0300 |
|---|---|---|
| committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:45:15 +0300 |
| commit | 72cb13b4aff9bc9cf22e49251bc8fd143f82538f (patch) | |
| tree | da2c34829458c7d4e74bdfbdf85dff449e9e7fb8 /contrib/restricted/libffi/src/x86 | |
| parent | 778e51ba091dc39e7b7fcab2b9cf4dbedfb6f2b5 (diff) | |
| download | ydb-72cb13b4aff9bc9cf22e49251bc8fd143f82538f.tar.gz | |
Restoring authorship annotation for Anton Samokhvalov <pg83@yandex.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/libffi/src/x86')
| -rw-r--r-- | contrib/restricted/libffi/src/x86/ffi.c | 334 |
| -rw-r--r-- | contrib/restricted/libffi/src/x86/ffi64.c | 1102 |
| -rw-r--r-- | contrib/restricted/libffi/src/x86/ffitarget.h | 200 |
| -rw-r--r-- | contrib/restricted/libffi/src/x86/sysv.S | 186 |
| -rw-r--r-- | contrib/restricted/libffi/src/x86/unix64.S | 286 |
| -rw-r--r-- | contrib/restricted/libffi/src/x86/win64.S | 74 |
6 files changed, 1091 insertions, 1091 deletions
diff --git a/contrib/restricted/libffi/src/x86/ffi.c b/contrib/restricted/libffi/src/x86/ffi.c
index 9a592185a1..19a3cb8a5f 100644
--- a/contrib/restricted/libffi/src/x86/ffi.c
+++ b/contrib/restricted/libffi/src/x86/ffi.c
[334 changed lines. Every hunk removes a line and re-adds it with identical code content, apparently differing only in trailing whitespace, consistent with the authorship restoration. The hunks run over the license header, ffi_prep_cif_machdep, the ffi_call_int/ffi_call_i386 argument marshalling, ffi_closure_inner, ffi_prep_closure_loc/ffi_prep_go_closure, and the raw closure API.]

diff --git a/contrib/restricted/libffi/src/x86/ffi64.c b/contrib/restricted/libffi/src/x86/ffi64.c
index dec331c958..e59e396ff0 100644
--- a/contrib/restricted/libffi/src/x86/ffi64.c
+++ b/contrib/restricted/libffi/src/x86/ffi64.c
[1102 changed lines. The same line-for-line restoration over the x86-64 port: the x86_64_reg_class enum with merge_classes/classify_argument/examine_argument (the psABI register-classification rules), ffi_prep_cif_machdep, the ffi_call_int/ffi_call_unix64 register marshalling, ffi_closure_unix64_inner, and ffi_prep_closure_loc.]
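The code being re-annotated above is libffi's machine-dependent call path: ffi_prep_cif_machdep precomputes the return-type flags and stack size for a cif, and ffi_call marshals arguments before jumping to ffi_call_i386 (sysv.S) or ffi_call_unix64 (unix64.S). Purely as an orientation sketch of the stock libffi public API (nothing here is introduced by this commit), a minimal caller looks roughly like this:

```c
#include <ffi.h>
#include <stdio.h>

/* Ordinary C function we will invoke through libffi. */
static int add_ints(int a, int b) { return a + b; }

int main(void)
{
  ffi_cif cif;
  ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
  int a = 2, b = 40;
  void *arg_values[2] = { &a, &b };
  ffi_arg result;   /* integer results are widened to ffi_arg */

  /* ffi_prep_cif ends up in ffi_prep_cif_machdep (ffi.c / ffi64.c above),
     which computes cif->flags (return-type dispatch code) and cif->bytes
     (outgoing stack size) for the selected ABI. */
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK)
    return 1;

  /* ffi_call marshals the arguments and calls the assembly entry point:
     ffi_call_i386 on 32-bit x86, ffi_call_unix64 on x86-64. */
  ffi_call(&cif, FFI_FN(add_ints), &result, arg_values);

  printf("%d\n", (int)result);   /* prints 42 */
  return 0;
}
```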
diff --git a/contrib/restricted/libffi/src/x86/ffitarget.h b/contrib/restricted/libffi/src/x86/ffitarget.h
index 85ccedfedc..170b5865fe 100644
--- a/contrib/restricted/libffi/src/x86/ffitarget.h
+++ b/contrib/restricted/libffi/src/x86/ffitarget.h
[200 changed lines. The same restoration over the target configuration header: the ffi_arg/ffi_sarg typedefs, the ffi_abi enum for the X86_WIN64, X86_64, X86_WIN32 and SysV variants, and the closure/trampoline constants.]

diff --git a/contrib/restricted/libffi/src/x86/sysv.S b/contrib/restricted/libffi/src/x86/sysv.S
index 7c9598c93c..8d857a341f 100644
--- a/contrib/restricted/libffi/src/x86/sysv.S
+++ b/contrib/restricted/libffi/src/x86/sysv.S
[186 changed lines. The same restoration over the i386 assembly: the license header, the name-mangling and jump-table macros, the ffi_call_i386 entry point with its X86_RET_* store table, and the start of the closure helpers. The extract breaks off inside these hunks; the unix64.S and win64.S hunks listed in the diffstat are not part of this extract.]
*/ - + #if HAVE_FASTCALL # define closure_FS (40 + 4) # define closure_CF 0 @@ -223,12 +223,12 @@ ENDF(ffi_call_i386) # define closure_FS (8 + 40 + 12) # define closure_CF 8 #endif - + #define FFI_CLOSURE_SAVE_REGS \ movl %eax, closure_CF+16+R_EAX*4(%esp); \ movl %edx, closure_CF+16+R_EDX*4(%esp); \ movl %ecx, closure_CF+16+R_ECX*4(%esp) - + #define FFI_CLOSURE_COPY_TRAMP_DATA \ movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \ movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \ @@ -241,14 +241,14 @@ ENDF(ffi_call_i386) # define FFI_CLOSURE_PREP_CALL \ movl %esp, %ecx; /* load closure_data */ \ leal closure_FS+4(%esp), %edx; /* load incoming stack */ -#else +#else # define FFI_CLOSURE_PREP_CALL \ leal closure_CF(%esp), %ecx; /* load closure_data */ \ leal closure_FS+4(%esp), %edx; /* load incoming stack */ \ movl %ecx, (%esp); \ movl %edx, 4(%esp) -#endif - +#endif + #define FFI_CLOSURE_CALL_INNER(UWN) \ call ffi_closure_inner @@ -388,14 +388,14 @@ L(e2): addl $closure_FS, %esp L(UW16): # cfi_adjust_cfa_offset(-closure_FS) - ret + ret L(UW17): # cfi_adjust_cfa_offset(closure_FS) E(L(load_table2), X86_RET_STRUCTPOP) addl $closure_FS, %esp L(UW18): # cfi_adjust_cfa_offset(-closure_FS) - ret $4 + ret $4 L(UW19): # cfi_adjust_cfa_offset(closure_FS) E(L(load_table2), X86_RET_STRUCTARG) @@ -406,7 +406,7 @@ E(L(load_table2), X86_RET_STRUCT_1B) E(L(load_table2), X86_RET_STRUCT_2B) movzwl %ax, %eax jmp L(e2) - + /* Fill out the table so that bad values are predictable. */ E(L(load_table2), X86_RET_UNUSED14) ud2 @@ -566,8 +566,8 @@ L(UW31): # cfi_endproc ENDF(C(ffi_closure_STDCALL)) -#if !FFI_NO_RAW_API - +#if !FFI_NO_RAW_API + #define raw_closure_S_FS (16+16+12) .balign 16 @@ -599,15 +599,15 @@ L(UW34): call C(__x86.get_pc_thunk.bx) L(pc4): leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx -#else +#else leal L(load_table4)(,%eax, 8), %ecx -#endif +#endif movl raw_closure_S_FS-4(%esp), %ebx L(UW35): # cfi_restore(%ebx) movl 16(%esp), %eax /* Optimistic load */ jmp *%ecx - + .balign 8 L(load_table4): E(L(load_table4), X86_RET_FLOAT) @@ -660,13 +660,13 @@ E(L(load_table4), X86_RET_STRUCT_1B) E(L(load_table4), X86_RET_STRUCT_2B) movzwl %ax, %eax jmp L(e4) - + /* Fill out the table so that bad values are predictable. */ E(L(load_table4), X86_RET_UNUSED14) ud2 E(L(load_table4), X86_RET_UNUSED15) ud2 - + L(UW40): # cfi_endproc ENDF(C(ffi_closure_raw_SYSV)) @@ -717,13 +717,13 @@ L(pc5): leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx #else leal L(load_table5)(,%eax, 8), %ecx -#endif +#endif movl raw_closure_T_FS-4(%esp), %ebx L(UW47): # cfi_restore(%ebx) movl 16(%esp), %eax /* Optimistic load */ jmp *%ecx - + .balign 8 L(load_table5): E(L(load_table5), X86_RET_FLOAT) @@ -777,7 +777,7 @@ E(L(load_table5), X86_RET_STRUCT_1B) E(L(load_table5), X86_RET_STRUCT_2B) movzwl %ax, %eax jmp L(e5) - + /* Fill out the table so that bad values are predictable. */ E(L(load_table5), X86_RET_UNUSED14) ud2 @@ -800,10 +800,10 @@ ENDF(C(ffi_closure_raw_THISCALL)) .section .text.X,"axG",@progbits,X,comdat; \ .globl X; \ FFI_HIDDEN(X) -#else +#else # define COMDAT(X) -#endif - +#endif + #if defined(__PIC__) COMDAT(C(__x86.get_pc_thunk.bx)) C(__x86.get_pc_thunk.bx): @@ -828,15 +828,15 @@ EHFrame0: .section .eh_frame,"r" #elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) .section .eh_frame,EH_FRAME_FLAGS,@unwind -#else +#else .section .eh_frame,EH_FRAME_FLAGS,@progbits -#endif +#endif #ifdef HAVE_AS_X86_PCREL # define PCREL(X) X - . -#else +#else # define PCREL(X) X@rel -#endif +#endif /* Simplify advancing between labels. 
Assume DW_CFA_advance_loc1 fits. */ #define ADV(N, P) .byte 2, L(N)-L(P) @@ -920,9 +920,9 @@ L(SFDE4): ADV(UW15, UW14) .byte 0xc0+3 /* DW_CFA_restore %ebx */ ADV(UW16, UW15) -#else +#else ADV(UW16, UW13) -#endif +#endif .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ ADV(UW17, UW16) .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ @@ -973,11 +973,11 @@ L(SFDE7): .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ ADV(UW30, UW29) .byte 0xc0+3 /* DW_CFA_restore %ebx */ -#endif +#endif .balign 4 L(EFDE7): - -#if !FFI_NO_RAW_API + +#if !FFI_NO_RAW_API .set L(set8),L(EFDE8)-L(SFDE8) .long L(set8) /* FDE Length */ L(SFDE8): @@ -1001,7 +1001,7 @@ L(SFDE8): .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ .balign 4 L(EFDE8): - + .set L(set9),L(EFDE9)-L(SFDE9) .long L(set9) /* FDE Length */ L(SFDE9): @@ -1034,7 +1034,7 @@ L(SFDE9): .balign 4 L(EFDE9): #endif /* !FFI_NO_RAW_API */ - + #ifdef _WIN32 .def @feat.00; .scl 3; @@ -1042,12 +1042,12 @@ L(EFDE9): .endef .globl @feat.00 @feat.00 = 1 -#endif - +#endif + #ifdef __APPLE__ .subsections_via_symbols .section __LD,__compact_unwind,regular,debug - + /* compact unwind for ffi_call_i386 */ .long C(ffi_call_i386) .set L1,L(UW5)-L(UW0) @@ -1124,6 +1124,6 @@ L(EFDE9): #endif /* ifndef _MSC_VER */ #endif /* ifdef __i386__ */ -#if defined __ELF__ && defined __linux__ - .section .note.GNU-stack,"",@progbits -#endif +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/contrib/restricted/libffi/src/x86/unix64.S b/contrib/restricted/libffi/src/x86/unix64.S index 41563f5c60..90b847311f 100644 --- a/contrib/restricted/libffi/src/x86/unix64.S +++ b/contrib/restricted/libffi/src/x86/unix64.S @@ -1,40 +1,40 @@ -/* ----------------------------------------------------------------------- - unix64.S - Copyright (c) 2013 The Written Word, Inc. - - Copyright (c) 2008 Red Hat, Inc - - Copyright (c) 2002 Bo Thorsen <bo@suse.de> - - x86-64 Foreign Function Interface - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - ``Software''), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. - ----------------------------------------------------------------------- */ - -#ifdef __x86_64__ -#define LIBFFI_ASM -#include <fficonfig.h> -#include <ffi.h> +/* ----------------------------------------------------------------------- + unix64.S - Copyright (c) 2013 The Written Word, Inc. 
+ - Copyright (c) 2008 Red Hat, Inc + - Copyright (c) 2002 Bo Thorsen <bo@suse.de> + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __x86_64__ +#define LIBFFI_ASM +#include <fficonfig.h> +#include <ffi.h> #include "internal64.h" #include "asmnames.h" - + .text - + /* This macro allows the safe creation of jump tables without an actual table. The entry points into the table are all 8 bytes. The use of ORG asserts that we're at the correct location. */ @@ -45,26 +45,26 @@ # define E(BASE, X) .balign 8; .org BASE + X * 8 #endif -/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, - void *raddr, void (*fnaddr)(void)); - - Bit o trickiness here -- ARGS+BYTES is the base of the stack frame - for this function. This has been allocated by ffi_call. We also - deallocate some of the stack that has been alloca'd. */ - +/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)); + + Bit o trickiness here -- ARGS+BYTES is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + .balign 8 .globl C(ffi_call_unix64) FFI_HIDDEN(C(ffi_call_unix64)) - + C(ffi_call_unix64): L(UW0): - movq (%rsp), %r10 /* Load return address. */ - leaq (%rdi, %rsi), %rax /* Find local stack base. */ - movq %rdx, (%rax) /* Save flags. */ - movq %rcx, 8(%rax) /* Save raddr. */ - movq %rbp, 16(%rax) /* Save old frame pointer. */ - movq %r10, 24(%rax) /* Relocate return address. */ - movq %rax, %rbp /* Finalize local stack frame. */ + movq (%rsp), %r10 /* Load return address. */ + leaq (%rdi, %rsi), %rax /* Find local stack base. */ + movq %rdx, (%rax) /* Save flags. */ + movq %rcx, 8(%rax) /* Save raddr. */ + movq %rbp, 16(%rax) /* Save old frame pointer. */ + movq %r10, 24(%rax) /* Relocate return address. */ + movq %rax, %rbp /* Finalize local stack frame. */ /* New stack frame based off rbp. This is a itty bit of unwind trickery in that the CFA *has* changed. There is no easy way @@ -77,59 +77,59 @@ L(UW1): /* cfi_def_cfa(%rbp, 32) */ /* cfi_rel_offset(%rbp, 16) */ - movq %rdi, %r10 /* Save a copy of the register area. */ - movq %r8, %r11 /* Save a copy of the target fn. */ - movl %r9d, %eax /* Set number of SSE registers. */ - - /* Load up all argument registers. */ - movq (%r10), %rdi + movq %rdi, %r10 /* Save a copy of the register area. 
*/ + movq %r8, %r11 /* Save a copy of the target fn. */ + movl %r9d, %eax /* Set number of SSE registers. */ + + /* Load up all argument registers. */ + movq (%r10), %rdi movq 0x08(%r10), %rsi movq 0x10(%r10), %rdx movq 0x18(%r10), %rcx movq 0x20(%r10), %r8 movq 0x28(%r10), %r9 movl 0xb0(%r10), %eax - testl %eax, %eax + testl %eax, %eax jnz L(load_sse) L(ret_from_load_sse): - + /* Deallocate the reg arg area, except for r10, then load via pop. */ leaq 0xb8(%r10), %rsp popq %r10 - - /* Call the user function. */ - call *%r11 - - /* Deallocate stack arg area; local stack frame in redzone. */ - leaq 24(%rbp), %rsp - - movq 0(%rbp), %rcx /* Reload flags. */ - movq 8(%rbp), %rdi /* Reload raddr. */ - movq 16(%rbp), %rbp /* Reload old frame pointer. */ + + /* Call the user function. */ + call *%r11 + + /* Deallocate stack arg area; local stack frame in redzone. */ + leaq 24(%rbp), %rsp + + movq 0(%rbp), %rcx /* Reload flags. */ + movq 8(%rbp), %rdi /* Reload raddr. */ + movq 16(%rbp), %rbp /* Reload old frame pointer. */ L(UW2): /* cfi_remember_state */ /* cfi_def_cfa(%rsp, 8) */ /* cfi_restore(%rbp) */ - - /* The first byte of the flags contains the FFI_TYPE. */ + + /* The first byte of the flags contains the FFI_TYPE. */ cmpb $UNIX64_RET_LAST, %cl - movzbl %cl, %r10d + movzbl %cl, %r10d leaq L(store_table)(%rip), %r11 ja L(sa) leaq (%r11, %r10, 8), %r10 /* Prep for the structure cases: scratch area in redzone. */ leaq -20(%rsp), %rsi - jmp *%r10 - + jmp *%r10 + .balign 8 L(store_table): E(L(store_table), UNIX64_RET_VOID) - ret + ret E(L(store_table), UNIX64_RET_UINT8) movzbl %al, %eax - movq %rax, (%rdi) - ret + movq %rax, (%rdi) + ret E(L(store_table), UNIX64_RET_UINT16) movzwl %ax, %eax movq %rax, (%rdi) @@ -139,29 +139,29 @@ E(L(store_table), UNIX64_RET_UINT32) movq %rax, (%rdi) ret E(L(store_table), UNIX64_RET_SINT8) - movsbq %al, %rax - movq %rax, (%rdi) - ret + movsbq %al, %rax + movq %rax, (%rdi) + ret E(L(store_table), UNIX64_RET_SINT16) - movswq %ax, %rax - movq %rax, (%rdi) - ret + movswq %ax, %rax + movq %rax, (%rdi) + ret E(L(store_table), UNIX64_RET_SINT32) - cltq - movq %rax, (%rdi) - ret + cltq + movq %rax, (%rdi) + ret E(L(store_table), UNIX64_RET_INT64) - movq %rax, (%rdi) - ret + movq %rax, (%rdi) + ret E(L(store_table), UNIX64_RET_XMM32) movd %xmm0, (%rdi) - ret + ret E(L(store_table), UNIX64_RET_XMM64) movq %xmm0, (%rdi) - ret + ret E(L(store_table), UNIX64_RET_X87) - fstpt (%rdi) - ret + fstpt (%rdi) + ret E(L(store_table), UNIX64_RET_X87_2) fstpt (%rdi) fstpt 16(%rdi) @@ -178,22 +178,22 @@ E(L(store_table), UNIX64_RET_ST_XMM0_XMM1) E(L(store_table), UNIX64_RET_ST_RAX_RDX) movq %rdx, 8(%rsi) L(s2): - movq %rax, (%rsi) + movq %rax, (%rsi) shrl $UNIX64_SIZE_SHIFT, %ecx - rep movsb - ret + rep movsb + ret .balign 8 L(s3): movq %xmm0, (%rsi) shrl $UNIX64_SIZE_SHIFT, %ecx rep movsb ret - + L(sa): call PLT(C(abort)) - /* Many times we can avoid loading any SSE registers at all. - It's not worth an indirect jump to load the exact set of - SSE registers needed; zero or all is a good compromise. */ + /* Many times we can avoid loading any SSE registers at all. + It's not worth an indirect jump to load the exact set of + SSE registers needed; zero or all is a good compromise. */ .balign 2 L(UW3): /* cfi_restore_state */ @@ -207,24 +207,24 @@ L(load_sse): movdqa 0x90(%r10), %xmm6 movdqa 0xa0(%r10), %xmm7 jmp L(ret_from_load_sse) - + L(UW4): ENDF(C(ffi_call_unix64)) - + /* 6 general registers, 8 vector registers, 32 bytes of rvalue, 8 bytes of alignment. 
*/ #define ffi_closure_OFS_G 0 #define ffi_closure_OFS_V (6*8) #define ffi_closure_OFS_RVALUE (ffi_closure_OFS_V + 8*16) #define ffi_closure_FS (ffi_closure_OFS_RVALUE + 32 + 8) - + /* The location of rvalue within the red zone after deallocating the frame. */ #define ffi_closure_RED_RVALUE (ffi_closure_OFS_RVALUE - ffi_closure_FS) - + .balign 2 .globl C(ffi_closure_unix64_sse) FFI_HIDDEN(C(ffi_closure_unix64_sse)) - + C(ffi_closure_unix64_sse): L(UW5): subq $ffi_closure_FS, %rsp @@ -276,48 +276,48 @@ L(do_closure): leaq ffi_closure_FS+8(%rsp), %r9 /* Load argp */ call PLT(C(ffi_closure_unix64_inner)) - /* Deallocate stack frame early; return value is now in redzone. */ + /* Deallocate stack frame early; return value is now in redzone. */ addq $ffi_closure_FS, %rsp L(UW10): /* cfi_adjust_cfa_offset(-ffi_closure_FS) */ - - /* The first byte of the return value contains the FFI_TYPE. */ + + /* The first byte of the return value contains the FFI_TYPE. */ cmpb $UNIX64_RET_LAST, %al - movzbl %al, %r10d + movzbl %al, %r10d leaq L(load_table)(%rip), %r11 ja L(la) leaq (%r11, %r10, 8), %r10 leaq ffi_closure_RED_RVALUE(%rsp), %rsi - jmp *%r10 - + jmp *%r10 + .balign 8 L(load_table): E(L(load_table), UNIX64_RET_VOID) - ret + ret E(L(load_table), UNIX64_RET_UINT8) movzbl (%rsi), %eax - ret + ret E(L(load_table), UNIX64_RET_UINT16) movzwl (%rsi), %eax - ret + ret E(L(load_table), UNIX64_RET_UINT32) movl (%rsi), %eax - ret + ret E(L(load_table), UNIX64_RET_SINT8) movsbl (%rsi), %eax - ret + ret E(L(load_table), UNIX64_RET_SINT16) movswl (%rsi), %eax - ret + ret E(L(load_table), UNIX64_RET_SINT32) movl (%rsi), %eax - ret + ret E(L(load_table), UNIX64_RET_INT64) movq (%rsi), %rax - ret + ret E(L(load_table), UNIX64_RET_XMM32) movd (%rsi), %xmm0 - ret + ret E(L(load_table), UNIX64_RET_XMM64) movq (%rsi), %xmm0 ret @@ -346,16 +346,16 @@ L(l2): L(l3): movq (%rsi), %xmm0 ret - + L(la): call PLT(C(abort)) - + L(UW11): ENDF(C(ffi_closure_unix64)) - + .balign 2 .globl C(ffi_go_closure_unix64_sse) FFI_HIDDEN(C(ffi_go_closure_unix64_sse)) - + C(ffi_go_closure_unix64_sse): L(UW12): subq $ffi_closure_FS, %rsp @@ -396,11 +396,11 @@ L(sse_entry2): movl 4(%r10), %edi /* Load cif */ movl 8(%r10), %esi /* Load fun */ movl %r10d, %edx /* Load closure (user_data) */ -#else +#else movq 8(%r10), %rdi /* Load cif */ movq 16(%r10), %rsi /* Load fun */ movq %r10, %rdx /* Load closure (user_data) */ -#endif +#endif jmp L(do_closure) L(UW17): @@ -431,19 +431,19 @@ L(CIE): .set L(set0),L(ECIE)-L(SCIE) .long L(set0) /* CIE Length */ L(SCIE): - .long 0 /* CIE Identifier Tag */ - .byte 1 /* CIE Version */ + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ .ascii "zR\0" /* CIE Augmentation */ .byte 1 /* CIE Code Alignment Factor */ .byte 0x78 /* CIE Data Alignment Factor */ - .byte 0x10 /* CIE RA Column */ + .byte 0x10 /* CIE RA Column */ .byte 1 /* Augmentation size */ - .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp offset 8 */ .byte 0x80+16, 1 /* DW_CFA_offset, %rip offset 1*-8 */ .balign 8 L(ECIE): - + .set L(set1),L(EFDE1)-L(SFDE1) .long L(set1) /* FDE Length */ L(SFDE1): @@ -455,14 +455,14 @@ L(SFDE1): .byte 0xc, 6, 32 /* DW_CFA_def_cfa, %rbp 32 */ .byte 0x80+6, 2 /* DW_CFA_offset, %rbp 2*-8 */ ADV(UW2, UW1) - .byte 0xa /* DW_CFA_remember_state */ + .byte 0xa /* DW_CFA_remember_state */ .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp 8 */ - .byte 0xc0+6 /* DW_CFA_restore, %rbp */ + .byte 0xc0+6 /* DW_CFA_restore, %rbp */ ADV(UW3, UW2) - 
.byte 0xb /* DW_CFA_restore_state */ + .byte 0xb /* DW_CFA_restore_state */ .balign 8 L(EFDE1): - + .set L(set2),L(EFDE2)-L(SFDE2) .long L(set2) /* FDE Length */ L(SFDE2): @@ -475,7 +475,7 @@ L(SFDE2): .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ .balign 8 L(EFDE2): - + .set L(set3),L(EFDE3)-L(SFDE3) .long L(set3) /* FDE Length */ L(SFDE3): @@ -484,12 +484,12 @@ L(SFDE3): .long L(UW11)-L(UW8) /* Address range */ .byte 0 /* Augmentation size */ ADV(UW9, UW8) - .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xe /* DW_CFA_def_cfa_offset */ .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ ADV(UW10, UW9) .byte 0xe, 8 /* DW_CFA_def_cfa_offset 8 */ L(EFDE3): - + .set L(set4),L(EFDE4)-L(SFDE4) .long L(set4) /* FDE Length */ L(SFDE4): @@ -498,11 +498,11 @@ L(SFDE4): .long L(UW14)-L(UW12) /* Address range */ .byte 0 /* Augmentation size */ ADV(UW13, UW12) - .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xe /* DW_CFA_def_cfa_offset */ .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ .balign 8 L(EFDE4): - + .set L(set5),L(EFDE5)-L(SFDE5) .long L(set5) /* FDE Length */ L(SFDE5): @@ -518,7 +518,7 @@ L(EFDE5): #ifdef __APPLE__ .subsections_via_symbols .section __LD,__compact_unwind,regular,debug - + /* compact unwind for ffi_call_unix64 */ .quad C(ffi_call_unix64) .set L1,L(UW4)-L(UW0) @@ -526,7 +526,7 @@ L(EFDE5): .long 0x04000000 /* use dwarf unwind info */ .quad 0 .quad 0 - + /* compact unwind for ffi_closure_unix64_sse */ .quad C(ffi_closure_unix64_sse) .set L2,L(UW7)-L(UW5) @@ -560,7 +560,7 @@ L(EFDE5): .quad 0 #endif -#endif /* __x86_64__ */ -#if defined __ELF__ && defined __linux__ - .section .note.GNU-stack,"",@progbits -#endif +#endif /* __x86_64__ */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/contrib/restricted/libffi/src/x86/win64.S b/contrib/restricted/libffi/src/x86/win64.S index 2c334c82f9..ed60453d94 100644 --- a/contrib/restricted/libffi/src/x86/win64.S +++ b/contrib/restricted/libffi/src/x86/win64.S @@ -1,50 +1,50 @@ #ifdef __x86_64__ -#define LIBFFI_ASM -#include <fficonfig.h> -#include <ffi.h> +#define LIBFFI_ASM +#include <fficonfig.h> +#include <ffi.h> #include <ffi_cfi.h> #include "asmnames.h" - + #if defined(HAVE_AS_CFI_PSEUDO_OP) .cfi_sections .debug_frame #endif - + #ifdef X86_WIN64 #define SEH(...) __VA_ARGS__ #define arg0 %rcx #define arg1 %rdx #define arg2 %r8 #define arg3 %r9 -#else +#else #define SEH(...) #define arg0 %rdi #define arg1 %rsi #define arg2 %rdx #define arg3 %rcx #endif - + /* This macro allows the safe creation of jump tables without an actual table. The entry points into the table are all 8 bytes. The use of ORG asserts that we're at the correct location. */ /* ??? The clang assembler doesn't handle .org with symbolic expressions. */ #if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) # define E(BASE, X) .balign 8 -#else +#else # define E(BASE, X) .balign 8; .org BASE + X * 8 -#endif - +#endif + .text - + /* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) - + Bit o trickiness here -- FRAME is the base of the stack frame for this function. This has been allocated by ffi_call. We also deallocate some of the stack that has been alloca'd. 
*/ - + .align 8 .globl C(ffi_call_win64) FFI_HIDDEN(C(ffi_call_win64)) - + SEH(.seh_proc ffi_call_win64) C(ffi_call_win64): cfi_startproc @@ -59,9 +59,9 @@ C(ffi_call_win64): SEH(.seh_setframe %rbp, 0) SEH(.seh_endprologue) movq arg0, %rsp - + movq arg2, %r10 - + /* Load all slots into both general and xmm registers. */ movq (%rsp), %rcx movsd (%rsp), %xmm0 @@ -71,9 +71,9 @@ C(ffi_call_win64): movsd 16(%rsp), %xmm2 movq 24(%rsp), %r9 movsd 24(%rsp), %xmm3 - + call *16(%rbp) - + movl 24(%rbp), %ecx movq 32(%rbp), %r8 leaq 0f(%rip), %r10 @@ -81,7 +81,7 @@ C(ffi_call_win64): leaq (%r10, %rcx, 8), %r10 ja 99f jmp *%r10 - + /* Below, we're space constrained most of the time. Thus we eschew the modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). */ .macro epilogue @@ -92,7 +92,7 @@ C(ffi_call_win64): ret cfi_restore_state .endm - + .align 8 0: E(0b, FFI_TYPE_VOID) @@ -153,26 +153,26 @@ E(0b, FFI_TYPE_SMALL_STRUCT_2B) E(0b, FFI_TYPE_SMALL_STRUCT_4B) movl %eax, (%r8) epilogue - + .align 8 99: call PLT(C(abort)) - + epilogue - + cfi_endproc SEH(.seh_endproc) - - + + /* 32 bytes of outgoing register stack space, 8 bytes of alignment, 16 bytes of result, 32 bytes of xmm registers. */ #define ffi_clo_FS (32+8+16+32) #define ffi_clo_OFF_R (32+8) #define ffi_clo_OFF_X (32+8+16) - + .align 8 .globl C(ffi_go_closure_win64) FFI_HIDDEN(C(ffi_go_closure_win64)) - + SEH(.seh_proc ffi_go_closure_win64) C(ffi_go_closure_win64): cfi_startproc @@ -181,18 +181,18 @@ C(ffi_go_closure_win64): movq %rdx, 16(%rsp) movq %r8, 24(%rsp) movq %r9, 32(%rsp) - + movq 8(%r10), %rcx /* load cif */ movq 16(%r10), %rdx /* load fun */ movq %r10, %r8 /* closure is user_data */ jmp 0f cfi_endproc SEH(.seh_endproc) - + .align 8 .globl C(ffi_closure_win64) FFI_HIDDEN(C(ffi_closure_win64)) - + SEH(.seh_proc ffi_closure_win64) C(ffi_closure_win64): cfi_startproc @@ -201,7 +201,7 @@ C(ffi_closure_win64): movq %rdx, 16(%rsp) movq %r8, 24(%rsp) movq %r9, 32(%rsp) - + movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ @@ -210,28 +210,28 @@ C(ffi_closure_win64): cfi_adjust_cfa_offset(ffi_clo_FS) SEH(.seh_stackalloc ffi_clo_FS) SEH(.seh_endprologue) - + /* Save all sse arguments into the stack frame. */ movsd %xmm0, ffi_clo_OFF_X(%rsp) movsd %xmm1, ffi_clo_OFF_X+8(%rsp) movsd %xmm2, ffi_clo_OFF_X+16(%rsp) movsd %xmm3, ffi_clo_OFF_X+24(%rsp) - + leaq ffi_clo_OFF_R(%rsp), %r9 call PLT(C(ffi_closure_win64_inner)) - + /* Load the result into both possible result registers. */ movq ffi_clo_OFF_R(%rsp), %rax movsd ffi_clo_OFF_R(%rsp), %xmm0 - + addq $ffi_clo_FS, %rsp cfi_adjust_cfa_offset(-ffi_clo_FS) ret - + cfi_endproc SEH(.seh_endproc) #endif /* __x86_64__ */ - + #if defined __ELF__ && defined __linux__ .section .note.GNU-stack,"",@progbits #endif |
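For readers not familiar with the entry points touched above (ffi_call_i386, ffi_call_unix64, ffi_closure_unix64, ffi_call_win64 and their closure counterparts): these assembly stubs back libffi's public C API, loading the argument registers, dispatching through the return-type jump tables (L(store_table), L(load_table)), and handing closures off to ffi_closure_inner / ffi_closure_unix64_inner / ffi_closure_win64_inner. The sketch below is illustrative only and is not part of this patch; it shows one forward call and one closure through that API, assuming a hosted x86-64 toolchain with libffi available. The helpers add_ints and add_handler are hypothetical names introduced for the example.

/* example.c -- illustrative use of the libffi API backed by the
   x86/x86-64 stubs in this diff; not part of the patch itself. */
#include <ffi.h>
#include <stdio.h>

static int add_ints(int a, int b) { return a + b; }

/* Closure handler: dispatched by ffi_closure_unix64 / ffi_closure_win64. */
static void add_handler(ffi_cif *cif, void *ret, void **args, void *user_data)
{
    (void)cif; (void)user_data;
    int a = *(int *)args[0];
    int b = *(int *)args[1];
    *(ffi_arg *)ret = a + b;   /* narrow integer results are widened to ffi_arg */
}

int main(void)
{
    ffi_cif cif;
    ffi_type *atypes[2] = { &ffi_type_sint, &ffi_type_sint };

    /* Describe int(int, int) with the platform default ABI
       (FFI_UNIX64 on SysV x86-64, FFI_WIN64/FFI_GNUW64 on Windows,
       FFI_SYSV on 32-bit x86). */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, atypes) != FFI_OK)
        return 1;

    /* Forward call: routed through ffi_call_unix64 / ffi_call_win64 / ffi_call_i386. */
    int a = 2, b = 40;
    void *avalues[2] = { &a, &b };
    ffi_arg result;
    ffi_call(&cif, FFI_FN(add_ints), &result, avalues);
    printf("forward call: %d\n", (int)result);

    /* Closure: a freshly generated C-callable trampoline bound to add_handler. */
    void *code;
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
    if (!closure)
        return 1;
    if (ffi_prep_closure_loc(closure, &cif, add_handler, NULL, code) == FFI_OK) {
        int (*fn)(int, int) = (int (*)(int, int))code;
        printf("closure call: %d\n", fn(5, 37));
    }
    ffi_closure_free(closure);
    return 0;
}

Both paths exercise the register load/store and return-type tables shown in the diff above; the closure path additionally relies on FFI_TRAMPOLINE_SIZE from ffitarget.h to locate the cif, fun, and user_data slots behind the trampoline.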