/* -----------------------------------------------------------------------
   closures.c - Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
                Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
                Copyright (c) 2011 Plausible Labs Cooperative, Inc.

   Code to allocate and deallocate memory for closures.

   Permission is hereby granted, free of charge, to any person obtaining
   a copy of this software and associated documentation files (the
   ``Software''), to deal in the Software without restriction, including
   without limitation the rights to use, copy, modify, merge, publish,
   distribute, sublicense, and/or sell copies of the Software, and to
   permit persons to whom the Software is furnished to do so, subject to
   the following conditions:

   The above copyright notice and this permission notice shall be included
   in all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
   HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
   WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE.
   ----------------------------------------------------------------------- */

#if defined __linux__ && !defined _GNU_SOURCE
#define _GNU_SOURCE 1
#endif

#include <ffi.h>
#include <ffi_common.h>

#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
# if __gnu_linux__ && !defined(__ANDROID__)
/* This macro indicates it may be forbidden to map anonymous memory
   with both write and execute permission.  Code compiled when this
   option is defined will attempt to map such pages once, but if it
   fails, it falls back to creating a temporary file in a writable and
   executable filesystem and mapping pages from it into separate
   locations in the virtual memory space, one location writable and
   another executable.  */
# define FFI_MMAP_EXEC_WRIT 1
# define HAVE_MNTENT 1
# endif
# if defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)
/* Windows systems may have Data Execution Protection (DEP) enabled,
   which requires the use of VirtualAlloc/VirtualFree to allocate and
   free executable memory.  */
# define FFI_MMAP_EXEC_WRIT 1
# endif
#endif
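
/* For illustration only: a minimal standalone sketch of the dual-mapping
   technique described above, in which one file-backed region is mapped
   twice, once writable and once executable, so no page is ever both.
   The helper name and the temporary-file location are assumptions; this
   sketch is not used by the code in this file.  */
#if 0
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

static int
dual_map_example (size_t len, void **rw, void **rx)
{
  char path[] = "/tmp/ffi-example-XXXXXX";
  int fd = mkstemp (path);
  if (fd == -1)
    return -1;
  unlink (path);                        /* keep the file anonymous */
  if (ftruncate (fd, (off_t) len) == -1)
    {
      close (fd);
      return -1;
    }
  *rw = mmap (NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  *rx = mmap (NULL, len, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
  close (fd);
  if (*rw == MAP_FAILED || *rx == MAP_FAILED)
    return -1;
  /* Bytes written through *rw are visible at the same offset in *rx.  */
  return 0;
}
#endif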

#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
# ifdef __linux__
/* When defined to 1 check for SELinux and if SELinux is active,
   don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that
   might cause audit messages.  */
# define FFI_MMAP_EXEC_SELINUX 1
# endif
#endif

#if FFI_CLOSURES

# if FFI_EXEC_TRAMPOLINE_TABLE

// Per-target implementation; it's unclear what can reasonably be shared between two OS/architecture implementations.

# elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */

#define USE_LOCKS 1
#define USE_DL_PREFIX 1
#ifdef __GNUC__
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 1
#endif
#endif

/* We need to use mmap, not sbrk. */
#define HAVE_MORECORE 0

/* We could, in theory, support mremap, but it wouldn't buy us anything. */
#define HAVE_MREMAP 0

/* We have no use for this, so save some code and data. */
#define NO_MALLINFO 1

/* We need all allocations to be in regular segments, otherwise we
   lose track of the corresponding code address. */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T

/* Don't allocate more than a page unless needed. */
#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)

#if FFI_CLOSURE_TEST
/* Don't release single pages, to avoid a worst-case scenario of
   continuously allocating and releasing single pages, but release
   pairs of pages, which should do just as well given that allocations
   are likely to be small.  */
#define DEFAULT_TRIM_THRESHOLD ((size_t)malloc_getpagesize)
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#ifndef _MSC_VER
#include <unistd.h>
#endif
#include <string.h>
#include <stdio.h>
#if !defined(X86_WIN32) && !defined(X86_WIN64)
#ifdef HAVE_MNTENT
#include <mntent.h>
#endif /* HAVE_MNTENT */
#include <sys/param.h>
#include <pthread.h>

/* We don't want sys/mman.h to be included after we redefine mmap and
   munmap.  */
#include <sys/mman.h>
#define LACKS_SYS_MMAN_H 1

#if FFI_MMAP_EXEC_SELINUX
#include <sys/statfs.h>
#include <stdlib.h>

static int selinux_enabled = -1;

static int
selinux_enabled_check (void)
{
  /* ... */
#define is_emutramp_enabled() 0
#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */

/* Declare all functions defined in dlmalloc.c as static. */
static void *dlmalloc(size_t);
static void dlfree(void*);
static void *dlcalloc(size_t, size_t) MAYBE_UNUSED;
static void *dlrealloc(void *, size_t) MAYBE_UNUSED;
static void *dlmemalign(size_t, size_t) MAYBE_UNUSED;
static void *dlvalloc(size_t) MAYBE_UNUSED;
static int dlmallopt(int, int) MAYBE_UNUSED;
static size_t dlmalloc_footprint(void) MAYBE_UNUSED;
static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED;
static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED;
static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED;
static void *dlpvalloc(size_t) MAYBE_UNUSED;
static int dlmalloc_trim(size_t) MAYBE_UNUSED;
static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
static void dlmalloc_stats(void) MAYBE_UNUSED;

#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
/* Use these for mmap and munmap within dlmalloc.c. */
static void *dlmmap(void *, size_t, int, int, int, off_t);
static int dlmunmap(void *, size_t);
#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */

#define mmap dlmmap
#define munmap dlmunmap

#include "dlmalloc.c"

#undef mmap
#undef munmap

#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)

/* A mutex used to synchronize access to *exec* variables in this file. */
static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;

/* A file descriptor of a temporary file from which we'll map
   executable pages.  */
static int execfd = -1;

/* The amount of space already allocated from the temporary file. */
static size_t execsize = 0;

/* Open a temporary file name, and immediately unlink it.  */
static int
open_temp_exec_file_name (char *name, int flags)
{
  int fd;

#ifdef HAVE_MKOSTEMP
  fd = mkostemp (name, flags);
#else
  /* ... */
#ifdef O_TMPFILE
  int fd;
#endif

#ifdef O_CLOEXEC
  flags = O_CLOEXEC;
#else
  flags = 0;
#endif

#ifdef O_TMPFILE
  fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
  /* If the running system does not support the O_TMPFILE flag then retry without it. */
  if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) {
    return fd;
  } else {
    errno = 0;
  }
#endif

  lendir = strlen (dir);
  tempname = __builtin_alloca (lendir + sizeof (suffix));

  if (!tempname)
    return -1;

  memcpy (tempname, dir, lendir);
  memcpy (tempname + lendir, suffix, sizeof (suffix));

  return open_temp_exec_file_name (tempname, flags);
}

/* Open a temporary file in the directory in the named environment
   variable.  */
static int
open_temp_exec_file_env (const char *envvar)
{
  const char *value = getenv (envvar);

  if (!value)
    return -1;
/* ... */
{
  int fd;

  do
    {
      fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func
        (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg);

      if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat
          || fd == -1)
        {
          if (open_temp_exec_file_opts_next ())
            break;
        }
    }
  while (fd == -1);

  return fd;
}
451
452 /* Map in a chunk of memory from the temporary exec file into separate
453 locations in the virtual memory address space, one writable and one
454 executable. Returns the address of the writable portion, after
455 storing an offset to the corresponding executable portion at the
456 last word of the requested chunk. */
457 static void *
458 dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset)
459 {
460 void *ptr;
461
462 if (execfd == -1)
463 {
464 open_temp_exec_file_opts_idx = 0;
465 retry_open:
466 execfd = open_temp_exec_file ();
467 if (execfd == -1)
468 return MFAIL;
469 }
470
471 offset = execsize;
472
473 if (ftruncate (execfd, offset + length))
474 return MFAIL;
475
476 flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS);
477 flags |= MAP_SHARED;
478
479 ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC,
480 flags, execfd, offset);
481 if (ptr == MFAIL)
482 {
483 if (!offset)
484 {
485 close (execfd);
486 goto retry_open;
487 }
488 ftruncate (execfd, offset);
489 return MFAIL;
490 }
491 else if (!offset
492 && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
493 open_temp_exec_file_opts_next ();
494
495 start = mmap (start, length, prot, flags, execfd, offset);
496
497 if (start == MFAIL)
498 {
499 munmap (ptr, length);
500 ftruncate (execfd, offset);
501 return start;
502 }
503
504 mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start;
505
506 execsize += length;
507
508 return start;
509 }
510
511 /* Map in a writable and executable chunk of memory if possible.
512 Failing that, fall back to dlmmap_locked. */
513 static void *
514 dlmmap (void *start, size_t length, int prot,
515 int flags, int fd, off_t offset)
516 {
517 void *ptr;
518
519 assert (start == NULL && length % malloc_getpagesize == 0
520 && prot == (PROT_READ | PROT_WRITE)
521 && flags == (MAP_PRIVATE | MAP_ANONYMOUS)
522 && fd == -1 && offset == 0);
523
524 #if FFI_CLOSURE_TEST
525 printf ("mapping in %zi\n", length);
526 #endif
527
528 if (execfd == -1 && is_emutramp_enabled ())
529 {
530 ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
531 return ptr;
532 }
533
534 if (execfd == -1 && !is_selinux_enabled ())
535 {
536 ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);
537
538 if (ptr != MFAIL || (errno != EPERM && errno != EACCES))
539 /* Cool, no need to mess with separate segments. */
540 return ptr;
541
542 /* If MREMAP_DUP is ever introduced and implemented, try mmap
543 with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
544 MREMAP_DUP and prot at this point. */
545 }
546
547 if (execsize == 0 || execfd == -1)
      return ptr;
    }

  return dlmmap_locked (start, length, prot, flags, offset);
}

/* Release memory at the given address, as well as the corresponding
   executable page if it's separate.  */
static int
dlmunmap (void *start, size_t length)
{
  /* We don't bother decreasing execsize or truncating the file, since
     we can't quite tell whether we're unmapping the end of the file.
     We don't expect frequent deallocation anyway.  If we did, we
     could locate pages in the file by writing to the pages being
     deallocated and checking that the file contents change.
     Yuck.  */
  msegmentptr seg = segment_holding (gm, start);
  void *code;

#if FFI_CLOSURE_TEST
  printf ("unmapping %zi\n", length);
#endif

  if (seg && (code = add_segment_exec_offset (start, seg)) != start)
    {
      int ret = munmap (code, length);
      if (ret)
        return ret;
    }

  return munmap (start, length);
}

#if FFI_CLOSURE_FREE_CODE
/* Return segment holding given code address.  */
static msegmentptr
segment_holding_code (mstate m, char* addr)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= add_segment_exec_offset (sp->base, sp)
        && addr < add_segment_exec_offset (sp->base, sp) + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
#endif

#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */

/* Allocate a chunk of memory with the given size.  Returns a pointer
   to the writable address, and sets *CODE to the executable
   corresponding virtual address.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr;

  if (!code)
    return NULL;

  ptr = dlmalloc (size);

  if (ptr)
    {
      msegmentptr seg = segment_holding (gm, ptr);
#ifdef GSTREAMER_LITE
      if (seg == NULL)
        return NULL;
#endif // GSTREAMER_LITE

      *code = add_segment_exec_offset (ptr, seg);
    }

  return ptr;
}

/* Release a chunk of memory allocated with ffi_closure_alloc.  If
   FFI_CLOSURE_FREE_CODE is nonzero, the given address can be either
   the writable or the executable address.  Otherwise, only the
   writable address can be provided here.  */
void
ffi_closure_free (void *ptr)
{
#if FFI_CLOSURE_FREE_CODE
  msegmentptr seg = segment_holding_code (gm, ptr);

  if (seg)
    ptr = sub_segment_exec_offset (ptr, seg);
#endif

  dlfree (ptr);
}
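
/* For reference, a typical libffi client pairs the two entry points above
   with ffi_prep_closure_loc(): allocate, prepare using the executable
   address in CODE, call through CODE, and free the writable address.
   The sketch below is illustrative only; the handler and the signature
   are made-up examples, not part of this file.  */
#if 0
#include <stdio.h>

static void
example_handler (ffi_cif *cif, void *ret, void **args, void *user_data)
{
  /* Return the single int argument plus one.  */
  *(ffi_arg *) ret = *(int *) args[0] + 1;
}

static int
example (void)
{
  ffi_cif cif;
  ffi_type *arg_types[1] = { &ffi_type_sint };
  void *code;
  ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);

  if (closure == NULL)
    return -1;

  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint,
                    arg_types) != FFI_OK
      || ffi_prep_closure_loc (closure, &cif, example_handler,
                               NULL, code) != FFI_OK)
    {
      ffi_closure_free (closure);
      return -1;
    }

  int (*fn) (int) = (int (*) (int)) code;
  printf ("%d\n", fn (41));    /* prints 42 */

  ffi_closure_free (closure);
  return 0;
}
#endif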


#if FFI_CLOSURE_TEST
/* Do some internal sanity testing to make sure allocation and
   deallocation of pages are working as intended.  */
int main ()
{
  void *p[3];
#define GET(idx, len) do { p[idx] = dlmalloc (len); printf ("allocated %zi for p[%i]\n", (len), (idx)); } while (0)
#define PUT(idx) do { printf ("freeing p[%i]\n", (idx)); dlfree (p[idx]); } while (0)
  GET (0, malloc_getpagesize / 2);
  GET (1, 2 * malloc_getpagesize - 64 * sizeof (void*));
  PUT (1);
  GET (1, 2 * malloc_getpagesize);
  GET (2, malloc_getpagesize / 2);
  PUT (1);
  PUT (0);
  PUT (2);
  return 0;
}
#endif /* FFI_CLOSURE_TEST */
# else /* ! FFI_MMAP_EXEC_WRIT */

/* On many systems, memory returned by malloc is writable and
   executable, so just use it.  */

#include <stdlib.h>

void *
ffi_closure_alloc (size_t size, void **code)
{
  if (!code)
    return NULL;

  return *code = malloc (size);
}

void
ffi_closure_free (void *ptr)
{
  free (ptr);
}

# endif /* ! FFI_MMAP_EXEC_WRIT */
#endif /* FFI_CLOSURES */
/* -----------------------------------------------------------------------
   closures.c - Copyright (c) 2019 Anthony Green
                Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
                Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
                Copyright (c) 2011 Plausible Labs Cooperative, Inc.

   Code to allocate and deallocate memory for closures.

   Permission is hereby granted, free of charge, to any person obtaining
   a copy of this software and associated documentation files (the
   ``Software''), to deal in the Software without restriction, including
   without limitation the rights to use, copy, modify, merge, publish,
   distribute, sublicense, and/or sell copies of the Software, and to
   permit persons to whom the Software is furnished to do so, subject to
   the following conditions:

   The above copyright notice and this permission notice shall be included
   in all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
   HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
   WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE.
   ----------------------------------------------------------------------- */

#if defined __linux__ && !defined _GNU_SOURCE
#define _GNU_SOURCE 1
#endif

#include <fficonfig.h>
#include <ffi.h>
#include <ffi_common.h>

#ifdef __NetBSD__
#include <sys/param.h>
#endif

#if __NetBSD_Version__ - 0 >= 799007200
/* NetBSD with PROT_MPROTECT */
#include <sys/mman.h>

#include <stddef.h>
#include <unistd.h>

static const size_t overhead =
  (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t)) ?
    sizeof(max_align_t)
    : sizeof(void *) + sizeof(size_t);

#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d)))

void *
ffi_closure_alloc (size_t size, void **code)
{
  static size_t page_size;
  size_t rounded_size;
  void *codeseg, *dataseg;
  int prot;

  /* Expect that PAX mprotect is active and a separate code mapping is necessary. */
  if (!code)
    return NULL;

  /* Obtain system page size. */
  if (!page_size)
    page_size = sysconf(_SC_PAGESIZE);

  /* Round allocation size up to the next page, keeping in mind the size field and pointer to code map. */
  rounded_size = (size + overhead + page_size - 1) & ~(page_size - 1);

  /* Primary mapping is RW, but request permission to switch to PROT_EXEC later. */
  prot = PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC);
  dataseg = mmap(NULL, rounded_size, prot, MAP_ANON | MAP_PRIVATE, -1, 0);
  if (dataseg == MAP_FAILED)
    return NULL;

  /* Create secondary mapping and switch it to RX. */
  codeseg = mremap(dataseg, rounded_size, NULL, rounded_size, MAP_REMAPDUP);
  if (codeseg == MAP_FAILED) {
    munmap(dataseg, rounded_size);
    return NULL;
  }
  if (mprotect(codeseg, rounded_size, PROT_READ | PROT_EXEC) == -1) {
    munmap(codeseg, rounded_size);
    munmap(dataseg, rounded_size);
    return NULL;
  }

  /* Remember allocation size and location of the secondary mapping for ffi_closure_free. */
  memcpy(dataseg, &rounded_size, sizeof(rounded_size));
  memcpy(ADD_TO_POINTER(dataseg, sizeof(size_t)), &codeseg, sizeof(void *));
  *code = ADD_TO_POINTER(codeseg, overhead);
  return ADD_TO_POINTER(dataseg, overhead);
}

void
ffi_closure_free (void *ptr)
{
  void *codeseg, *dataseg;
  size_t rounded_size;

  dataseg = ADD_TO_POINTER(ptr, -overhead);
  memcpy(&rounded_size, dataseg, sizeof(rounded_size));
  memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *));
  munmap(dataseg, rounded_size);
  munmap(codeseg, rounded_size);
}
#else /* !NetBSD with PROT_MPROTECT */

#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
# if __linux__ && !defined(__ANDROID__)
/* This macro indicates it may be forbidden to map anonymous memory
   with both write and execute permission.  Code compiled when this
   option is defined will attempt to map such pages once, but if it
   fails, it falls back to creating a temporary file in a writable and
   executable filesystem and mapping pages from it into separate
   locations in the virtual memory space, one location writable and
   another executable.  */
# define FFI_MMAP_EXEC_WRIT 1
# define HAVE_MNTENT 1
# endif
# if defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)
/* Windows systems may have Data Execution Protection (DEP) enabled,
   which requires the use of VirtualAlloc/VirtualFree to allocate and
   free executable memory.  */
# define FFI_MMAP_EXEC_WRIT 1
# endif
#endif
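
/* The conditionals above only pick a strategy at compile time; whether an
   anonymous writable+executable mapping is actually permitted is probed at
   run time by dlmmap() further down.  The standalone sketch below shows the
   shape of such a probe in isolation; the helper name is an assumption and
   nothing in this file calls it.  */
#if 0
#include <stddef.h>
#include <sys/mman.h>

static int
rwx_anon_mmap_allowed (size_t pagesize)
{
  void *p = mmap (NULL, pagesize, PROT_READ | PROT_WRITE | PROT_EXEC,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 0;   /* e.g. SELinux or PaX refused the W+X mapping */
  munmap (p, pagesize);
  return 1;
}
#endif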

#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
# if defined(__linux__) && !defined(__ANDROID__)
/* When defined to 1 check for SELinux and if SELinux is active,
   don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that
   might cause audit messages.  */
# define FFI_MMAP_EXEC_SELINUX 1
# endif
#endif

#if FFI_CLOSURES

#if FFI_EXEC_TRAMPOLINE_TABLE

#ifdef __MACH__

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

extern void *ffi_closure_trampoline_table_page;

typedef struct ffi_trampoline_table ffi_trampoline_table;
typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;

struct ffi_trampoline_table
{
  /* contiguous writable and executable pages */
  vm_address_t config_page;
  vm_address_t trampoline_page;

  /* free list tracking */
  uint16_t free_count;
  ffi_trampoline_table_entry *free_list;
  ffi_trampoline_table_entry *free_list_pool;

  ffi_trampoline_table *prev;
  ffi_trampoline_table *next;
};

struct ffi_trampoline_table_entry
{
  void *(*trampoline) (void);
  ffi_trampoline_table_entry *next;
};

/* Total number of trampolines that fit in one trampoline table */
#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE)

static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
static ffi_trampoline_table *ffi_trampoline_tables = NULL;

static ffi_trampoline_table *
ffi_trampoline_table_alloc (void)
{
  ffi_trampoline_table *table;
  vm_address_t config_page;
  vm_address_t trampoline_page;
  vm_address_t trampoline_page_template;
  vm_prot_t cur_prot;
  vm_prot_t max_prot;
  kern_return_t kt;
  uint16_t i;

  /* Allocate two pages -- a config page and a placeholder page */
  config_page = 0x0;
  kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
                    VM_FLAGS_ANYWHERE);
  if (kt != KERN_SUCCESS)
    return NULL;

  /* Remap the trampoline table on top of the placeholder page */
  trampoline_page = config_page + PAGE_MAX_SIZE;
  trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page;
#ifdef __arm__
  /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
  trampoline_page_template &= ~1UL;
#endif
  kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0,
                 VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template,
                 FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE);
  if (kt != KERN_SUCCESS)
    {
      vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2);
      return NULL;
    }

  /* We have valid trampoline and config pages */
  table = calloc (1, sizeof (ffi_trampoline_table));
  table->free_count = FFI_TRAMPOLINE_COUNT;
  table->config_page = config_page;
  table->trampoline_page = trampoline_page;

  /* Create and initialize the free list */
  table->free_list_pool =
    calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));

  for (i = 0; i < table->free_count; i++)
    {
      ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
      entry->trampoline =
        (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));

      if (i < table->free_count - 1)
        entry->next = &table->free_list_pool[i + 1];
    }

  table->free_list = table->free_list_pool;

  return table;
}

static void
ffi_trampoline_table_free (ffi_trampoline_table *table)
{
  /* Remove from the list */
  if (table->prev != NULL)
    table->prev->next = table->next;

  if (table->next != NULL)
    table->next->prev = table->prev;

  /* Deallocate pages */
  vm_deallocate (mach_task_self (), table->config_page, PAGE_MAX_SIZE * 2);

  /* Deallocate free list */
  free (table->free_list_pool);
  free (table);
}

void *
ffi_closure_alloc (size_t size, void **code)
{
  /* Create the closure */
  ffi_closure *closure = malloc (size);
  if (closure == NULL)
    return NULL;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Check for an active trampoline table with available entries. */
  ffi_trampoline_table *table = ffi_trampoline_tables;
  if (table == NULL || table->free_list == NULL)
    {
      table = ffi_trampoline_table_alloc ();
      if (table == NULL)
        {
          pthread_mutex_unlock (&ffi_trampoline_lock);
          free (closure);
          return NULL;
        }

      /* Insert the new table at the top of the list */
      table->next = ffi_trampoline_tables;
      if (table->next != NULL)
        table->next->prev = table;

      ffi_trampoline_tables = table;
    }

  /* Claim the free entry */
  ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list;
  ffi_trampoline_tables->free_list = entry->next;
  ffi_trampoline_tables->free_count--;
  entry->next = NULL;

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Initialize the return values */
  *code = entry->trampoline;
  closure->trampoline_table = table;
  closure->trampoline_table_entry = entry;

  return closure;
}

void
ffi_closure_free (void *ptr)
{
  ffi_closure *closure = ptr;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Fetch the table and entry references */
  ffi_trampoline_table *table = closure->trampoline_table;
  ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;

  /* Return the entry to the free list */
  entry->next = table->free_list;
  table->free_list = entry;
  table->free_count++;

  /* If all trampolines within this table are free, and at least one other
     table exists, deallocate the table */
  if (table->free_count == FFI_TRAMPOLINE_COUNT
      && ffi_trampoline_tables != table)
    {
      ffi_trampoline_table_free (table);
    }
  else if (ffi_trampoline_tables != table)
    {
      /* Otherwise, bump this table to the top of the list */
      table->prev = NULL;
      table->next = ffi_trampoline_tables;
      if (ffi_trampoline_tables != NULL)
        ffi_trampoline_tables->prev = table;

      ffi_trampoline_tables = table;
    }

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Free the closure */
  free (closure);
}

#endif

// Per-target implementation; it's unclear what can reasonably be shared between two OS/architecture implementations.

#elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */

#define USE_LOCKS 1
#define USE_DL_PREFIX 1
#ifdef __GNUC__
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 1
#endif
#endif

/* We need to use mmap, not sbrk. */
#define HAVE_MORECORE 0

/* We could, in theory, support mremap, but it wouldn't buy us anything. */
#define HAVE_MREMAP 0

/* We have no use for this, so save some code and data. */
#define NO_MALLINFO 1

/* We need all allocations to be in regular segments, otherwise we
   lose track of the corresponding code address. */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T

/* Don't allocate more than a page unless needed. */
#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#ifndef _MSC_VER
#include <unistd.h>
#endif
#include <string.h>
#include <stdio.h>
#if !defined(X86_WIN32) && !defined(X86_WIN64) && !defined(_M_ARM64)
#ifdef HAVE_MNTENT
#include <mntent.h>
#endif /* HAVE_MNTENT */
#include <sys/param.h>
#include <pthread.h>

/* We don't want sys/mman.h to be included after we redefine mmap and
   munmap.  */
#include <sys/mman.h>
#define LACKS_SYS_MMAN_H 1

#if FFI_MMAP_EXEC_SELINUX
#include <sys/statfs.h>
#include <stdlib.h>

static int selinux_enabled = -1;

static int
selinux_enabled_check (void)
{
  /* ... */
#define is_emutramp_enabled() 0
#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */

/* Declare all functions defined in dlmalloc.c as static. */
static void *dlmalloc(size_t);
static void dlfree(void*);
static void *dlcalloc(size_t, size_t) MAYBE_UNUSED;
static void *dlrealloc(void *, size_t) MAYBE_UNUSED;
static void *dlmemalign(size_t, size_t) MAYBE_UNUSED;
static void *dlvalloc(size_t) MAYBE_UNUSED;
static int dlmallopt(int, int) MAYBE_UNUSED;
static size_t dlmalloc_footprint(void) MAYBE_UNUSED;
static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED;
static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED;
static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED;
static void *dlpvalloc(size_t) MAYBE_UNUSED;
static int dlmalloc_trim(size_t) MAYBE_UNUSED;
static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
static void dlmalloc_stats(void) MAYBE_UNUSED;

#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
/* Use these for mmap and munmap within dlmalloc.c. */
static void *dlmmap(void *, size_t, int, int, int, off_t);
static int dlmunmap(void *, size_t);
#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */

#define mmap dlmmap
#define munmap dlmunmap

#include "dlmalloc.c"

#undef mmap
#undef munmap

#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)

/* A mutex used to synchronize access to *exec* variables in this file. */
static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;

/* A file descriptor of a temporary file from which we'll map
   executable pages.  */
static int execfd = -1;

/* The amount of space already allocated from the temporary file. */
static size_t execsize = 0;

/* Open a temporary file name, and immediately unlink it.  */
static int
open_temp_exec_file_name (char *name, int flags)
{
  int fd;

#ifdef HAVE_MKOSTEMP
  fd = mkostemp (name, flags);
#else
  /* ... */
#ifdef O_TMPFILE
  int fd;
#endif

#ifdef O_CLOEXEC
  flags = O_CLOEXEC;
#else
  flags = 0;
#endif

#ifdef O_TMPFILE
  fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
  /* If the running system does not support the O_TMPFILE flag then retry without it. */
  if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) {
    return fd;
  } else {
    errno = 0;
  }
#endif

  lendir = (int) strlen (dir);
  tempname = __builtin_alloca (lendir + sizeof (suffix));

  if (!tempname)
    return -1;

  memcpy (tempname, dir, lendir);
  memcpy (tempname + lendir, suffix, sizeof (suffix));

  return open_temp_exec_file_name (tempname, flags);
}

/* Open a temporary file in the directory in the named environment
   variable.  */
static int
open_temp_exec_file_env (const char *envvar)
{
  const char *value = getenv (envvar);

  if (!value)
    return -1;
/* ... */
{
  int fd;

  do
    {
      fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func
        (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg);

      if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat
          || fd == -1)
        {
          if (open_temp_exec_file_opts_next ())
            break;
        }
    }
  while (fd == -1);

  return fd;
}

/* We need to allocate space in a file that will be backing a writable
   mapping.  Several problems exist with the usual approaches:
   - fallocate() is Linux-only
   - posix_fallocate() is not available on all platforms
   - ftruncate() does not allocate space on filesystems with sparse files
   Failure to allocate the space will cause a SIGBUS to be raised when
   the mapping is subsequently written to.  */
static int
allocate_space (int fd, off_t offset, off_t len)
{
  static size_t page_size;

  /* Obtain system page size. */
  if (!page_size)
    page_size = sysconf(_SC_PAGESIZE);

  unsigned char buf[page_size];
  memset (buf, 0, page_size);

  while (len > 0)
    {
      off_t to_write = (len < page_size) ? len : page_size;
      if (write (fd, buf, to_write) < to_write)
        return -1;
      len -= to_write;
    }

  return 0;
}
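
/* Where posix_fallocate() is available it achieves the same guarantee as
   the write loop above without dirtying every page.  A hedged alternative
   sketch, assuming POSIX.1-2001 posix_fallocate() support; the function
   name is an assumption and this file keeps using allocate_space().  */
#if 0
#include <fcntl.h>

static int
allocate_space_fallocate (int fd, off_t offset, off_t len)
{
  /* posix_fallocate returns an error number directly, not -1/errno.  */
  if (posix_fallocate (fd, offset, len) != 0)
    return -1;  /* e.g. EINVAL or EOPNOTSUPP: fall back to a write loop */
  return 0;
}
#endif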

/* Map in a chunk of memory from the temporary exec file into separate
   locations in the virtual memory address space, one writable and one
   executable.  Returns the address of the writable portion, after
   storing an offset to the corresponding executable portion at the
   last word of the requested chunk.  */
static void *
dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset)
{
  void *ptr;

  if (execfd == -1)
    {
      open_temp_exec_file_opts_idx = 0;
    retry_open:
      execfd = open_temp_exec_file ();
      if (execfd == -1)
        return MFAIL;
    }

  offset = execsize;

  if (allocate_space (execfd, offset, length))
    return MFAIL;

  flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS);
  flags |= MAP_SHARED;

  ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC,
              flags, execfd, offset);
  if (ptr == MFAIL)
    {
      if (!offset)
        {
          close (execfd);
          goto retry_open;
        }
      if (ftruncate (execfd, offset) != 0)
        {
          /* FIXME: error logs could be added here.  There is no point in
             propagating an error from ftruncate(), since we are already
             on an error path.  */
        }

      return MFAIL;
    }
  else if (!offset
           && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
    open_temp_exec_file_opts_next ();

  start = mmap (start, length, prot, flags, execfd, offset);

  if (start == MFAIL)
    {
      munmap (ptr, length);
      if (ftruncate (execfd, offset) != 0)
        {
          /* FIXME: error logs could be added here.  There is no point in
             propagating an error from ftruncate(), since we are already
             on an error path.  */
        }
      return start;
    }

  mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start;

  execsize += length;

  return start;
}

/* Map in a writable and executable chunk of memory if possible.
   Failing that, fall back to dlmmap_locked.  */
static void *
dlmmap (void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  void *ptr;

  assert (start == NULL && length % malloc_getpagesize == 0
          && prot == (PROT_READ | PROT_WRITE)
          && flags == (MAP_PRIVATE | MAP_ANONYMOUS)
          && fd == -1 && offset == 0);

  if (execfd == -1 && is_emutramp_enabled ())
    {
      ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
      return ptr;
    }

  if (execfd == -1 && !is_selinux_enabled ())
    {
      ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);

      if (ptr != MFAIL || (errno != EPERM && errno != EACCES))
        /* Cool, no need to mess with separate segments.  */
        return ptr;

      /* If MREMAP_DUP is ever introduced and implemented, try mmap
         with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
         MREMAP_DUP and prot at this point.  */
    }

  if (execsize == 0 || execfd == -1)
    /* ... */
      return ptr;
    }

  return dlmmap_locked (start, length, prot, flags, offset);
}

/* Release memory at the given address, as well as the corresponding
   executable page if it's separate.  */
static int
dlmunmap (void *start, size_t length)
{
  /* We don't bother decreasing execsize or truncating the file, since
     we can't quite tell whether we're unmapping the end of the file.
     We don't expect frequent deallocation anyway.  If we did, we
     could locate pages in the file by writing to the pages being
     deallocated and checking that the file contents change.
     Yuck.  */
  msegmentptr seg = segment_holding (gm, start);
  void *code;

  if (seg && (code = add_segment_exec_offset (start, seg)) != start)
    {
      int ret = munmap (code, length);
      if (ret)
        return ret;
    }

  return munmap (start, length);
}

#if FFI_CLOSURE_FREE_CODE
/* Return segment holding given code address.  */
static msegmentptr
segment_holding_code (mstate m, char* addr)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= add_segment_exec_offset (sp->base, sp)
        && addr < add_segment_exec_offset (sp->base, sp) + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
#endif

#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */

/* Allocate a chunk of memory with the given size.  Returns a pointer
   to the writable address, and sets *CODE to the executable
   corresponding virtual address.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr;

  if (!code)
    return NULL;

  ptr = dlmalloc (size);

  if (ptr)
    {
      msegmentptr seg = segment_holding (gm, ptr);
#ifdef GSTREAMER_LITE
      if (seg == NULL)
        return NULL;
#endif // GSTREAMER_LITE

      *code = add_segment_exec_offset (ptr, seg);
    }

  return ptr;
}

void *
ffi_data_to_code_pointer (void *data)
{
  msegmentptr seg = segment_holding (gm, data);
  /* We expect closures to be allocated with ffi_closure_alloc(), in
     which case seg will be non-NULL.  However, some users take on the
     burden of managing this memory themselves, in which case we'll
     just return data.  */
  if (seg)
    return add_segment_exec_offset (data, seg);
  else
    return data;
}
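
/* A hedged sketch of the intended call pattern for the helper above:
   code that only holds the writable closure address can translate it to
   the executable alias before installing it somewhere callable.  The
   helper name below is hypothetical and not used by this file.  */
#if 0
static void
install_callable_address (void *closure_data, void **slot)
{
  /* closure_data is the writable address returned by ffi_closure_alloc;
     the value stored in *slot is the executable alias of it.  */
  *slot = ffi_data_to_code_pointer (closure_data);
}
#endif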

/* Release a chunk of memory allocated with ffi_closure_alloc.  If
   FFI_CLOSURE_FREE_CODE is nonzero, the given address can be either
   the writable or the executable address.  Otherwise, only the
   writable address can be provided here.  */
void
ffi_closure_free (void *ptr)
{
#if FFI_CLOSURE_FREE_CODE
  msegmentptr seg = segment_holding_code (gm, ptr);

  if (seg)
    ptr = sub_segment_exec_offset (ptr, seg);
#endif

  dlfree (ptr);
}

# else /* ! FFI_MMAP_EXEC_WRIT */

/* On many systems, memory returned by malloc is writable and
   executable, so just use it.  */

#include <stdlib.h>

void *
ffi_closure_alloc (size_t size, void **code)
{
  if (!code)
    return NULL;

  return *code = malloc (size);
}

void
ffi_closure_free (void *ptr)
{
  free (ptr);
}

void *
ffi_data_to_code_pointer (void *data)
{
  return data;
}

# endif /* ! FFI_MMAP_EXEC_WRIT */
#endif /* FFI_CLOSURES */

#endif /* NetBSD with PROT_MPROTECT */