blob: 1d6c3808b2174ca1a8a18d9fc7911a5b8269ba1b [file] [log] [blame]
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

Igor Kudrind9edde42016-10-07 08:48:289#include "fallback_malloc.h"
10
Asiri Rathnayake97ba9fa2017-01-03 12:58:3411#include <__threading_support>
Petr Hosek996e62e2019-05-30 01:34:4112#ifndef _LIBCXXABI_HAS_NO_THREADS
Michał Górnya9b5fff2019-12-02 10:49:2013#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
Petr Hosek996e62e2019-05-30 01:34:4114#pragma comment(lib, "pthread")
15#endif
16#endif
Jonathan Roelofs40e98422014-05-06 21:30:5617
#include <cstdio>   // for std::printf (DEBUG_FALLBACK_MALLOC / INSTRUMENT_FALLBACK_MALLOC paths)
#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset
#include <new>      // for std::__libcpp_aligned_{alloc,free}
Igor Kudrind9edde42016-10-07 08:48:2821
Igor Kudrind9edde42016-10-07 08:48:2822// A small, simple heap manager based (loosely) on
Howard Hinnant73ab1862012-01-24 21:41:2723// the startup heap manager from FreeBSD, optimized for space.
24//
25// Manages a fixed-size memory pool, supports malloc and free only.
26// No support for realloc.
27//
28// Allocates chunks in multiples of four bytes, with a four byte header
29// for each chunk. The overhead of each chunk is kept low by keeping pointers
30// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
31
32namespace {
33
// When POSIX threads are not available, make the mutex operations a nop
// (heap_mutex degenerates to an unused placeholder pointer).
#ifndef _LIBCXXABI_HAS_NO_THREADS
// Guards all access to the fallback pool (heap, freelist).
static _LIBCPP_CONSTINIT std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static _LIBCPP_CONSTINIT void* heap_mutex = 0;
#endif
41class mutexor {
42public:
Asiri Rathnayake7c98baa2016-09-21 09:09:3243#ifndef _LIBCXXABI_HAS_NO_THREADS
Eric Fiselier8524cba2017-03-04 03:23:1544 mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
45 std::__libcpp_mutex_lock(mtx_);
46 }
47 ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
Asiri Rathnayake7c98baa2016-09-21 09:09:3248#else
Eric Fiselier8524cba2017-03-04 03:23:1549 mutexor(void*) {}
50 ~mutexor() {}
Jonathan Roelofs40e98422014-05-06 21:30:5651#endif
Howard Hinnant73ab1862012-01-24 21:41:2752private:
Eric Fiselier8524cba2017-03-04 03:23:1553 mutexor(const mutexor& rhs);
54 mutexor& operator=(const mutexor& rhs);
Asiri Rathnayake7c98baa2016-09-21 09:09:3255#ifndef _LIBCXXABI_HAS_NO_THREADS
Eric Fiselier8524cba2017-03-04 03:23:1556 std::__libcpp_mutex_t* mtx_;
Jonathan Roelofs40e98422014-05-06 21:30:5657#endif
Asiri Rathnayake97ba9fa2017-01-03 12:58:3458};
Howard Hinnant73ab1862012-01-24 21:41:2759
// The emergency pool: a fixed 512-byte buffer carved into 4-byte
// (sizeof(heap_node)) units.
static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

// Offsets and sizes are measured in units of sizeof(heap_node), so a
// two-byte field can address the whole pool (keeps per-chunk overhead low).
typedef unsigned short heap_offset;
typedef unsigned short heap_size;

// Chunk header: lives immediately before the user memory of every chunk,
// and doubles as a free-list link while the chunk is free.
struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;  // NULL until init_heap() runs
Howard Hinnant73ab1862012-01-24 21:41:2774
Eric Fiselier8524cba2017-03-04 03:23:1575heap_node* node_from_offset(const heap_offset offset) {
76 return (heap_node*)(heap + (offset * sizeof(heap_node)));
77}
Howard Hinnant73ab1862012-01-24 21:41:2778
Eric Fiselier8524cba2017-03-04 03:23:1579heap_offset offset_from_node(const heap_node* ptr) {
80 return static_cast<heap_offset>(
81 static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
82 sizeof(heap_node));
83}
Igor Kudrind9edde42016-10-07 08:48:2884
Eric Fiselier8524cba2017-03-04 03:23:1585void init_heap() {
86 freelist = (heap_node*)heap;
87 freelist->next_node = offset_from_node(list_end);
88 freelist->len = HEAP_SIZE / sizeof(heap_node);
89}
Igor Kudrind9edde42016-10-07 08:48:2890
Howard Hinnant73ab1862012-01-24 21:41:2791// How big a chunk we allocate
Eric Fiselier8524cba2017-03-04 03:23:1592size_t alloc_size(size_t len) {
93 return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
94}
Howard Hinnant73ab1862012-01-24 21:41:2795
Eric Fiselier8524cba2017-03-04 03:23:1596bool is_fallback_ptr(void* ptr) {
97 return ptr >= heap && ptr < (heap + HEAP_SIZE);
98}
Howard Hinnant73ab1862012-01-24 21:41:2799
// Allocate `len` bytes from the emergency pool using first-fit over the
// free list. Returns NULL if no free chunk is large enough. Thread-safe:
// holds heap_mutex for the duration.
void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len); // request size in heap_node units
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    if (p->len > nelems) { // chunk is larger, shorten, and return the tail
      heap_node* q;

      // Carve the allocation off the *end* of the chunk, so neither p's
      // free-list link nor prev's needs to change.
      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len; // header of the newly carved chunk
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1); // user memory starts just past the header
    }

    if (p->len == nelems) { // exact size match
      // Unlink the whole chunk from the free list.
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a spot big enough
}
133
134// Return the start of the next block
Eric Fiselier8524cba2017-03-04 03:23:15135heap_node* after(struct heap_node* p) { return p + p->len; }
Howard Hinnant73ab1862012-01-24 21:41:27136
Eric Fiselier8524cba2017-03-04 03:23:15137void fallback_free(void* ptr) {
138 struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
139 struct heap_node *p, *prev;
Howard Hinnant73ab1862012-01-24 21:41:27140
Eric Fiselier8524cba2017-03-04 03:23:15141 mutexor mtx(&heap_mutex);
Howard Hinnant73ab1862012-01-24 21:41:27142
143#ifdef DEBUG_FALLBACK_MALLOC
Louis Dionnecc69d212020-10-13 19:47:31144 std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
Howard Hinnant73ab1862012-01-24 21:41:27145#endif
146
Eric Fiselier8524cba2017-03-04 03:23:15147 for (p = freelist, prev = 0; p && p != list_end;
148 prev = p, p = node_from_offset(p->next_node)) {
Howard Hinnant73ab1862012-01-24 21:41:27149#ifdef DEBUG_FALLBACK_MALLOC
Louis Dionnecc69d212020-10-13 19:47:31150 std::printf(" p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
151 offset_from_node(p), offset_from_node(cp),
152 offset_from_node(after(p)), offset_from_node(after(cp)));
Howard Hinnant73ab1862012-01-24 21:41:27153#endif
Eric Fiselier8524cba2017-03-04 03:23:15154 if (after(p) == cp) {
Howard Hinnant73ab1862012-01-24 21:41:27155#ifdef DEBUG_FALLBACK_MALLOC
Louis Dionnecc69d212020-10-13 19:47:31156 std::printf(" Appending onto chunk at %d\n", offset_from_node(p));
Howard Hinnant73ab1862012-01-24 21:41:27157#endif
Eric Fiselier8524cba2017-03-04 03:23:15158 p->len = static_cast<heap_size>(
159 p->len + cp->len); // make the free heap_node larger
160 return;
161 } else if (after(cp) == p) { // there's a free heap_node right after
Howard Hinnant73ab1862012-01-24 21:41:27162#ifdef DEBUG_FALLBACK_MALLOC
Louis Dionnecc69d212020-10-13 19:47:31163 std::printf(" Appending free chunk at %d\n", offset_from_node(p));
Howard Hinnant73ab1862012-01-24 21:41:27164#endif
Eric Fiselier8524cba2017-03-04 03:23:15165 cp->len = static_cast<heap_size>(cp->len + p->len);
166 if (prev == 0) {
167 freelist = cp;
168 cp->next_node = p->next_node;
169 } else
170 prev->next_node = offset_from_node(cp);
171 return;
172 }
173 }
Howard Hinnant73ab1862012-01-24 21:41:27174// Nothing to merge with, add it to the start of the free list
175#ifdef DEBUG_FALLBACK_MALLOC
Louis Dionnecc69d212020-10-13 19:47:31176 std::printf(" Making new free list entry %d\n", offset_from_node(cp));
Howard Hinnant73ab1862012-01-24 21:41:27177#endif
Eric Fiselier8524cba2017-03-04 03:23:15178 cp->next_node = offset_from_node(freelist);
179 freelist = cp;
Howard Hinnant73ab1862012-01-24 21:41:27180}
181
#ifdef INSTRUMENT_FALLBACK_MALLOC
// Debug/instrumentation aid: print every free-list entry and return the
// total free space, in heap_node units. Lazily initializes the heap like
// fallback_malloc does. Not compiled into production builds.
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    // prev tracks whether this is the first entry (controls indentation).
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : " "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
Eric Fiselier8524cba2017-03-04 03:23:15199} // end unnamed namespace
Igor Kudrind9edde42016-10-07 08:48:28200
201namespace __cxxabiv1 {
202
Eric Fiselier8524cba2017-03-04 03:23:15203struct __attribute__((aligned)) __aligned_type {};
Eric Fiselierc74a2e12017-03-04 02:04:45204
// Allocate `size` bytes aligned for any overaligned type, trying the system
// allocator first and falling back to the emergency pool on failure.
void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
    return dest;
#elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
  // No library aligned-alloc available: plain malloc (presumably its default
  // alignment suffices in this configuration — inherited assumption).
  if (void* dest = ::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1; // avoid a zero-size aligned_alloc request (result would be
              // implementation-defined); ask for one byte instead
  if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
    return dest;
#endif
  return fallback_malloc(size);
}
220
Eric Fiselier8524cba2017-03-04 03:23:15221void* __calloc_with_fallback(size_t count, size_t size) {
Louis Dionne04501a22019-10-01 18:43:02222 void* ptr = ::calloc(count, size);
Eric Fiselier8524cba2017-03-04 03:23:15223 if (NULL != ptr)
Igor Kudrind9edde42016-10-07 08:48:28224 return ptr;
Eric Fiselier8524cba2017-03-04 03:23:15225 // if calloc fails, fall back to emergency stash
226 ptr = fallback_malloc(size * count);
227 if (NULL != ptr)
Louis Dionne04501a22019-10-01 18:43:02228 ::memset(ptr, 0, size * count);
Eric Fiselier8524cba2017-03-04 03:23:15229 return ptr;
Igor Kudrind9edde42016-10-07 08:48:28230}
231
// Release memory obtained from __aligned_malloc_with_fallback: pool pointers
// go back to the fallback heap, everything else to the matching system
// deallocator for the configuration used at allocation time.
void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
    ::free(ptr); // allocation came from plain ::malloc in this config
#else
    std::__libcpp_aligned_free(ptr);
#endif
  }
}
243
Eric Fiselier8524cba2017-03-04 03:23:15244void __free_with_fallback(void* ptr) {
245 if (is_fallback_ptr(ptr))
246 fallback_free(ptr);
247 else
Louis Dionne04501a22019-10-01 18:43:02248 ::free(ptr);
Igor Kudrind9edde42016-10-07 08:48:28249}
250
Igor Kudrind9edde42016-10-07 08:48:28251} // namespace __cxxabiv1