author     Danny Holman <dholman@gymli.org>    2024-09-15 14:49:26 -0500
committer  Danny Holman <dholman@gymli.org>    2024-09-15 14:49:26 -0500
commit     5b2c84c0b6880c66657e6fdd0f802a2187c25d05 (patch)
tree       f2e46a1dbb8ca1496245449893c6a08e980867c5 /core/alloc.c
parent     1064864b5112653e0038adeca05ae6db090bf587 (diff)
build: break the engine into its subsystems (v0.60)
Break the source code into various subsystem directories. This allows certain subsystems to be disabled at compile time, if needed. Move the build system from raw Makefiles to a CMake generator. This drastically simplifies the build and requires only editing a single file, rather than the several make.config files in subsystem directories.

Signed-off-by: Danny Holman <dholman@gymli.org>
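
The per-subsystem compile-time toggle described above can be pictured as an ordinary preprocessor guard driven by the build system. The sketch below is illustrative only: the RUNE_BUILD_AUDIO macro and the stand-in init function are hypothetical names, assuming each subsystem's CMake option() is forwarded to the compiler as a definition.

#include <stdio.h>

/* Hypothetical sketch, not part of this commit: a subsystem that was switched
 * off at configure time is simply compiled out of the build. */
#ifdef RUNE_BUILD_AUDIO
static int rune_audio_init(void) {
        puts("audio subsystem enabled");
        return 0;
}
#endif

int main(void) {
#ifdef RUNE_BUILD_AUDIO
        if (rune_audio_init() != 0)
                return 1;
#else
        puts("audio subsystem disabled at configure time");
#endif
        return 0;
}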
Diffstat (limited to 'core/alloc.c')
-rw-r--r--    core/alloc.c    105
1 file changed, 105 insertions(+), 0 deletions(-)
diff --git a/core/alloc.c b/core/alloc.c
new file mode 100644
index 0000000..88ebb8f
--- /dev/null
+++ b/core/alloc.c
@@ -0,0 +1,105 @@
+#include <rune/core/alloc.h>
+#include <rune/core/logging.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+// TODO: implement block coalescing so we can reuse freed blocks
+
+#define DEADBLOCK ((void*)0xDEADBEEF)
+
+static struct mem_block first_block;
+
+// Walk the block list looking for a previously freed block of exactly sz bytes
+static struct mem_block* _find_free_block(size_t sz) {
+        struct list_head *temp = &first_block.list;
+        struct mem_block *block;
+        while (temp != NULL) {
+                block = (struct mem_block*)((char*)temp - offsetof(struct mem_block, list));
+                if (block->sz == sz && block->free == 1)
+                        return block;
+                temp = temp->next;
+        }
+        return NULL;
+}
+
+// Find the bookkeeping block that owns the user pointer ptr
+static struct mem_block* _find_block(void *ptr) {
+        struct list_head *temp = &first_block.list;
+        struct mem_block *block;
+        while (temp != NULL) {
+                block = (struct mem_block*)((char*)temp - offsetof(struct mem_block, list));
+                if (block->ptr == ptr)
+                        return block;
+                temp = temp->next;
+        }
+        return NULL;
+}
+
+void* rune_alloc(size_t sz) {
+        if (sz == 0)
+                return NULL;
+
+        // Lazily initialize the static list head with the sentinel pointer
+        if (first_block.ptr == NULL) {
+                first_block.ptr = DEADBLOCK;
+                first_block.sz = 0;
+        }
+
+        // Reuse a freed block of the same size if one exists
+        struct mem_block *block = _find_free_block(sz);
+        if (block != NULL) {
+                block->free = 0;
+                return block->ptr;
+        }
+
+        block = malloc(sizeof(struct mem_block));
+        if (block == NULL) {
+                log_output(LOG_ERROR, "Cannot allocate block of size %zu", sz);
+                return NULL;
+        }
+        block->ptr = malloc(sz);
+        if (block->ptr == NULL) {
+                log_output(LOG_ERROR, "Cannot allocate block of size %zu", sz);
+                free(block);
+                return NULL;
+        }
+        block->sz = sz;
+        block->free = 0;
+        list_add(&block->list, &first_block.list);
+        log_output(LOG_DEBUG, "Alloc'd block of size %zu", sz);
+        return block->ptr;
+}
+
+void* rune_calloc(size_t nmemb, size_t sz) {
+        // Guard against nmemb * sz overflowing before allocating
+        if (sz != 0 && nmemb > SIZE_MAX / sz)
+                return NULL;
+        void *ret = rune_alloc(nmemb * sz);
+        if (ret != NULL)
+                memset(ret, 0, nmemb * sz);
+        return ret;
+}
+
+void* rune_realloc(void *ptr, size_t sz) {
+        if (ptr == NULL || sz == 0)
+                return NULL;
+
+        struct mem_block *block = _find_block(ptr);
+        if (block == NULL)
+                return rune_alloc(sz);
+        if (block->sz >= sz)
+                return block->ptr;
+
+        // Grow: allocate a new block, copy the old contents, then release the old block
+        void *ret = rune_alloc(sz);
+        if (ret == NULL)
+                return NULL;
+        memcpy(ret, block->ptr, block->sz);
+        rune_free(block->ptr);
+        return ret;
+}
+
+void rune_free(void *ptr) {
+        if (ptr == NULL)
+                return;
+
+        // Mark the block free so it can be reused; the memory itself is kept
+        struct mem_block *block = _find_block(ptr);
+        if (block == NULL || block->free == 1)
+                return;
+        block->free = 1;
+        log_output(LOG_DEBUG, "Freed block of size %zu", block->sz);
+}
+
+void rune_free_all(void) {
+        struct list_head *temp = &first_block.list;
+        struct mem_block *block;
+        while (temp != NULL) {
+                block = (struct mem_block*)((char*)temp - offsetof(struct mem_block, list));
+                temp = temp->next;
+
+                // The static list head carries the DEADBLOCK sentinel; skip it
+                if (block->ptr == DEADBLOCK)
+                        continue;
+
+                // Release both the user buffer and its bookkeeping header
+                if (block->ptr != NULL)
+                        free(block->ptr);
+                free(block);
+        }
+        first_block.list.next = NULL;
+}
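
The new file gives the engine a malloc-style front end (rune_alloc, rune_calloc, rune_realloc, rune_free, rune_free_all) backed by a tracked block list. A minimal usage sketch against this API, assuming the prototypes in rune/core/alloc.h match the definitions above:

#include <rune/core/alloc.h>

int main(void) {
        // Tracked allocation; the block is recorded in the allocator's list
        int *values = rune_alloc(16 * sizeof(int));
        if (values == NULL)
                return 1;
        for (int i = 0; i < 16; i++)
                values[i] = i;

        // Zero-initialized allocation
        int *zeroed = rune_calloc(8, sizeof(int));

        // Growing copies the old contents into a fresh block
        values = rune_realloc(values, 32 * sizeof(int));

        rune_free(zeroed);   // marks the block free for reuse; memory is retained
        rune_free(values);
        rune_free_all();     // releases everything back to the system at shutdown
        return 0;
}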