author     Danny Holman <dholman@gymli.org>    2024-05-27 14:25:27 -0500
committer  Danny Holman <dholman@gymli.org>    2024-05-27 14:26:45 -0500
commit     5fb0ba537ab15f9c83afd9a939cf57c84d443856 (patch)
tree       50592128b2ae9779dc570c1ae89db3f3d64ae495 /arch
parent     e472fbf564a8dee7ec4cb03b3e77a213a017bc76 (diff)
arch: i386: pmem: refactor the physical MM
Refactor the physical memory manager to be dramatically simpler. The new design uses only a single freelist and uses temporary mappings only where needed.

Signed-off-by: Danny Holman <dholman@gymli.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/include/kernel/pmem.h  15
-rw-r--r--  arch/i386/kernel/pmem.c          91
2 files changed, 41 insertions, 65 deletions
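
For readers skimming the diff below: the refactor drops the per-zone bookkeeping (struct pfa_zone) in favor of one intrusive freelist whose links live inside the free frames themselves. The fragment that follows is only an illustrative user-space sketch of that push/pop idea, not code from this tree; the names demo_free and demo_alloc and the identity-mapped region array are invented for the demo, and the real allocator reaches a frame through a temporary mapping rather than touching it directly.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Intrusive freelist node: the link is stored in the free page itself. */
struct pfa_page {
        struct pfa_page *next;
};

/* List head; head.next points at the most recently freed page. */
static struct pfa_page head;

/* Push a page onto the freelist (demo only: memory is directly addressable). */
static void demo_free(uintptr_t paddr) {
        struct pfa_page *p = (struct pfa_page*)paddr;
        p->next = head.next;
        head.next = p;
}

/* Pop a page, or return 0 when the list is empty. */
static uintptr_t demo_alloc(void) {
        struct pfa_page *p = head.next;
        if (p == NULL)
                return 0;
        head.next = p->next;    /* advance the head before handing the page out */
        return (uintptr_t)p;
}

int main(void) {
        /* Stand-in for a small region of physical memory. */
        static _Alignas(struct pfa_page) unsigned char region[4 * PAGE_SIZE];
        for (uintptr_t a = (uintptr_t)region; a < (uintptr_t)region + sizeof(region); a += PAGE_SIZE)
                demo_free(a);
        printf("popped page at %p\n", (void*)demo_alloc());
        return 0;
}

Keeping the link inside the free frame is what lets the new pmem.c get away with a single list head and no other allocator metadata.
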
diff --git a/arch/i386/include/kernel/pmem.h b/arch/i386/include/kernel/pmem.h
index ddd9c8b..da169c7 100644
--- a/arch/i386/include/kernel/pmem.h
+++ b/arch/i386/include/kernel/pmem.h
@@ -4,25 +4,16 @@
#include <kernel/multiboot.h>
#include <stdint.h>
-#define PFA_BLOCK_FREE 1
-#define PFA_BLOCK_ALLOC 3
-
#define PFA_ALLOC_ERR 0xFFFFFFFF
struct pfa_page {
struct pfa_page *next;
};
-struct pfa_zone {
- uintptr_t start;
- uintptr_t size;
- struct pfa_page *freelist;
-};
-
-int pfa_init(struct mboot_info *header);
+void pfa_init(struct mboot_info *header);
uintptr_t pfa_alloc(void);
-void pfa_free(struct pfa_zone *zone, uintptr_t paddr);
-void pfa_free_range(struct pfa_zone *zone, uintptr_t pstart, uintptr_t pend);
+void pfa_free(uintptr_t paddr);
+void pfa_free_range(uintptr_t start, uintptr_t end);
#endif
diff --git a/arch/i386/kernel/pmem.c b/arch/i386/kernel/pmem.c
index 851b085..e59c80b 100644
--- a/arch/i386/kernel/pmem.c
+++ b/arch/i386/kernel/pmem.c
@@ -2,70 +2,55 @@
#include <kernel/asm.h>
#include <kernel/panic.h>
#include <kernel/paging.h>
-#include <kernel/string.h>
-
-static struct pfa_zone bios_area;
-static struct pfa_zone himem;
-
-int pfa_init(struct mboot_info *header) {
- bios_area.start = 0;
- bios_area.size = 0;
- bios_area.freelist = NULL;
-
- himem.start = HIMEM_START;
- himem.size = 0;
- himem.freelist = NULL;
-
- struct mboot_mmap_entry *mme = (struct mboot_mmap_entry*)(header->mmap_addr);
- map_page(NULL, (uintptr_t)mme, (uintptr_t)mme, PD_PRES);
- while ((uintptr_t)mme < (header->mmap_addr + header->mmap_length)) {
- if (mme->addr_low >= KSTART && mme->addr_low <= KEND)
- continue;
- if (mme->type == MBOOT_MEM_AVAILABLE) {
- if (mme->addr_low < HIMEM_START)
- pfa_free_range(&bios_area, (uintptr_t)mme->addr_low, (uintptr_t)(mme->addr_low+mme->len_low));
- else
- pfa_free_range(&himem, (uintptr_t)mme->addr_low, (uintptr_t)(mme->addr_low+mme->len_low));
- }
- unmap_page(NULL, (uintptr_t)mme);
- mme += sizeof(struct mboot_mmap_entry);
- map_page(NULL, (uintptr_t)mme, (uintptr_t)mme, PD_PRES);
+#include <libk/io.h>
+#include <libk/string.h>
+
+struct pfa_page freelist;
+
+void pfa_init(struct mboot_info *header) {
+ struct mboot_mmap_entry *mme;
+ struct mboot_mmap_entry *temp_map;
+ for (uintptr_t i = 0; i < header->mmap_length; i += sizeof(struct mboot_mmap_entry)) {
+ mme = (struct mboot_mmap_entry*)(header->mmap_addr + i);
+ temp_map = (struct mboot_mmap_entry*)GET_VADDR(mme);
+ map_page(NULL, (uintptr_t)mme, (uintptr_t)temp_map, PD_PRES);
+ if (temp_map->type == MBOOT_MEM_AVAILABLE)
+ pfa_free_range(temp_map->addr_low, temp_map->addr_low + temp_map->len_low);
+ unmap_page(NULL, (uintptr_t)temp_map);
}
- unmap_page(NULL, (uintptr_t)mme);
- return 0;
}
uintptr_t pfa_alloc(void) {
- struct pfa_page *temp = bios_area.freelist;
- map_page(NULL, (uintptr_t)temp, (uintptr_t)temp, PD_RW | PD_PRES);
- if (temp == NULL)
+ struct pfa_page *ret = freelist.next;
+ if (ret == NULL)
return PFA_ALLOC_ERR;
- bios_area.freelist = temp->next;
- memset(temp, PFA_BLOCK_ALLOC, 32);
- unmap_page(NULL, (uintptr_t)temp);
- return (uintptr_t)temp;
+ struct pfa_page *temp_map = (struct pfa_page*)0x1000;
+ map_page(NULL, (uintptr_t)ret, (uintptr_t)temp_map, PD_PRES | PD_RW);
+ memset((char*)temp_map, 0, PAGE_SIZE);
+ unmap_page(NULL, (uintptr_t)temp_map);
+ return (uintptr_t)ret;
}
-void pfa_free(struct pfa_zone *zone, uintptr_t paddr) {
+void pfa_free(uintptr_t paddr) {
if (paddr % PAGE_SIZE != 0)
panic("Task attempted to free non-aligned memory");
+ if (paddr == 0)
+ return;
if (paddr >= KSTART && paddr < KEND)
- panic("Task attempted to free kernel memory");
-
- map_page(NULL, paddr, paddr, PD_PRES | PD_RW);
- memset((void*)paddr, PFA_BLOCK_FREE, 32);
- struct pfa_page *temp = (struct pfa_page*)paddr;
- temp->next = zone->freelist;
- zone->freelist = temp;
- unmap_page(NULL, paddr);
- zone->size += PAGE_SIZE;
+ return;
+ if (paddr >= 0x80000 && paddr < 0x100000)
+ return;
+
+ struct pfa_page *temp_map = (struct pfa_page*)0x1000;
+ map_page(NULL, paddr, (uintptr_t)temp_map, PD_PRES | PD_RW);
+ memset(temp_map, 1, 32);
+ temp_map->next = freelist.next;
+ freelist.next = (struct pfa_page*)paddr;
+ unmap_page(NULL, (uintptr_t)temp_map);
}
-void pfa_free_range(struct pfa_zone *zone, uintptr_t pstart, uintptr_t pend) {
- uintptr_t p = PGROUNDUP(pstart);
- while (p <= pend) {
- pfa_free(zone, p);
- p += PAGE_SIZE;
- }
+void pfa_free_range(uintptr_t start, uintptr_t end) {
+ for (uintptr_t i = start; i < end; i += PAGE_SIZE)
+ pfa_free(i);
}