Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  108
1 file changed, 56 insertions, 52 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d893813..b6096da 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1,70 +1,74 @@
 #include <kernel/sched.h>
-#include <kernel/container_of.h>
-#include <kernel/data/list.h>
-#include <kernel/string.h>
-#include <kernel/kmalloc.h>
+#include <kernel/kthread.h>
+#include <kernel/panic.h>
+#include <libk/io.h>
+#include <libk/kmalloc.h>
+#include <libk/string.h>
 #include <stddef.h>
-static struct list_head *ready_queue;
-static struct list_head *wait_queue;
-
-static struct task_block *cur = NULL;
-static long int next_id = 1;
+static struct kthread *run_queue = NULL;
+static struct kthread *wait_queue = NULL;
+static struct kthread *cur = NULL;
 static int scheduler_enabled = 0;
-
-static void _enqueue(struct list_head *queue, struct task_block *task) {
-	struct list_head *temp = queue;
-	while (temp->next != NULL)
-		temp = temp->next;
-	list_add(&task->list, temp);
-}
-
-static struct task_block* _dequeue(struct list_head *queue) {
-	if (queue->next == NULL)
-		return NULL;
-	struct list_head *temp = queue->next;
-	list_del(temp);
-	struct task_block *ret = container_of(temp, struct task_block, list);
-	return ret;
-}
+static struct kmutex scheduler_lock;
 void sched_init(void) {
-	ready_queue = kmalloc(sizeof(struct list_head));
-	wait_queue = kmalloc(sizeof(struct list_head));
-	struct task_block *boot_task = init_task(0, 0);
-	boot_task->threads->state = THREAD_RUNNING;
-	cur = boot_task;
-
-	switch_thread(boot_task->threads, boot_task->threads);
+	disable_ints();
+	cur = kthread_create(NULL, NULL);
+	cur->stack = NULL;
+	switch_thread(cur, cur);
+	cur->state = THREAD_RUNNING;
 	scheduler_enabled = 1;
+	enable_ints();
 }
 void schedule_next(void) {
 	if (scheduler_enabled == 0)
 		return;
+	if (run_queue == NULL)
+		return;
-	struct task_block *task = _dequeue(ready_queue);
-	if (task == NULL)
+	struct kthread *thread = cur;
+	schedule_thread(cur);
+	cur = run_queue;
+	run_queue = run_queue->next;
+	cur->next = NULL;
+	cur->state = THREAD_RUNNING;
+	switch_thread(thread, cur);
+}
+
+void schedule_thread(struct kthread *thread) {
+	if (run_queue == NULL) {
+		run_queue = thread;
 		return;
-	if (cur != NULL)
-		_enqueue(ready_queue, cur);
-	disable_ints();
-	switch_thread(cur->threads, task->threads);
-	enable_ints();
-	task->threads->state = THREAD_RUNNING;
-	cur->threads->state = THREAD_READY;
-	cur = task;
+	}
+
+	kmutex_lock(&scheduler_lock);
+	struct kthread *temp = run_queue;
+	while (temp->next != NULL)
+		temp = temp->next;
+	temp->next = thread;
+	thread->state = THREAD_READY;
+	kmutex_unlock(&scheduler_lock);
 }
-struct task_block* init_task(int nice, unsigned int ppid) {
-	struct task_block *ret = kmalloc(sizeof(struct task_block));
-	ret->pid = next_id++;
-	ret->parent_pid = ppid;
-	ret->nice = nice;
-	ret->num_threads = 1;
-	ret->threads = kmalloc(sizeof(struct thread_block));
-	ret->threads->tid = ret->pid;
-	ret->threads->tgid = ret->pid;
-	return ret;
+void block_thread(struct kthread *thread) {
+	kmutex_lock(&scheduler_lock);
+	thread->next = wait_queue;
+	wait_queue = thread;
+	kmutex_unlock(&scheduler_lock);
+}
+
+void unblock_thread(struct kthread *thread) {
+	kmutex_lock(&scheduler_lock);
+	struct kthread **link = &wait_queue;
+	while (*link != NULL) {
+		if (*link == thread) {
+			*link = thread->next;	/* unlink, including when thread is the head */
+			break;	/* fall through so scheduler_lock is released */
+		}
+		link = &(*link)->next;
+	}
+	kmutex_unlock(&scheduler_lock);
 }
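
For context, a minimal sketch of how a caller might drive the new interface. It assumes kthread_create() takes an entry function and an argument (this patch only ever calls kthread_create(NULL, NULL)); worker() and spawn_worker() are illustrative names, not part of the patch:

#include <kernel/kthread.h>
#include <kernel/sched.h>
#include <stddef.h>

/* Illustrative worker body: do some work, then cooperatively yield.
 * schedule_next() requeues the caller and switches to the head of
 * the run queue. */
static void worker(void *arg) {
	(void)arg;
	for (;;)
		schedule_next();
}

/* Hypothetical helper: create a thread and hand it to the scheduler. */
void spawn_worker(void) {
	struct kthread *t = kthread_create(worker, NULL);	/* assumed signature */
	schedule_thread(t);	/* put it on the run queue */
}

block_thread() only pushes a thread onto the wait queue and unblock_thread() only unlinks it again; a thread taken off the wait queue still has to be handed back to schedule_thread() before schedule_next() will pick it.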