Diffstat (limited to 'kernel/kalloc.c')
 kernel/kalloc.c | 130 +++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 118 insertions(+), 12 deletions(-)
diff --git a/kernel/kalloc.c b/kernel/kalloc.c
index 0699e7e..85cf6b7 100644
--- a/kernel/kalloc.c
+++ b/kernel/kalloc.c
@@ -9,6 +9,9 @@
#include "riscv.h"
#include "defs.h"
+// NOTE: interrupts must be disabled (push_off()) while this macro is
+// in use, so that cpuid() stays stable and the caller cannot migrate
+// to another CPU mid-operation.
+#define CUR_KMEM (kmem_list[cpuid()])
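+//
+// Minimal usage sketch (illustrative only, not part of this change):
+//
+//   push_off();                    // disable interrupts; pin cpuid()
+//   struct kmem *k = &CUR_KMEM;    // this CPU's allocator state
+//   acquire(&k->lock);
+//   ... use k->freelist ...
+//   release(&k->lock);
+//   pop_off();                     // restore interrupt state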
+
void freerange(void *pa_start, void *pa_end);
extern char end[]; // first address after kernel.
@@ -18,15 +21,55 @@ struct run {
struct run *next;
};
-struct {
+struct kmem {
struct spinlock lock;
struct run *freelist;
-} kmem;
+};
+
+struct kmem kmem_list[NCPU];
+
+// One reference count per physical page, indexed by pa/PGSIZE.
+int phypg_refcnt[PHYSTOP/PGSIZE];
+
+struct spinlock refcnt_lock;
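+// A single global lock serializes all refcnt updates. This is the
+// simplest correct scheme; a finer-grained alternative (not done here)
+// would be a per-page lock or an atomic counter per page.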
+
+// Increment the reference count of the physical page at pa.
+int
+refcnt_inc(uint64 pa)
+{
+  if(pa >= PHYSTOP)
+    panic("refcnt_inc: pa out of range");
+  acquire(&refcnt_lock);
+  int *prefcnt = &phypg_refcnt[pa/PGSIZE];
+  if(*prefcnt < 1)
+    panic("refcnt_inc: page not allocated");
+  int cnt = ++(*prefcnt);
+  release(&refcnt_lock);
+  return cnt;
+}
+
+// Decrement the reference count of the physical page at pa.
+// Returns the new count so callers can detect the last reference.
+int
+refcnt_dec(uint64 pa)
+{
+  if(pa >= PHYSTOP)
+    panic("refcnt_dec: pa out of range");
+  acquire(&refcnt_lock);
+  int *prefcnt = &phypg_refcnt[pa/PGSIZE];
+  if(*prefcnt < 1)
+    panic("refcnt_dec: page not allocated");
+  int cnt = --(*prefcnt);
+  release(&refcnt_lock);
+  return cnt;
+}
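+
+// Hypothetical caller sketch (assumption: these counts back a
+// copy-on-write fork; the callers are not shown in this diff). In
+// xv6's uvmcopy(), the child would map the parent's physical page
+// instead of copying it:
+//
+//   // share pa read-only between parent and child, then:
+//   refcnt_inc(pa);
+//
+// and the page-fault handler would later kalloc() a private copy and
+// kfree() the shared page, letting refcnt_dec() decide its fate.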
void
kinit()
{
-  initlock(&kmem.lock, "kmem");
+  // Each lock needs its own name buffer: initlock() stores only the
+  // pointer, so a single shared buffer would leave every lock named
+  // after the last CPU.
+  static char lock_names[NCPU][8];
+  for(int i = 0; i < NCPU; i++){
+    snprintf(lock_names[i], sizeof(lock_names[i]), "kmem.%d", i);
+    initlock(&kmem_list[i].lock, lock_names[i]);
+  }
+  // Start every page's refcnt at 1; freerange() calls kfree(), which
+  // drops each count to 0 and links the page into the current (boot)
+  // CPU's freelist.
+  for(uint64 p = PGROUNDUP((uint64)end); p + PGSIZE <= PHYSTOP; p += PGSIZE)
+    phypg_refcnt[p/PGSIZE] = 1;
+  initlock(&refcnt_lock, "refcnt");
freerange(end, (void*)PHYSTOP);
}
@@ -51,15 +94,24 @@ kfree(void *pa)
if(((uint64)pa % PGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
panic("kfree");
+  // Drop this caller's reference; only the last reference really
+  // frees the page. Using the value returned under refcnt_lock avoids
+  // racing with a concurrent refcnt_inc().
+  if(refcnt_dec((uint64)pa) > 0)
+    return;
+
// Fill with junk to catch dangling refs.
memset(pa, 1, PGSIZE);
r = (struct run*)pa;
- acquire(&kmem.lock);
- r->next = kmem.freelist;
- kmem.freelist = r;
- release(&kmem.lock);
+  push_off();  // disable interrupts: CUR_KMEM requires a stable cpuid()
+ struct kmem *kmem = &CUR_KMEM;
+ acquire(&kmem->lock);
+ r->next = kmem->freelist;
+ kmem->freelist = r;
+ release(&kmem->lock);
+ pop_off();
}
// Allocate one 4096-byte page of physical memory.
@@ -70,13 +122,67 @@ kalloc(void)
{
struct run *r;
- acquire(&kmem.lock);
- r = kmem.freelist;
- if(r)
- kmem.freelist = r->next;
- release(&kmem.lock);
+  push_off();  // pin cpuid() so CUR_KMEM stays this CPU's list
+  struct kmem *kmem = &CUR_KMEM;
+ acquire(&kmem->lock);
+ r = kmem->freelist;
+  if(r){
+    // A page on the freelist must have refcnt 0; claim it as ours.
+    acquire(&refcnt_lock);
+ if(phypg_refcnt[(uint64)r/PGSIZE])
+ panic("kalloc: invalid refcnt");
+ phypg_refcnt[(uint64)r/PGSIZE] = 1;
+ release(&refcnt_lock);
+ kmem->freelist = r->next;
+ }
+
+  // Release our own list's lock before probing the other CPUs' lists;
+  // holding two kmem locks at once could deadlock with a CPU that is
+  // stealing in the opposite direction.
+  release(&kmem->lock);
+
+ if(!r){
+    // Our freelist is empty: try to steal a page from another CPU.
+ for(int i = 0; i < NCPU; i++){
+ if(kmem == &kmem_list[i])
+ continue;
+
+ acquire(&kmem_list[i].lock);
+ struct run *f = kmem_list[i].freelist;
+ if(f){
+ r = f;
+ kmem_list[i].freelist = f->next;
+ }
+      if(r){
+        // Take refcnt_lock before releasing the victim's list lock,
+        // so the stolen page is never visible with a stale count.
+        acquire(&refcnt_lock);
+        release(&kmem_list[i].lock);
+        if(phypg_refcnt[(uint64)r/PGSIZE])
+          panic("kalloc: invalid refcnt");
+        phypg_refcnt[(uint64)r/PGSIZE] = 1;
+        release(&refcnt_lock);
+        break;
+      }
+ release(&kmem_list[i].lock);
+ }
+ }
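+
+  // Design note: stealing one page per refill keeps the code simple,
+  // but each miss can cost up to NCPU-1 lock acquisitions. A common
+  // variation (not implemented here) moves half of the victim's
+  // freelist in a single acquisition to amortize that cost.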
if(r)
memset((char*)r, 5, PGSIZE); // fill with junk
+ pop_off();
return (void*)r;
}
+
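+// Return the number of free bytes of physical memory. (Presumably
+// exported for a sysinfo-style syscall; the caller is not shown in
+// this diff.)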
+int
+get_freemem(void)
+{
+  int n = 0;
+  struct run *r;
+
+  // n accumulates across all CPUs; resetting it inside the loop would
+  // report only the last CPU's freelist.
+  for(int i = 0; i < NCPU; i++){
+    acquire(&kmem_list[i].lock);
+    for(r = kmem_list[i].freelist; r; r = r->next)
+      n++;
+    release(&kmem_list[i].lock);
+  }
+  return n * PGSIZE;
+}