-rw-r--r--  kernel/kalloc.c  91
1 file changed, 71 insertions(+), 20 deletions(-)
diff --git a/kernel/kalloc.c b/kernel/kalloc.c
index 581f0f6..85cf6b7 100644
--- a/kernel/kalloc.c
+++ b/kernel/kalloc.c
@@ -9,6 +9,9 @@
#include "riscv.h"
#include "defs.h"
+// NOTE: use this macro only with interrupts disabled (push_off()), so that
+// cpuid() stays stable and per-CPU accesses stay free of races and deadlocks.
+#define CUR_KMEM (kmem_list[cpuid()])
+
void freerange(void *pa_start, void *pa_end);
extern char end[]; // first address after kernel.
@@ -18,23 +21,27 @@ struct run {
struct run *next;
};
-struct {
+struct kmem {
struct spinlock lock;
struct run *freelist;
-} kmem;
+};
+
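+// one allocator (lock + free list) per CPU: a CPU normally allocates and
+// frees on its own list, and steals from other CPUs only when it runs dry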
+struct kmem kmem_list[NCPU];
int phypg_refcnt[PHYSTOP/PGSIZE];
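+// refcnt_lock guards phypg_refcnt[]; keeping it separate from the per-CPU
+// kmem locks means a refcount update never needs a free-list lock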
+struct spinlock refcnt_lock;
+
// Increase the refcnt
int
refcnt_inc(uint64 pa)
{
- acquire(&kmem.lock);
+ acquire(&refcnt_lock);
int *prefcnt = &phypg_refcnt[pa/PGSIZE];
if(pa > PHYSTOP || *prefcnt < 1)
panic("increase refcnt");
(*prefcnt)++;
- release(&kmem.lock);
+ release(&refcnt_lock);
return *prefcnt;
}
@@ -42,22 +49,27 @@ refcnt_inc(uint64 pa)
int
refcnt_dec(uint64 pa)
{
- acquire(&kmem.lock);
+ acquire(&refcnt_lock);
int *prefcnt = &phypg_refcnt[pa/PGSIZE];
if(pa > PHYSTOP || *prefcnt < 1)
panic("decrease refcnt");
(*prefcnt)--;
- release(&kmem.lock);
+ release(&refcnt_lock);
return *prefcnt;
}
void
kinit()
{
- initlock(&kmem.lock, "kmem");
+ // initlock() keeps the name pointer, so each lock needs its own buffer
+ static char lock_names[NCPU][8];
+ for(int i = 0; i < NCPU; i++){
+ snprintf(lock_names[i], sizeof(lock_names[i]), "kmem.%d", i);
+ initlock(&kmem_list[i].lock, lock_names[i]);
+ }
// init all refcnt to 1, which would later be freed to 0 by kfree()
for(uint64 p = PGROUNDUP((uint64)end); p + PGSIZE <= PHYSTOP; p += PGSIZE)
phypg_refcnt[p/PGSIZE] = 1;
+ initlock(&refcnt_lock, "refcnt");
freerange(end, (void*)PHYSTOP);
}
@@ -93,10 +105,13 @@ kfree(void *pa)
r = (struct run*)pa;
- acquire(&kmem.lock);
- r->next = kmem.freelist;
- kmem.freelist = r;
- release(&kmem.lock);
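+ // interrupts must stay off while cpuid()/CUR_KMEM is in use, or the code
+ // could migrate to another CPU between picking a kmem and locking it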
+ push_off();
+ struct kmem *kmem = &CUR_KMEM;
+ acquire(&kmem->lock);
+ r->next = kmem->freelist;
+ kmem->freelist = r;
+ release(&kmem->lock);
+ pop_off();
}
// Allocate one 4096-byte page of physical memory.
@@ -107,18 +122,53 @@ kalloc(void)
{
struct run *r;
- acquire(&kmem.lock);
- r = kmem.freelist;
+ push_off();
+ struct kmem *kmem = &CUR_KMEM;
+ acquire(&kmem->lock);
+ r = kmem->freelist;
if(r){
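+ // a page coming off the free list must have refcnt 0; mark it owned (refcnt 1)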
+ acquire(&refcnt_lock);
if(phypg_refcnt[(uint64)r/PGSIZE])
panic("kalloc: invalid refcnt");
phypg_refcnt[(uint64)r/PGSIZE] = 1;
- kmem.freelist = r->next;
+ release(&refcnt_lock);
+ kmem->freelist = r->next;
+ }
+
+ // drop our own lock before stealing from others, so no two CPUs can
+ // ever hold each other's locks and deadlock
+ release(&kmem->lock);
+
+ if(!r){
+ // our list is empty: try to steal a page from another CPU's freelist
+ for(int i = 0; i < NCPU; i++){
+ if(kmem == &kmem_list[i])
+ continue;
+
+ acquire(&kmem_list[i].lock);
+ struct run *f = kmem_list[i].freelist;
+ if(f){
+ r = f;
+ kmem_list[i].freelist = f->next;
+ }
+ if(r){
+ // take refcnt_lock before touching phypg_refcnt, so another
+ // CPU cannot update the count concurrently
+ acquire(&refcnt_lock);
+ // the page is already off that CPU's list, so its lock can be dropped
+ release(&kmem_list[i].lock);
+ if(phypg_refcnt[(uint64)r/PGSIZE])
+ panic("kalloc: invalid refcnt");
+ phypg_refcnt[(uint64)r/PGSIZE] = 1;
+ release(&refcnt_lock);
+ break;
+ }
+ release(&kmem_list[i].lock);
+ }
}
- release(&kmem.lock);
if(r)
memset((char*)r, 5, PGSIZE); // fill with junk
+ pop_off();
return (void*)r;
}
@@ -128,10 +178,11 @@ get_freemem(void)
int n;
struct run *r;
- acquire(&kmem.lock);
- for (n = 0, r = kmem.freelist; r; r = r->next)
- n++;
- release(&kmem.lock);
-
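+ // free memory is now spread over per-CPU lists; sum them all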
+ for(int i = 0; i < NCPU; i++){
+ acquire(&kmem_list[i].lock);
+ for(n = 0, r = kmem_list[i].freelist; r; r = r->next)
+ n++;
+ release(&kmem_list[i].lock);
+ }
return n * PGSIZE;
}
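
For context, below is a minimal sketch (not part of this commit) of the locking pattern that CUR_KMEM and the NOTE above assume: cpuid() is only meaningful while interrupts are off, so every use is wrapped in push_off()/pop_off() plus the per-CPU lock, exactly as kfree() and kalloc() do. The helper name cur_freelist_len() is hypothetical and exists only to illustrate the pattern.

// hypothetical helper, not in kalloc.c: count the pages on this CPU's free list
static int
cur_freelist_len(void)
{
  int n = 0;
  push_off();                     // interrupts off: cpuid() can no longer change
  struct kmem *kmem = &CUR_KMEM;  // this CPU's allocator
  acquire(&kmem->lock);
  for(struct run *r = kmem->freelist; r; r = r->next)
    n++;
  release(&kmem->lock);
  pop_off();                      // restore the previous interrupt state
  return n;
}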