summaryrefslogtreecommitdiff
path: root/kernel/vm.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/vm.c')
-rw-r--r--  kernel/vm.c  32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/kernel/vm.c b/kernel/vm.c
index 580669f..bdb53c2 100644
--- a/kernel/vm.c
+++ b/kernel/vm.c
@@ -97,8 +97,8 @@ walk(pagetable_t pagetable, uint64 va, int alloc)
}
// Look up a virtual address, return the physical address,
-// Can only be used to look up user pages.
// or 0 if not mapped.
+// Can only be used to look up user pages.
uint64
walkaddr(pagetable_t pagetable, uint64 va)
{
@@ -119,8 +119,9 @@ walkaddr(pagetable_t pagetable, uint64 va)
// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa. va and size might not
-// be page-aligned.
-void
+// be page-aligned. Returns 0 on success, -1 if walk() couldn't
+// allocate a needed page-table page.
+int
mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
{
uint64 a, last;
@@ -130,7 +131,7 @@ mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
last = PGROUNDDOWN(va + size - 1);
for(;;){
if((pte = walk(pagetable, a, 1)) == 0)
- panic("mappages: walk");
+ return -1;
if(*pte & PTE_V)
panic("remap");
*pte = PA2PTE(pa) | perm | PTE_V;
@@ -139,6 +140,7 @@ mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
a += PGSIZE;
pa += PGSIZE;
}
+ return 0;
}
// Remove mappings from a page table. The mappings in
@@ -222,7 +224,11 @@ uvmalloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz)
return 0;
}
memset(mem, 0, PGSIZE);
- mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_W|PTE_X|PTE_R|PTE_U);
+ if(mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_W|PTE_X|PTE_R|PTE_U) != 0){
+ kfree(mem);
+ uvmdealloc(pagetable, a, oldsz);
+ return 0;
+ }
}
return newsz;
}
@@ -273,7 +279,9 @@ uvmfree(pagetable_t pagetable, uint64 sz)
// its memory into a child's page table.
// Copies both the page table and the
// physical memory.
-void
+// returns 0 on success, -1 on failure.
+// frees any allocated pages on failure.
+int
uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
{
pte_t *pte;
@@ -289,10 +297,18 @@ uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
pa = PTE2PA(*pte);
flags = PTE_FLAGS(*pte);
if((mem = kalloc()) == 0)
- panic("uvmcopy: kalloc failed");
+ goto err;
memmove(mem, (char*)pa, PGSIZE);
- mappages(new, i, PGSIZE, (uint64)mem, flags);
+ if(mappages(new, i, PGSIZE, (uint64)mem, flags) != 0){
+ kfree(mem);
+ goto err;
+ }
}
+ return 0;
+
+ err:
+ unmappages(new, 0, i, 1);
+ return -1;
}
// Copy from kernel to user.