author     Robert Morris <[email protected]>  2019-07-24 15:28:37 -0400
committer  Robert Morris <[email protected]>  2019-07-24 15:28:37 -0400
commit     9a817bd134cd83c8a8c8cc96f022a0c22cc5bdb4
tree       6007c6292c5d07144f3fa27be732fe27223a87d5 /kernel/vm.c
parent     da898a11b61fd36e430ffd89b495dd5693c66e30
rationalize some vm function names
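For reference, the renames in this commit are:

kmap       -> kvmmap    (kernel page table)
kernelpa   -> kvmpa     (kernel page table; moved up next to kvmmap)
unmappages -> uvmunmap  (user page tables)
clearpteu  -> uvmclear  (user page tables)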
Diffstat (limited to 'kernel/vm.c')
-rw-r--r--  kernel/vm.c  98
1 file changed, 49 insertions, 49 deletions
diff --git a/kernel/vm.c b/kernel/vm.c
index 5063956..33469d1 100644
--- a/kernel/vm.c
+++ b/kernel/vm.c
@@ -27,26 +27,26 @@ kvminit()
memset(kernel_pagetable, 0, PGSIZE);
// uart registers
- kmap(UART0, UART0, PGSIZE, PTE_R | PTE_W);
+ kvmmap(UART0, UART0, PGSIZE, PTE_R | PTE_W);
// virtio mmio disk interface
- kmap(VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
+ kvmmap(VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
// CLINT
- kmap(CLINT, CLINT, 0x10000, PTE_R | PTE_W);
+ kvmmap(CLINT, CLINT, 0x10000, PTE_R | PTE_W);
// PLIC
- kmap(PLIC, PLIC, 0x400000, PTE_R | PTE_W);
+ kvmmap(PLIC, PLIC, 0x400000, PTE_R | PTE_W);
// map kernel text executable and read-only.
- kmap(KERNBASE, KERNBASE, (uint64)etext-KERNBASE, PTE_R | PTE_X);
+ kvmmap(KERNBASE, KERNBASE, (uint64)etext-KERNBASE, PTE_R | PTE_X);
// map kernel data and the physical RAM we'll make use of.
- kmap((uint64)etext, (uint64)etext, PHYSTOP-(uint64)etext, PTE_R | PTE_W);
+ kvmmap((uint64)etext, (uint64)etext, PHYSTOP-(uint64)etext, PTE_R | PTE_W);
// map the trampoline for trap entry/exit to
// the highest virtual address in the kernel.
- kmap(TRAMPOLINE, (uint64)trampout, PGSIZE, PTE_R | PTE_X);
+ kvmmap(TRAMPOLINE, (uint64)trampout, PGSIZE, PTE_R | PTE_X);
}
// Switch h/w page table register to the kernel's page table,
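For context, the comment above introduces kvminithart, which this commit leaves unchanged; as in xv6, it loads satp with the kernel page table and flushes stale TLB entries:

// install the kernel page table in satp and flush the TLB,
// enabling paging with the mappings kvminit just created.
void
kvminithart()
{
  w_satp(MAKE_SATP(kernel_pagetable));
  sfence_vma();
}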
@@ -114,10 +114,30 @@ walkaddr(pagetable_t pagetable, uint64 va)
// only used when booting.
// does not flush TLB or enable paging.
void
-kmap(uint64 va, uint64 pa, uint64 sz, int perm)
+kvmmap(uint64 va, uint64 pa, uint64 sz, int perm)
{
if(mappages(kernel_pagetable, va, sz, pa, perm) != 0)
- panic("kmap");
+ panic("kvmmap");
+}
+
+// translate a kernel virtual address to
+// a physical address. only needed for
+// addresses on the stack.
+// va need not be page aligned; the page offset is preserved.
+uint64
+kvmpa(uint64 va)
+{
+ uint64 off = va % PGSIZE;
+ pte_t *pte;
+ uint64 pa;
+
+ pte = walk(kernel_pagetable, va, 0);
+ if(pte == 0)
+ panic("kernelpa");
+ if((*pte & PTE_V) == 0)
+ panic("kernelpa");
+ pa = PTE2PA(*pte);
+ return pa+off;
}
// Create PTEs for virtual addresses starting at va that refer to
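The kvmpa comment's mention of stack addresses refers to xv6's virtio disk driver, which must hand the device a physical address for a request buffer that lives on a kernel stack; a minimal sketch of the call pattern (buf0 and its type are illustrative, not part of this commit):

struct virtio_req buf0;               // stack-resident request buffer (illustrative)
uint64 pa = kvmpa((uint64) &buf0);    // walk kernel_pagetable, keep the page offset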
@@ -150,7 +170,7 @@ mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
// the given range must exist. Optionally free the
// physical memory.
void
-unmappages(pagetable_t pagetable, uint64 va, uint64 size, int do_free)
+uvmunmap(pagetable_t pagetable, uint64 va, uint64 size, int do_free)
{
uint64 a, last;
pte_t *pte;
@@ -160,13 +180,13 @@ unmappages(pagetable_t pagetable, uint64 va, uint64 size, int do_free)
last = PGROUNDDOWN(va + size - 1);
for(;;){
if((pte = walk(pagetable, a, 0)) == 0)
- panic("unmappages: walk");
+ panic("uvmunmap: walk");
if((*pte & PTE_V) == 0){
printf("va=%p pte=%p\n", a, *pte);
- panic("unmappages: not mapped");
+ panic("uvmunmap: not mapped");
}
if(PTE_FLAGS(*pte) == PTE_V)
- panic("unmappages: not a leaf");
+ panic("uvmunmap: not a leaf");
if(do_free){
pa = PTE2PA(*pte);
kfree((void*)pa);
@@ -245,7 +265,7 @@ uvmdealloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz)
{
if(newsz >= oldsz)
return oldsz;
- unmappages(pagetable, newsz, oldsz - newsz, 1);
+ uvmunmap(pagetable, newsz, oldsz - newsz, 1);
return newsz;
}
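For context on the do_free flag, the main caller of uvmdealloc is growproc, which grows or shrinks a process's memory; a sketch as in xv6's proc.c:

// grow or shrink process memory by n bytes; shrinking goes
// through uvmdealloc -> uvmunmap with do_free = 1.
int
growproc(int n)
{
  uint sz;
  struct proc *p = myproc();

  sz = p->sz;
  if(n > 0){
    if((sz = uvmalloc(p->pagetable, sz, sz + n)) == 0)
      return -1;
  } else if(n < 0){
    sz = uvmdealloc(p->pagetable, sz, sz + n);
  }
  p->sz = sz;
  return 0;
}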
@@ -274,7 +294,7 @@ freewalk(pagetable_t pagetable)
void
uvmfree(pagetable_t pagetable, uint64 sz)
{
- unmappages(pagetable, 0, sz, 1);
+ uvmunmap(pagetable, 0, sz, 1);
freewalk(pagetable);
}
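The do_free = 0 case of uvmunmap exists for pages a page table maps but does not own; a sketch loosely based on xv6's proc_freepagetable (the TRAMPOLINE and TRAPFRAME pages are shared or freed elsewhere, so only the mappings are removed):

// tear down a process page table: unmap shared pages without
// freeing them, then free the process's own memory and the
// page-table pages themselves.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
  uvmunmap(pagetable, TRAMPOLINE, PGSIZE, 0);  // shared trampoline: don't free
  uvmunmap(pagetable, TRAPFRAME, PGSIZE, 0);   // trapframe freed with the proc
  uvmfree(pagetable, sz);                      // user memory: do free
}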
@@ -310,10 +330,23 @@ uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
return 0;
err:
- unmappages(new, 0, i, 1);
+ uvmunmap(new, 0, i, 1);
return -1;
}
+// mark a PTE invalid for user access.
+// used by exec for the user stack guard page.
+void
+uvmclear(pagetable_t pagetable, uint64 va)
+{
+ pte_t *pte;
+
+ pte = walk(pagetable, va, 0);
+ if(pte == 0)
+ panic("clearpteu");
+ *pte &= ~PTE_U;
+}
+
// Copy from kernel to user.
// Copy len bytes from src to virtual address dstva in a given page table.
// Return 0 on success, -1 on error.
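The exec use of uvmclear described in its comment above allocates two pages at the top of the new image and revokes user access to the lower one, so a stack overflow faults instead of silently corrupting memory; a sketch based on xv6's exec.c (sz is the current top of the new image):

sz = PGROUNDUP(sz);
if((sz = uvmalloc(pagetable, sz, sz + 2*PGSIZE)) == 0)
  goto bad;
uvmclear(pagetable, sz - 2*PGSIZE);   // guard page: clear PTE_U
sp = sz;                              // user stack grows down from sz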
@@ -406,36 +439,3 @@ copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
return -1;
}
}
-
-// translate a kernel virtual address to
-// a physical address. only needed for
-// addresses on the stack.
-// assumes va is page aligned.
-uint64
-kernelpa(uint64 va)
-{
- uint64 off = va % PGSIZE;
- pte_t *pte;
- uint64 pa;
-
- pte = walk(kernel_pagetable, va, 0);
- if(pte == 0)
- panic("kernelpa");
- if((*pte & PTE_V) == 0)
- panic("kernelpa");
- pa = PTE2PA(*pte);
- return pa+off;
-}
-
-// mark a PTE invalid for user access.
-// used by exec for the user stack guard page.
-void
-clearpteu(pagetable_t pagetable, uint64 va)
-{
- pte_t *pte;
-
- pte = walk(pagetable, va, 0);
- if(pte == 0)
- panic("clearpteu");
- *pte &= ~PTE_U;
-}