summaryrefslogtreecommitdiff
path: root/kernel/proc.c
diff options
context:
space:
mode:
authorMole Shang <[email protected]>2024-02-19 21:51:26 +0800
committerMole Shang <[email protected]>2024-02-19 22:13:01 +0800
commit09ba9112386d5d59d7f2a31c469768c582acb939 (patch)
treeb1dce737cdd94164e8d7f3f651ea7a86a22a42f3 /kernel/proc.c
parentd86118fc80267649b4791c8c0c72ebd60edf1ef2 (diff)
downloadxv6-labs-09ba9112386d5d59d7f2a31c469768c582acb939.tar.gz
xv6-labs-09ba9112386d5d59d7f2a31c469768c582acb939.tar.bz2
xv6-labs-09ba9112386d5d59d7f2a31c469768c582acb939.zip
lab mmap: finish
Diffstat (limited to 'kernel/proc.c')
-rw-r--r--kernel/proc.c156
1 file changed, 155 insertions, 1 deletion
diff --git a/kernel/proc.c b/kernel/proc.c
index 9a9bae9..923729d 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -1,4 +1,5 @@
#include "types.h"
+#include "fcntl.h"
#include "param.h"
#include "memlayout.h"
#include "riscv.h"
@@ -160,6 +161,14 @@ found:
return 0;
}
+ // setup mmap vm area
+ p->cur_max_vm_addr = MAX_VM_ADDR;
+ for(int i = 0; i < MAX_VM_AREA; i++){
+ struct vm_area* vma = &p->vma[i];
+ memset(&p->vma[i], 0, sizeof(struct vm_area));
+ vma->start_addr = -1;
+ }
+
// Set up new context to start executing at forkret,
// which returns to user space.
memset(&p->context, 0, sizeof(p->context));
@@ -328,7 +337,7 @@ fork(void)
}
// Copy user memory from parent to child.
- if(uvmcopy(p->pagetable, np->pagetable, p->sz) < 0){
+ if(uvmcopy(p->pagetable, np->pagetable, 0, p->sz) < 0){
freeproc(np);
release(&np->lock);
return -1;
@@ -350,6 +359,17 @@ fork(void)
np->ofile[i] = filedup(p->ofile[i]);
np->cwd = idup(p->cwd);
+ // copy vm areas
+ // TODO: cow & remap vm to pa
+ for(int i = 0; i < MAX_VM_AREA; i++){
+ if(p->vma[i].start_addr != -1){
+ memmove(&np->vma[i], &p->vma[i], sizeof(struct vm_area));
+ uvmcopy(p->pagetable, np->pagetable, p->vma[i].start_addr, p->vma[i].len);
+ if(p->vma[i].file)
+ filedup(p->vma[i].file);
+ }
+ }
+
safestrcpy(np->name, p->name, sizeof(p->name));
pid = np->pid;
@@ -393,6 +413,12 @@ exit(int status)
if(p == initproc)
panic("init exiting");
+ for(int i = 0; i < MAX_VM_AREA; i++){
+ if(p->vma[i].start_addr != -1){
+ uvmunmap(p->pagetable, p->vma[i].start_addr, p->vma[i].len/PGSIZE, 1);
+ }
+ }
+
// Close all open files.
for(int fd = 0; fd < NOFILE; fd++){
if(p->ofile[fd]){
@@ -768,3 +794,131 @@ pgaccess(uint64 base, int len, uint64 mask_addr)
// now copyout the mask to user memory
return copyout(pgtbl, mask_addr, (char *)&mask, sizeof(mask));
}
+
+// lab mmap
+// Map len bytes of file starting at offset into this process's
+// address space. addr == 0 lets the kernel choose an address (growing
+// down from cur_max_vm_addr); a nonzero addr must lie in [p->sz, MAXVA].
+// All addresses are page aligned. Pages are eagerly allocated and
+// filled via mmap_read. Returns the mapped start address, or
+// 0xffffffffffffffff (-1) on failure.
+uint64 mmap(uint64 addr, uint64 len, int prot, int flags, int fd, struct file* file, uint64 offset)
+{
+  struct proc *p = myproc();
+  struct vm_area *vma = 0;
+  uint64 old_max = myproc()->cur_max_vm_addr;
+
+  len = PGROUNDUP(len);
+  addr = PGROUNDDOWN(addr);
+
+  if(p->sz+len > MAXVA)
+    return -1;
+
+  // offset must be page aligned. (The old test `offset & PGSIZE` only
+  // examined a single bit, and `offset < 0` is always false for uint64.)
+  if(offset % PGSIZE)
+    return -1;
+
+  // claim a free vm_area slot (start_addr == -1 marks "unused")
+  for(int i = 0; i < MAX_VM_AREA; i++){
+    if(p->vma[i].start_addr == -1){
+      vma = &p->vma[i];
+      break;
+    }
+  }
+  if(!vma)
+    goto mmap_bad;
+
+  vma->len = len;
+
+  if(addr >= p->sz && addr <= MAXVA)
+    vma->start_addr = addr;
+  else if (addr == 0) {
+    vma->start_addr = p->cur_max_vm_addr - len;
+  } else {
+    goto mmap_bad;
+  }
+  p->cur_max_vm_addr = vma->start_addr;
+
+  vma->fd = fd;
+  filedup(file);
+  vma->flags = flags;
+  vma->prot = prot;
+  vma->file = file;
+  vma->roff = offset;
+
+  int pte_flags = PTE_U|PTE_V;
+
+  if(vma->prot & PROT_READ)
+    pte_flags |= PTE_R;
+  if(vma->prot & PROT_WRITE)
+    pte_flags |= PTE_W;
+  if(vma->prot & PROT_EXEC)
+    pte_flags |= PTE_X;
+
+  // eagerly allocate, fill and map every page of the region
+  for(uint64 a = vma->start_addr; a < vma->start_addr+len; a+= PGSIZE){
+    char *mem = kalloc();
+
+    if(mem == 0){
+      // out of memory: free the (a - start_addr)/PGSIZE pages already
+      // mapped. (The old path leaked them, and the mappages-failure
+      // path passed PGSIZE as a *page count* at an unmapped address.)
+      uvmunmap(p->pagetable, vma->start_addr, (a - vma->start_addr)/PGSIZE, 1);
+      goto mmap_undo;
+    }
+    memset(mem, 0, PGSIZE);
+    vma->roff = mmap_read(vma->file, (uint64)mem, vma->roff, PGSIZE);
+
+    if(mappages(p->pagetable, a, PGSIZE, (uint64)mem, pte_flags) != 0){
+      kfree(mem);
+      uvmunmap(p->pagetable, vma->start_addr, (a - vma->start_addr)/PGSIZE, 1);
+      goto mmap_undo;
+    }
+  }
+
+  return vma->start_addr;
+
+mmap_undo:
+  // release the slot, the filedup() reference, and the reserved range
+  fileclose(vma->file);
+  memset(vma, 0, sizeof(struct vm_area));
+  vma->start_addr = -1;
+  p->cur_max_vm_addr = old_max;
+
+mmap_bad:
+  return 0xffffffffffffffff;
+}
+
+// Unmap [addr, addr+len) from the calling process's mmap regions.
+// all the addrs should be page aligned.
+// ceil-round if not.
+// Only trimming from the front of an area is supported: on success
+// start_addr advances past the freed range. Returns 0 on success,
+// -1 on bad arguments, or the error from munmap_write.
+int munmap(uint64 addr, uint64 len)
+{
+  struct proc *p = myproc();
+  struct vm_area *vma = 0;
+  int r = 0, dec_refcnt = 0;
+
+  // NOTE(review): bound is checked before rounding; an unaligned len
+  // that rounds up past MAXVA slips through — confirm callers align.
+  if(addr+len > MAXVA)
+    return -1;
+
+  addr = PGROUNDDOWN(addr);
+  len = PGROUNDUP(len);
+
+  // find the area that fully contains [addr, addr+len)
+  for(int i = 0; i < MAX_VM_AREA; i++){
+    if((p->vma[i].start_addr != -1) && addr >= p->vma[i].start_addr && addr+len <= p->vma[i].start_addr+p->vma[i].len){
+      vma = &p->vma[i];
+      break;
+    }
+  }
+
+  if(!vma){
+    return -1;
+  }
+
+  if(vma->flags & MAP_SHARED){
+    // do the writeback
+    // woff is relative to the area's *current* start_addr; presumably
+    // munmap_write converts it to a file offset — TODO confirm this
+    // stays correct after earlier partial unmaps advanced start_addr.
+    vma->woff = addr-vma->start_addr;
+    if(vma->woff+len >= vma->roff){
+      // everything read in so far is being released;
+      // should decrease refcnt
+      dec_refcnt = 1;
+    }
+    r = munmap_write(vma->file, addr, vma->woff, len);
+    if(r)
+      return r;
+    if(dec_refcnt){
+      fileclose(vma->file);
+    }
+    vma->woff += len;
+  }
+
+  // free the physical pages backing the range (do_free = 1)
+  uvmunmap(p->pagetable, addr, len/PGSIZE, 1);
+
+  // NOTE(review): non-MAP_SHARED areas never fileclose() their file
+  // here — possible reference leak; verify against the exit() path.
+  vma->len -= len;
+  if(dec_refcnt){
+    // mark it invalid
+    memset(vma, 0, sizeof(struct vm_area));
+    vma->start_addr = -1;
+  } else
+    vma->start_addr += len;
+
+  return r;
+}