Diffstat (limited to 'kernel/proc.c')
-rw-r--r--  kernel/proc.c | 647
1 file changed, 647 insertions(+), 0 deletions(-)
diff --git a/kernel/proc.c b/kernel/proc.c
new file mode 100644
index 0000000..3d65b46
--- /dev/null
+++ b/kernel/proc.c
@@ -0,0 +1,647 @@
+#include "types.h"
+#include "param.h"
+#include "memlayout.h"
+#include "riscv.h"
+#include "spinlock.h"
+#include "proc.h"
+#include "defs.h"
+
+struct cpu cpus[NCPU];
+
+struct proc proc[NPROC];
+
+struct proc *initproc;
+
+int nextpid = 1;
+struct spinlock pid_lock;
+
+extern void forkret(void);
+static void wakeup1(struct proc *chan);
+
+extern char trampoline[]; // trampoline.S
+
+void
+procinit(void)
+{
+ struct proc *p;
+
+ initlock(&pid_lock, "nextpid");
+ for(p = proc; p < &proc[NPROC]; p++) {
+ initlock(&p->lock, "proc");
+
+ // Allocate a page for the process's kernel stack.
+ // Map it high in memory, followed by an invalid
+ // guard page.
+ char *pa = kalloc();
+ if(pa == 0)
+ panic("kalloc");
+ uint64 va = KSTACK((int) (p - proc));
+ kvmmap(va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
+ p->kstack = va;
+ }
+ kvminithart();
+}
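+
+// Illustrative note (not part of the original file): KSTACK() comes from
+// memlayout.h; under the usual xv6 layout it reserves two pages of
+// virtual address space per process just below TRAMPOLINE, roughly
+//
+//   #define KSTACK(p) (TRAMPOLINE - ((p)+1)*2*PGSIZE)
+//
+// Only one page of each pair is mapped above, so every kernel stack is
+// bounded by invalid pages that fault on stack overflow instead of
+// silently corrupting a neighboring stack.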
+
+// Must be called with interrupts disabled,
+// to prevent race with process being moved
+// to a different CPU.
+int
+cpuid()
+{
+ int id = r_tp();
+ return id;
+}
+
+// Return this CPU's cpu struct.
+// Interrupts must be disabled.
+struct cpu*
+mycpu(void) {
+ int id = cpuid();
+ struct cpu *c = &cpus[id];
+ return c;
+}
+
+// Return the current struct proc *, or zero if none.
+struct proc*
+myproc(void) {
+ push_off();
+ struct cpu *c = mycpu();
+ struct proc *p = c->proc;
+ pop_off();
+ return p;
+}
+
+int
+allocpid() {
+ int pid;
+
+ acquire(&pid_lock);
+ pid = nextpid;
+ nextpid = nextpid + 1;
+ release(&pid_lock);
+
+ return pid;
+}
+
+// Look in the process table for an UNUSED proc.
+// If found, initialize state required to run in the kernel,
+// and return with p->lock held.
+// If there are no free procs, return 0.
+static struct proc*
+allocproc(void)
+{
+ struct proc *p;
+
+ for(p = proc; p < &proc[NPROC]; p++) {
+ acquire(&p->lock);
+ if(p->state == UNUSED) {
+ goto found;
+ } else {
+ release(&p->lock);
+ }
+ }
+ return 0;
+
+found:
+ p->pid = allocpid();
+
+ // Allocate a trapframe page.
+ if((p->tf = (struct trapframe *)kalloc()) == 0){
+ release(&p->lock);
+ return 0;
+ }
+
+ // An empty user page table.
+ p->pagetable = proc_pagetable(p);
+
+ // Set up new context to start executing at forkret,
+ // which returns to user space.
+ memset(&p->context, 0, sizeof p->context);
+ p->context.ra = (uint64)forkret;
+ p->context.sp = p->kstack + PGSIZE;
+
+ return p;
+}
+
+// free a proc structure and the data hanging from it,
+// including user pages.
+// p->lock must be held.
+static void
+freeproc(struct proc *p)
+{
+ if(p->tf)
+ kfree((void*)p->tf);
+ p->tf = 0;
+ if(p->pagetable)
+ proc_freepagetable(p->pagetable, p->sz);
+ p->pagetable = 0;
+ p->sz = 0;
+ p->pid = 0;
+ p->parent = 0;
+ p->name[0] = 0;
+ p->chan = 0;
+ p->killed = 0;
+ p->state = UNUSED;
+}
+
+// Create a page table for a given process,
+// with no user pages, but with trampoline pages.
+pagetable_t
+proc_pagetable(struct proc *p)
+{
+ pagetable_t pagetable;
+
+ // An empty page table.
+ pagetable = uvmcreate();
+
+ // map the trampoline code (for system call return)
+ // at the highest user virtual address.
+ // only the supervisor uses it, on the way
+ // to/from user space, so not PTE_U.
+ mappages(pagetable, TRAMPOLINE, PGSIZE,
+ (uint64)trampoline, PTE_R | PTE_X);
+
+ // map the trapframe just below TRAMPOLINE, for trampoline.S.
+ mappages(pagetable, TRAPFRAME, PGSIZE,
+ (uint64)(p->tf), PTE_R | PTE_W);
+
+ return pagetable;
+}
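+
+// Illustrative note (not part of the original file): with the two
+// mappings above, the top of every process's address space looks
+// roughly like this (TRAPFRAME is the page just below TRAMPOLINE):
+//
+//   MAXVA      ----------------
+//              trampoline page   PTE_R|PTE_X, kernel code shared by all
+//   TRAMPOLINE ----------------
+//              trapframe page    PTE_R|PTE_W, this process's p->tf
+//   TRAPFRAME  ----------------
+//              (large unmapped gap down to the user pages at low addresses)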
+
+// Free a process's page table, and free the
+// physical memory it refers to.
+void
+proc_freepagetable(pagetable_t pagetable, uint64 sz)
+{
+ uvmunmap(pagetable, TRAMPOLINE, PGSIZE, 0);
+ uvmunmap(pagetable, TRAPFRAME, PGSIZE, 0);
+ if(sz > 0)
+ uvmfree(pagetable, sz);
+}
+
+// a user program that calls exec("/init")
+// od -t xC initcode
+uchar initcode[] = {
+ 0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x05, 0x02,
+ 0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x05, 0x02,
+ 0x9d, 0x48, 0x73, 0x00, 0x00, 0x00, 0x89, 0x48,
+ 0x73, 0x00, 0x00, 0x00, 0xef, 0xf0, 0xbf, 0xff,
+ 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x00, 0x00, 0x01,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00
+};
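+
+// Illustrative note (not part of the original file): the bytes above are
+// the compiled form of a tiny RISC-V program (user/initcode.S in the xv6
+// tree), roughly:
+//
+//   start:
+//           la a0, init        # a0 = "/init"
+//           la a1, argv        # a1 = argv
+//           li a7, SYS_exec
+//           ecall              # exec("/init", argv)
+//   exit:
+//           li a7, SYS_exit
+//           ecall              # exec failed; exit forever
+//           jal exit
+//   init:
+//           .string "/init\0"
+//   argv:
+//           .long init
+//           .long 0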
+
+// Set up first user process.
+void
+userinit(void)
+{
+ struct proc *p;
+
+ p = allocproc();
+ initproc = p;
+
+ // allocate one user page and copy init's instructions
+ // and data into it.
+ uvminit(p->pagetable, initcode, sizeof(initcode));
+ p->sz = PGSIZE;
+
+ // prepare for the very first "return" from kernel to user.
+ p->tf->epc = 0; // user program counter
+ p->tf->sp = PGSIZE; // user stack pointer
+
+ safestrcpy(p->name, "initcode", sizeof(p->name));
+ p->cwd = namei("/");
+
+ p->state = RUNNABLE;
+
+ release(&p->lock);
+}
+
+// Grow or shrink user memory by n bytes.
+// Return 0 on success, -1 on failure.
+int
+growproc(int n)
+{
+ uint sz;
+ struct proc *p = myproc();
+
+ sz = p->sz;
+ if(n > 0){
+ if((sz = uvmalloc(p->pagetable, sz, sz + n)) == 0) {
+ return -1;
+ }
+ } else if(n < 0){
+ if((sz = uvmdealloc(p->pagetable, sz, sz + n)) == 0) {
+ return -1;
+ }
+ }
+ p->sz = sz;
+ return 0;
+}
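+
+// Illustrative sketch (not part of this file): growproc() is the backend
+// of the sbrk system call; sys_sbrk() in kernel/sysproc.c does roughly:
+//
+//   uint64
+//   sys_sbrk(void)
+//   {
+//     int n;
+//     int addr;
+//
+//     if(argint(0, &n) < 0)
+//       return -1;
+//     addr = myproc()->sz;        // sbrk returns the old size
+//     if(growproc(n) < 0)
+//       return -1;
+//     return addr;
+//   }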
+
+// Create a new process, copying the parent.
+// Sets up child kernel stack to return as if from fork() system call.
+int
+fork(void)
+{
+ int i, pid;
+ struct proc *np;
+ struct proc *p = myproc();
+
+ // Allocate process.
+ if((np = allocproc()) == 0){
+ return -1;
+ }
+
+ // Copy user memory from parent to child.
+ if(uvmcopy(p->pagetable, np->pagetable, p->sz) < 0){
+ freeproc(np);
+ release(&np->lock);
+ return -1;
+ }
+ np->sz = p->sz;
+
+ np->parent = p;
+
+ // copy saved user registers.
+ *(np->tf) = *(p->tf);
+
+ // Cause fork to return 0 in the child.
+ np->tf->a0 = 0;
+
+ // increment reference counts on open file descriptors.
+ for(i = 0; i < NOFILE; i++)
+ if(p->ofile[i])
+ np->ofile[i] = filedup(p->ofile[i]);
+ np->cwd = idup(p->cwd);
+
+ safestrcpy(np->name, p->name, sizeof(p->name));
+
+ pid = np->pid;
+
+ np->state = RUNNABLE;
+
+ release(&np->lock);
+
+ return pid;
+}
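+
+// Illustrative sketch (not part of this file): from user space the usual
+// pattern built on fork()/exec()/wait() looks roughly like this (argv
+// setup elided; in this xv6 version exit() and wait() take no arguments):
+//
+//   int pid = fork();
+//   if(pid == 0){
+//     exec("/echo", argv);   // child: replace this image
+//     exit();                // only reached if exec fails
+//   } else if(pid > 0){
+//     wait();                // parent: reap the child
+//   }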
+
+// Pass p's abandoned children to init.
+// Caller must hold p->lock and parent->lock.
+void
+reparent(struct proc *p, struct proc *parent) {
+ struct proc *pp;
+ int child_of_init = (p->parent == initproc);
+
+ for(pp = proc; pp < &proc[NPROC]; pp++){
+ // this code uses pp->parent without holding pp->lock.
+ // acquiring the lock first could cause a deadlock
+ // if pp or a child of pp were also in exit()
+ // and about to try to lock p.
+ if(pp->parent == p){
+ // pp->parent can't change between the check and the acquire()
+ // because only the parent changes it, and we're the parent.
+ acquire(&pp->lock);
+ pp->parent = initproc;
+ if(pp->state == ZOMBIE) {
+ if(!child_of_init)
+ acquire(&initproc->lock);
+ wakeup1(initproc);
+ if(!child_of_init)
+ release(&initproc->lock);
+ }
+ release(&pp->lock);
+ }
+ }
+}
+
+// Exit the current process. Does not return.
+// An exited process remains in the zombie state
+// until its parent calls wait().
+void
+exit(void)
+{
+ struct proc *p = myproc();
+
+ if(p == initproc)
+ panic("init exiting");
+
+ // Close all open files.
+ for(int fd = 0; fd < NOFILE; fd++){
+ if(p->ofile[fd]){
+ struct file *f = p->ofile[fd];
+ fileclose(f);
+ p->ofile[fd] = 0;
+ }
+ }
+
+ begin_op();
+ iput(p->cwd);
+ end_op();
+ p->cwd = 0;
+
+ acquire(&p->parent->lock);
+
+ acquire(&p->lock);
+
+ // Give any children to init.
+ reparent(p, p->parent);
+
+ // Parent might be sleeping in wait().
+ wakeup1(p->parent);
+
+ p->state = ZOMBIE;
+
+ release(&p->parent->lock);
+
+ // Jump into the scheduler, never to return.
+ sched();
+ panic("zombie exit");
+}
+
+// Wait for a child process to exit and return its pid.
+// Return -1 if this process has no children.
+int
+wait(void)
+{
+ struct proc *np;
+ int havekids, pid;
+ struct proc *p = myproc();
+
+ // hold p->lock for the whole time to avoid lost
+ // wakeups from a child's exit().
+ acquire(&p->lock);
+
+ for(;;){
+ // Scan through table looking for exited children.
+ havekids = 0;
+ for(np = proc; np < &proc[NPROC]; np++){
+ // this code uses np->parent without holding np->lock.
+ // acquiring the lock first would cause a deadlock,
+ // since np might be an ancestor, and we already hold p->lock.
+ if(np->parent == p){
+ // np->parent can't change between the check and the acquire()
+ // because only the parent changes it, and we're the parent.
+ acquire(&np->lock);
+ havekids = 1;
+ if(np->state == ZOMBIE){
+ // Found one.
+ pid = np->pid;
+ freeproc(np);
+ release(&np->lock);
+ release(&p->lock);
+ return pid;
+ }
+ release(&np->lock);
+ }
+ }
+
+ // No point waiting if we don't have any children.
+ if(!havekids || p->killed){
+ release(&p->lock);
+ return -1;
+ }
+
+ // Wait for a child to exit.
+ sleep(p, &p->lock); //DOC: wait-sleep
+ }
+}
+
+// Per-CPU process scheduler.
+// Each CPU calls scheduler() after setting itself up.
+// Scheduler never returns. It loops, doing:
+// - choose a process to run.
+// - swtch to start running that process.
+// - eventually that process transfers control
+// via swtch back to the scheduler.
+void
+scheduler(void)
+{
+ struct proc *p;
+ struct cpu *c = mycpu();
+
+ c->proc = 0;
+ for(;;){
+ // Avoid deadlock by ensuring that devices can interrupt.
+ intr_on();
+
+ for(p = proc; p < &proc[NPROC]; p++) {
+ acquire(&p->lock);
+ if(p->state == RUNNABLE) {
+ // Switch to chosen process. It is the process's job
+ // to release its lock and then reacquire it
+ // before jumping back to us.
+ p->state = RUNNING;
+ c->proc = p;
+ swtch(&c->scheduler, &p->context);
+
+ // Process is done running for now.
+ // It should have changed its p->state before coming back.
+ c->proc = 0;
+ }
+ release(&p->lock);
+ }
+ }
+}
+
+// Switch to scheduler. Must hold only p->lock
+// and have changed proc->state. Saves and restores
+// intena because intena is a property of this
+// kernel thread, not this CPU. It should
+// be proc->intena and proc->noff, but that would
+// break in the few places where a lock is held but
+// there's no process.
+void
+sched(void)
+{
+ int intena;
+ struct proc *p = myproc();
+
+ if(!holding(&p->lock))
+ panic("sched p->lock");
+ if(mycpu()->noff != 1)
+ panic("sched locks");
+ if(p->state == RUNNING)
+ panic("sched running");
+ if(intr_get())
+ panic("sched interruptible");
+
+ intena = mycpu()->intena;
+ swtch(&p->context, &mycpu()->scheduler);
+ mycpu()->intena = intena;
+}
+
+// Give up the CPU for one scheduling round.
+void
+yield(void)
+{
+ struct proc *p = myproc();
+ acquire(&p->lock); //DOC: yieldlock
+ p->state = RUNNABLE;
+ sched();
+ release(&p->lock);
+}
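+
+// Note (illustrative, not part of the original file): p->lock is handed
+// across swtch(): the acquire() in yield() is released by scheduler()
+// once control returns there, and the acquire() in scheduler() is
+// released by the release() in yield() when this process next runs.
+// The same convention holds for sleep() and forkret().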
+
+// A fork child's very first scheduling by scheduler()
+// will swtch to forkret.
+void
+forkret(void)
+{
+ static int first = 1;
+
+ // Still holding p->lock from scheduler.
+ release(&myproc()->lock);
+
+ if (first) {
+ // File system initialization must be run in the context of a
+ // regular process (e.g., because it calls sleep), and thus cannot
+ // be run from main().
+ first = 0;
+ fsinit(ROOTDEV);
+ }
+
+ usertrapret();
+}
+
+// Atomically release lock and sleep on chan.
+// Reacquires lock when awakened.
+void
+sleep(void *chan, struct spinlock *lk)
+{
+ struct proc *p = myproc();
+
+ // Must acquire p->lock in order to
+ // change p->state and then call sched.
+ // Once we hold p->lock, we can be
+ // guaranteed that we won't miss any wakeup
+ // (wakeup locks p->lock),
+ // so it's okay to release lk.
+ if(lk != &p->lock){ //DOC: sleeplock0
+ acquire(&p->lock); //DOC: sleeplock1
+ release(lk);
+ }
+
+ // Go to sleep.
+ p->chan = chan;
+ p->state = SLEEPING;
+
+ sched();
+
+ // Tidy up.
+ p->chan = 0;
+
+ // Reacquire original lock.
+ if(lk != &p->lock){ //DOC: sleeplock2
+ release(&p->lock);
+ acquire(lk);
+ }
+}
+
+// Wake up all processes sleeping on chan.
+// Must be called without any p->lock.
+void
+wakeup(void *chan)
+{
+ struct proc *p;
+
+ for(p = proc; p < &proc[NPROC]; p++) {
+ acquire(&p->lock);
+ if(p->state == SLEEPING && p->chan == chan) {
+ p->state = RUNNABLE;
+ }
+ release(&p->lock);
+ }
+}
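+
+// Illustrative sketch (not part of this file): the usual sleep/wakeup
+// pattern guards the condition with the same lock that is passed to
+// sleep(), so a wakeup() cannot slip in between testing the condition
+// and going to sleep. The names 'cond' and 'cond_lock' are hypothetical:
+//
+//   // consumer
+//   acquire(&cond_lock);
+//   while(!cond)
+//     sleep(&cond, &cond_lock);   // cond_lock is released while asleep
+//   cond = 0;
+//   release(&cond_lock);
+//
+//   // producer
+//   acquire(&cond_lock);
+//   cond = 1;
+//   wakeup(&cond);
+//   release(&cond_lock);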
+
+// Wake up p if it is sleeping in wait(); used by exit().
+// Caller must hold p->lock.
+static void
+wakeup1(struct proc *p)
+{
+ if(p->chan == p && p->state == SLEEPING) {
+ p->state = RUNNABLE;
+ }
+}
+
+// Kill the process with the given pid.
+// The victim won't exit until it tries to return
+// to user space (see usertrap() in trap.c).
+int
+kill(int pid)
+{
+ struct proc *p;
+
+ for(p = proc; p < &proc[NPROC]; p++){
+ acquire(&p->lock);
+ if(p->pid == pid){
+ p->killed = 1;
+ if(p->state == SLEEPING){
+ // Wake process from sleep().
+ p->state = RUNNABLE;
+ }
+ release(&p->lock);
+ return 0;
+ }
+ release(&p->lock);
+ }
+ return -1;
+}
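+
+// Illustrative sketch (not part of this file): the killed flag takes
+// effect when the victim next enters or leaves the kernel; usertrap()
+// in kernel/trap.c checks it roughly like this:
+//
+//   if(p->killed)
+//     exit();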
+
+// Copy to either a user address, or kernel address,
+// depending on user_dst.
+// Returns 0 on success, -1 on error.
+int
+either_copyout(int user_dst, uint64 dst, void *src, uint64 len)
+{
+ struct proc *p = myproc();
+ if(user_dst){
+ return copyout(p->pagetable, dst, src, len);
+ } else {
+ memmove((char *)dst, src, len);
+ return 0;
+ }
+}
+
+// Copy from either a user address, or kernel address,
+// depending on user_src.
+// Returns 0 on success, -1 on error.
+int
+either_copyin(void *dst, int user_src, uint64 src, uint64 len)
+{
+ struct proc *p = myproc();
+ if(user_src){
+ return copyin(p->pagetable, dst, src, len);
+ } else {
+ memmove(dst, (char*)src, len);
+ return 0;
+ }
+}
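+
+// Illustrative sketch (not part of this file): device and pipe code uses
+// these helpers when a transfer may target either a user or a kernel
+// buffer. The helper below is hypothetical:
+//
+//   // Copy one byte produced by a device to dst, which is a user
+//   // address if user_dst is set, otherwise a kernel address.
+//   static int
+//   putbyte(int user_dst, uint64 dst, char c)
+//   {
+//     return either_copyout(user_dst, dst, &c, 1);
+//   }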
+
+// Print a process listing to console. For debugging.
+// Runs when user types ^P on console.
+// No lock to avoid wedging a stuck machine further.
+void
+procdump(void)
+{
+ static char *states[] = {
+ [UNUSED] "unused",
+ [SLEEPING] "sleep ",
+ [RUNNABLE] "runble",
+ [RUNNING] "run ",
+ [ZOMBIE] "zombie"
+ };
+ struct proc *p;
+ char *state;
+
+ printf("\n");
+ for(p = proc; p < &proc[NPROC]; p++){
+ if(p->state == UNUSED)
+ continue;
+ if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
+ state = states[p->state];
+ else
+ state = "???";
+ printf("%d %s %s", p->pid, state, p->name);
+ printf("\n");
+ }
+}