Diffstat (limited to 'kernel')
-rw-r--r--   kernel/defs.h          4
-rw-r--r--   kernel/fs.c            9
-rw-r--r--   kernel/kcsan.c       323
-rw-r--r--   kernel/pipe.c          3
-rw-r--r--   kernel/riscv.h         3
-rw-r--r--   kernel/spinlock.h      4
-rw-r--r--   kernel/sprintf.c      91
-rw-r--r--   kernel/start.c         5
-rw-r--r--   kernel/stats.c        66
-rw-r--r--   kernel/virtio_disk.c  26
10 files changed, 529 insertions, 5 deletions
diff --git a/kernel/defs.h b/kernel/defs.h
index 02226b5..541c97e 100644
--- a/kernel/defs.h
+++ b/kernel/defs.h
@@ -1,3 +1,7 @@
+#ifdef LAB_MMAP
+typedef unsigned long size_t;
+typedef long int off_t;
+#endif
struct buf;
struct context;
struct file;
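
The LAB_MMAP typedefs above supply the standard types for the mmap lab's
system-call interface. For reference, the conventional user-level prototypes
they support look like this (an assumption about the lab's interface; the
syscalls themselves are not part of this diff):

    void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset);
    int munmap(void *addr, size_t len);
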
diff --git a/kernel/fs.c b/kernel/fs.c
index c6bab15..6c4079e 100644
--- a/kernel/fs.c
+++ b/kernel/fs.c
@@ -295,11 +295,11 @@ ilock(struct inode *ip)
struct buf *bp;
struct dinode *dip;
- if(ip == 0 || ip->ref < 1)
+ if(ip == 0 || atomic_read4(&ip->ref) < 1)
panic("ilock");
acquiresleep(&ip->lock);
-
+
if(ip->valid == 0){
bp = bread(ip->dev, IBLOCK(ip->inum, sb));
dip = (struct dinode*)bp->data + ip->inum%IPB;
@@ -320,7 +320,7 @@ ilock(struct inode *ip)
void
iunlock(struct inode *ip)
{
- if(ip == 0 || !holdingsleep(&ip->lock) || ip->ref < 1)
+ if(ip == 0 || !holdingsleep(&ip->lock) || atomic_read4(&ip->ref) < 1)
panic("iunlock");
releasesleep(&ip->lock);
@@ -416,7 +416,6 @@ bmap(struct inode *ip, uint bn)
brelse(bp);
return addr;
}
-
panic("bmap: out of range");
}
@@ -447,7 +446,7 @@ itrunc(struct inode *ip)
bfree(ip->dev, ip->addrs[NDIRECT]);
ip->addrs[NDIRECT] = 0;
}
-
+
ip->size = 0;
iupdate(ip);
}
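
The ilock()/iunlock() changes above read ip->ref through atomic_read4() so
that the race detector added below treats the unlocked read as an
acknowledged atomic access rather than a data race. The definition of
atomic_read4() is not part of this diff; a minimal sketch using gcc's
atomic builtins might be:

    // read a 4-byte int atomically (sketch; actual definition not shown here)
    static inline int
    atomic_read4(int *addr)
    {
      return __atomic_load_n(addr, __ATOMIC_SEQ_CST);
    }
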
diff --git a/kernel/kcsan.c b/kernel/kcsan.c
new file mode 100644
index 0000000..90861ba
--- /dev/null
+++ b/kernel/kcsan.c
@@ -0,0 +1,323 @@
+#include "types.h"
+#include "param.h"
+#include "memlayout.h"
+#include "spinlock.h"
+#include "riscv.h"
+#include "proc.h"
+#include "defs.h"
+
+//
+// Race detector using gcc's thread sanitizer. On a store it installs
+// a watchpoint on the address and delays, monitoring whether any
+// other CPU loads or stores the same address in the meantime. If so,
+// we have a race and print out the backtraces of the thread that
+// raced and of the thread that set the watchpoint.
+//
+
+//
+// To run with kcsan:
+// make clean
+// make KCSAN=1 qemu
+//
+
+// The number of watch points.
+#define NWATCH (NCPU)
+
+// The number of cycles to delay a store (only approximate under qemu).
+//#define DELAY_CYCLES 20000
+#define DELAY_CYCLES 200000
+
+#define MAXTRACE 20
+
+int
+trace(uint64 *trace, int maxtrace)
+{
+ uint64 i = 0;
+
+ push_off();
+
+ uint64 fp = r_fp();
+ uint64 ra, low = PGROUNDDOWN(fp) + 16, high = PGROUNDUP(fp);
+
+ while(!(fp & 7) && fp >= low && fp < high){
+ ra = *(uint64*)(fp - 8);
+ fp = *(uint64*)(fp - 16);
+ trace[i++] = ra;
+ if(i >= maxtrace)
+ break;
+ }
+
+ pop_off();
+
+ return i;
+}
+
+struct watch {
+ uint64 addr;
+ int write;
+ int race;
+ uint64 trace[MAXTRACE];
+ int tracesz;
+};
+
+struct {
+ struct spinlock lock;
+ struct watch points[NWATCH];
+ int on;
+} tsan;
+
+static struct watch*
+wp_lookup(uint64 addr)
+{
+ for(struct watch *w = &tsan.points[0]; w < &tsan.points[NWATCH]; w++) {
+ if(w->addr == addr) {
+ return w;
+ }
+ }
+ return 0;
+}
+
+static int
+wp_install(uint64 addr, int write)
+{
+ for(struct watch *w = &tsan.points[0]; w < &tsan.points[NWATCH]; w++) {
+ if(w->addr == 0) {
+ w->addr = addr;
+ w->write = write;
+ w->tracesz = trace(w->trace, MAXTRACE);
+ return 1;
+ }
+ }
+ panic("wp_install");
+ return 0;
+}
+
+static void
+wp_remove(uint64 addr)
+{
+ for(struct watch *w = &tsan.points[0]; w < &tsan.points[NWATCH]; w++) {
+ if(w->addr == addr) {
+ w->addr = 0;
+ w->tracesz = 0;
+ return;
+ }
+ }
+  panic("wp_remove");
+}
+
+static void
+printtrace(uint64 *t, int n)
+{
+ int i;
+
+ for(i = 0; i < n; i++) {
+ printf("%p\n", t[i]);
+ }
+}
+
+static void
+race(char *s, struct watch *w) {
+ uint64 t[MAXTRACE];
+ int n;
+
+ n = trace(t, MAXTRACE);
+ printf("== race detected ==\n");
+ printf("backtrace for racing %s\n", s);
+ printtrace(t, n);
+ printf("backtrace for watchpoint:\n");
+ printtrace(w->trace, w->tracesz);
+ printf("==========\n");
+}
+
+// cycle counter
+static inline uint64
+r_cycle()
+{
+ uint64 x;
+ asm volatile("rdcycle %0" : "=r" (x) );
+ return x;
+}
+
+static void delay(void) __attribute__((noinline));
+static void delay() {
+ uint64 stop = r_cycle() + DELAY_CYCLES;
+ uint64 c = r_cycle();
+ while(c < stop) {
+ c = r_cycle();
+ }
+}
+
+static void
+kcsan_read(uint64 addr, int sz)
+{
+ struct watch *w;
+
+ acquire(&tsan.lock);
+ if((w = wp_lookup(addr)) != 0) {
+ if(w->write) {
+ race("load", w);
+ }
+ release(&tsan.lock);
+ return;
+ }
+ release(&tsan.lock);
+}
+
+static void
+kcsan_write(uint64 addr, int sz)
+{
+ struct watch *w;
+
+ acquire(&tsan.lock);
+  if((w = wp_lookup(addr)) != 0) {
+    race("store", w);
+    release(&tsan.lock);
+    return;
+  }
+
+ // no watchpoint; try to install one
+ if(wp_install(addr, 1)) {
+
+ release(&tsan.lock);
+
+ // XXX maybe read value at addr before and after delay to catch
+ // races of unknown origins (e.g., device).
+
+ delay();
+
+ acquire(&tsan.lock);
+
+ wp_remove(addr);
+ }
+ release(&tsan.lock);
+}
+
+// tsan.on only has an effect when built with "make KCSAN=1".
+void
+kcsaninit(void)
+{
+ initlock(&tsan.lock, "tsan");
+ tsan.on = 1;
+ __sync_synchronize();
+}
+
+//
+// Calls inserted by compiler into kernel binary, except for this file.
+//
+
+void
+__tsan_init(void)
+{
+}
+
+void
+__tsan_read1(uint64 addr)
+{
+ if(!tsan.on)
+ return;
+ // kcsan_read(addr, 1);
+}
+
+void
+__tsan_read2(uint64 addr)
+{
+ if(!tsan.on)
+ return;
+ kcsan_read(addr, 2);
+}
+
+void
+__tsan_read4(uint64 addr)
+{
+ if(!tsan.on)
+ return;
+ kcsan_read(addr, 4);
+}
+
+void
+__tsan_read8(uint64 addr)
+{
+ if(!tsan.on)
+ return;
+ kcsan_read(addr, 8);
+}
+
+void
+__tsan_read_range(uint64 addr, uint64 size)
+{
+ if(!tsan.on)
+ return;
+ kcsan_read(addr, size);
+}
+
+void
+__tsan_write1(uint64 addr)
+{
+ if(!tsan.on)
+ return;
+ // kcsan_write(addr, 1);
+}
+
+void
+__tsan_write2(uint64 addr)
+{
+ if(!tsan.on)
+ return;
+ kcsan_write(addr, 2);
+}
+
+void
+__tsan_write4(uint64 addr)
+{
+ if(!tsan.on)
+ return;
+ kcsan_write(addr, 4);
+}
+
+void
+__tsan_write8(uint64 addr)
+{
+ if(!tsan.on)
+ return;
+ kcsan_write(addr, 8);
+}
+
+void
+__tsan_write_range(uint64 addr, uint64 size)
+{
+ if(!tsan.on)
+ return;
+ kcsan_write(addr, size);
+}
+
+void
+__tsan_atomic_thread_fence(int order)
+{
+ __sync_synchronize();
+}
+
+uint32
+__tsan_atomic32_load(uint *ptr, uint *val, int order)
+{
+ uint t;
+ __atomic_load(ptr, &t, __ATOMIC_SEQ_CST);
+ return t;
+}
+
+void
+__tsan_atomic32_store(uint *ptr, uint val, int order)
+{
+ __atomic_store(ptr, &val, __ATOMIC_SEQ_CST);
+}
+
+// We don't use this
+void
+__tsan_func_entry(uint64 pc)
+{
+}
+
+// We don't use this
+void
+__tsan_func_exit(void)
+{
+}
+
+
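
A toy example (not from this diff) of the kind of bug kcsan.c flags: two
CPUs incrementing a shared counter without a lock. The compiler instruments
the load and store; the store installs a watchpoint on &counter and delays,
and if the other CPU touches counter during the delay, race() prints both
backtraces.

    int counter;     // shared, unprotected by any lock

    void
    tick(void)       // imagine this runs concurrently on two CPUs
    {
      counter = counter + 1;   // emits __tsan_read4(&counter) followed by
                               // __tsan_write4(&counter)
    }
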
diff --git a/kernel/pipe.c b/kernel/pipe.c
index f6b501a..41a9c5e 100644
--- a/kernel/pipe.c
+++ b/kernel/pipe.c
@@ -68,6 +68,9 @@ pipeclose(struct pipe *pi, int writable)
}
if(pi->readopen == 0 && pi->writeopen == 0){
release(&pi->lock);
+#ifdef LAB_LOCK
+ freelock(&pi->lock);
+#endif
kfree((char*)pi);
} else
release(&pi->lock);
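
freelock() is defined in the lock lab's spinlock.c, which is not part of
this diff. A sketch of what it presumably does, assuming initlock()
registers every lock in a global table (NLOCK, locks[], and lock_locks are
assumptions here) so statslock() can report contention; the entry must be
cleared before kfree() recycles the memory holding the lock:

    void
    freelock(struct spinlock *lk)
    {
      acquire(&lock_locks);          // guards the registration table
      for(int i = 0; i < NLOCK; i++){
        if(locks[i] == lk){
          locks[i] = 0;              // unregister this lock
          break;
        }
      }
      release(&lock_locks);
    }
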
diff --git a/kernel/riscv.h b/kernel/riscv.h
index 7d6eb6e..af18972 100644
--- a/kernel/riscv.h
+++ b/kernel/riscv.h
@@ -355,6 +355,9 @@ typedef uint64 *pagetable_t; // 512 PTEs
#define PTE_A (1L << 6) // riscv access bit
#define PTE_C (1L << 8) // RSW low bit, use it to mark whether a page is COW
+
+
+
// shift a physical address to the right place for a PTE.
#define PA2PTE(pa) ((((uint64)pa) >> 12) << 10)
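
PTE_C repurposes one of the PTE bits reserved for software. A sketch of its
typical use in the COW lab (an assumption; the fork and fault-handler
changes are not in this diff): during fork, writable pages are downgraded
to read-only and tagged PTE_C, so the first write faults into a handler
that copies the page.

    pte_t *pte = walk(pagetable, va, 0);
    if(pte && (*pte & PTE_V) && (*pte & PTE_W)){
      *pte = (*pte & ~PTE_W) | PTE_C;   // read-only + marked copy-on-write
    }
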
diff --git a/kernel/spinlock.h b/kernel/spinlock.h
index 4392820..9bac216 100644
--- a/kernel/spinlock.h
+++ b/kernel/spinlock.h
@@ -5,5 +5,9 @@ struct spinlock {
// For debugging:
char *name; // Name of lock.
struct cpu *cpu; // The cpu holding the lock.
+#ifdef LAB_LOCK
+ int nts;
+ int n;
+#endif
};
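
The n and nts fields hold per-lock contention statistics for the lock lab:
n counts calls to acquire(), and nts counts iterations spent spinning on a
lock somebody else holds. A sketch of the instrumentation inside acquire()
(the spinlock.c side is not part of this diff):

    __sync_fetch_and_add(&lk->n, 1);
    while(__sync_lock_test_and_set(&lk->locked, 1) != 0){
      __sync_fetch_and_add(&lk->nts, 1);   // lost the race; spin and count
    }
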
diff --git a/kernel/sprintf.c b/kernel/sprintf.c
new file mode 100644
index 0000000..050eb85
--- /dev/null
+++ b/kernel/sprintf.c
@@ -0,0 +1,91 @@
+#include <stdarg.h>
+
+#include "types.h"
+#include "param.h"
+#include "spinlock.h"
+#include "sleeplock.h"
+#include "fs.h"
+#include "file.h"
+#include "riscv.h"
+#include "defs.h"
+
+static char digits[] = "0123456789abcdef";
+
+static int
+sputc(char *s, char c)
+{
+ *s = c;
+ return 1;
+}
+
+static int
+sprintint(char *s, int xx, int base, int sign)
+{
+ char buf[16];
+ int i, n;
+ uint x;
+
+ if(sign && (sign = xx < 0))
+ x = -xx;
+ else
+ x = xx;
+
+ i = 0;
+ do {
+ buf[i++] = digits[x % base];
+ } while((x /= base) != 0);
+
+ if(sign)
+ buf[i++] = '-';
+
+ n = 0;
+ while(--i >= 0)
+ n += sputc(s+n, buf[i]);
+ return n;
+}
+
+int
+snprintf(char *buf, int sz, char *fmt, ...)
+{
+ va_list ap;
+ int i, c;
+ int off = 0;
+ char *s;
+
+ if (fmt == 0)
+ panic("null fmt");
+
+ va_start(ap, fmt);
+ for(i = 0; off < sz && (c = fmt[i] & 0xff) != 0; i++){
+ if(c != '%'){
+ off += sputc(buf+off, c);
+ continue;
+ }
+ c = fmt[++i] & 0xff;
+ if(c == 0)
+ break;
+ switch(c){
+ case 'd':
+ off += sprintint(buf+off, va_arg(ap, int), 10, 1);
+ break;
+ case 'x':
+ off += sprintint(buf+off, va_arg(ap, int), 16, 1);
+ break;
+ case 's':
+ if((s = va_arg(ap, char*)) == 0)
+ s = "(null)";
+ for(; *s && off < sz; s++)
+ off += sputc(buf+off, *s);
+ break;
+ case '%':
+ off += sputc(buf+off, '%');
+ break;
+ default:
+ // Print unknown % sequence to draw attention.
+ off += sputc(buf+off, '%');
+ off += sputc(buf+off, c);
+ break;
+ }
+ }
+ return off;
+}
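
Example use of the snprintf() above. Two caveats are visible in the code:
it does not NUL-terminate the buffer, and numeric conversions via
sprintint() are not themselves bounded by sz, so callers should leave some
headroom:

    char line[64];
    int n = snprintf(line, sizeof(line) - 1, "lock: %s: #acquire() %d\n",
                     "kmem", 42);
    line[n] = '\0';   // snprintf() does not write the terminator
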
diff --git a/kernel/start.c b/kernel/start.c
index e16f18a..bf03bc0 100644
--- a/kernel/start.c
+++ b/kernel/start.c
@@ -38,6 +38,11 @@ start()
w_mideleg(0xffff);
w_sie(r_sie() | SIE_SEIE | SIE_STIE | SIE_SSIE);
+#ifdef KCSAN
+  // allow supervisor mode to read the cycle and time counters (CY and TM)
+ w_mcounteren(r_mcounteren()|0x3);
+#endif
+
// configure Physical Memory Protection to give supervisor mode
// access to all of physical memory.
w_pmpaddr0(0x3fffffffffffffull);
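
r_mcounteren() and w_mcounteren() follow the same pattern as the other CSR
accessors in riscv.h; their definitions are not shown in this diff, but
they presumably look like:

    static inline uint64
    r_mcounteren()
    {
      uint64 x;
      asm volatile("csrr %0, mcounteren" : "=r" (x) );
      return x;
    }

    static inline void
    w_mcounteren(uint64 x)
    {
      asm volatile("csrw mcounteren, %0" : : "r" (x));
    }
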
diff --git a/kernel/stats.c b/kernel/stats.c
new file mode 100644
index 0000000..b7a8e5f
--- /dev/null
+++ b/kernel/stats.c
@@ -0,0 +1,66 @@
+#include <stdarg.h>
+
+#include "types.h"
+#include "param.h"
+#include "spinlock.h"
+#include "sleeplock.h"
+#include "fs.h"
+#include "file.h"
+#include "riscv.h"
+#include "defs.h"
+
+#define BUFSZ 4096
+static struct {
+ struct spinlock lock;
+ char buf[BUFSZ];
+ int sz;
+ int off;
+} stats;
+
+int statscopyin(char*, int);
+int statslock(char*, int);
+
+int
+statswrite(int user_src, uint64 src, int n)
+{
+ return -1;
+}
+
+int
+statsread(int user_dst, uint64 dst, int n)
+{
+ int m;
+
+ acquire(&stats.lock);
+
+ if(stats.sz == 0) {
+#ifdef LAB_LOCK
+ stats.sz = statslock(stats.buf, BUFSZ);
+#endif
+ }
+ m = stats.sz - stats.off;
+
+ if (m > 0) {
+ if(m > n)
+ m = n;
+ if(either_copyout(user_dst, dst, stats.buf+stats.off, m) != -1) {
+ stats.off += m;
+ }
+ } else {
+ m = -1;
+ stats.sz = 0;
+ stats.off = 0;
+ }
+ release(&stats.lock);
+ return m;
+}
+
+void
+statsinit(void)
+{
+ initlock(&stats.lock, "stats");
+
+ devsw[STATS].read = statsread;
+ devsw[STATS].write = statswrite;
+}
+
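
A user-level sketch of draining the stats device, assuming (per the lab's
usual setup, not shown in this diff) a device file named "statistics" with
major number STATS. Note that statsread() returns -1 rather than 0 once the
buffer is exhausted, resetting its offset for the next reader:

    #include "kernel/types.h"
    #include "kernel/fcntl.h"
    #include "user/user.h"

    int
    main(void)
    {
      char buf[512];
      int fd = open("statistics", O_RDONLY);
      int n;
      while((n = read(fd, buf, sizeof(buf))) > 0)
        write(1, buf, n);
      close(fd);
      exit(0);
    }
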
diff --git a/kernel/virtio_disk.c b/kernel/virtio_disk.c
index ae6c164..dfca2bc 100644
--- a/kernel/virtio_disk.c
+++ b/kernel/virtio_disk.c
@@ -212,6 +212,28 @@ alloc3_desc(int *idx)
return 0;
}
+#ifdef LAB_LOCK
+//
+// check that there are at most NBUF distinct
+// struct buf's, which the lock lab requires.
+//
+static struct buf *xbufs[NBUF];
+static void
+checkbuf(struct buf *b)
+{
+ for(int i = 0; i < NBUF; i++){
+ if(xbufs[i] == b){
+ return;
+ }
+ if(xbufs[i] == 0){
+ xbufs[i] = b;
+ return;
+ }
+ }
+ panic("more than NBUF bufs");
+}
+#endif
+
void
virtio_disk_rw(struct buf *b, int write)
{
@@ -219,6 +241,10 @@ virtio_disk_rw(struct buf *b, int write)
acquire(&disk.vdisk_lock);
+#ifdef LAB_LOCK
+ checkbuf(b);
+#endif
+
// the spec's Section 5.2 says that legacy block operations use
// three descriptors: one for type/reserved/sector, one for the
// data, one for a 1-byte status result.