author | Christian Cunningham <cc@localhost> | 2022-03-24 09:38:08 -0700
committer | Christian Cunningham <cc@localhost> | 2022-03-24 09:38:08 -0700
commit | 93bf62580a68533dc8252b9a2a055c02f34ecb67 (patch)
tree | 1b1ca92ebbe107a998136a1442c0dba5be885e13 /kernel
parent | 3e64dda5d5c350cc325650133f7e64967f1efe84 (diff)
Modularized
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/boot.S | 118
-rw-r--r-- | kernel/cpu/irq.c | 93
-rw-r--r-- | kernel/drivers/uart.S | 53
-rw-r--r-- | kernel/drivers/uart.c | 83
-rw-r--r-- | kernel/exceptions/data.S | 29
-rw-r--r-- | kernel/exceptions/fiq.S | 27
-rw-r--r-- | kernel/exceptions/irq.S | 28
-rw-r--r-- | kernel/exceptions/prefetch.S | 13
-rw-r--r-- | kernel/exceptions/svc.S | 145
-rw-r--r-- | kernel/exceptions/undefined.S | 21
-rw-r--r-- | kernel/globals.S | 7
-rw-r--r-- | kernel/globals.c | 24
-rw-r--r-- | kernel/graphics/lfb.c | 218
-rw-r--r-- | kernel/graphics/mbox.c | 37
-rw-r--r-- | kernel/lib/kmem.c | 38
-rw-r--r-- | kernel/lib/mmu.S | 45
-rw-r--r-- | kernel/lib/mmu.c | 33
-rw-r--r-- | kernel/lib/queue.c | 55
-rw-r--r-- | kernel/lib/strings.c | 119
-rw-r--r-- | kernel/sys/core.c | 58
-rw-r--r-- | kernel/sys/kernel.S | 32
-rw-r--r-- | kernel/sys/power.c | 39
-rw-r--r-- | kernel/sys/schedule.S | 53
-rw-r--r-- | kernel/sys/schedule.c | 468
-rw-r--r-- | kernel/tests/test.S | 31
-rw-r--r-- | kernel/tests/test.c | 545
-rw-r--r-- | kernel/util/lock.c | 20
-rw-r--r-- | kernel/util/mutex.c | 110
-rw-r--r-- | kernel/util/status.c | 133
-rw-r--r-- | kernel/util/time.c | 76 |
30 files changed, 2751 insertions, 0 deletions
diff --git a/kernel/boot.S b/kernel/boot.S new file mode 100644 index 0000000..46ef3d0 --- /dev/null +++ b/kernel/boot.S @@ -0,0 +1,118 @@ +// To keep this in the first portion of the binary. +.section ".text.boot" + +// Make _start global. +.globl _start + +.include "macros.inc" + +_start: +reset: + cpsid aif + + // Exit Hypervisor Mode + mrs r0, cpsr + and r1, r0, #0x1F + cmp r1, #0x1A + bne 1f + bic r0, r0, #0x1f + orr r0, r0, #0x13 + msr spsr_cxsf, r0 + add r0, pc, #4 + msr ELR_hyp, r0 + eret + +1: + // disable core0,1,2. + mrc p15, #0, r1, c0, c0, #5 + and r1, r1, #3 + cmp r1, #1 + beq runcore1 + cmp r1, #2 + beq runcore2 + cmp r1, #3 + bge runcore3 + + init_core 0 + + // Clear out bss. + ldr r4, =__bss_start + ldr r9, =__bss_end + mov r5, #0 + mov r6, #0 + mov r7, #0 + mov r8, #0 + b 2f + +1: // store multiple at r4. + stmia r4!, {r5-r8} + +2: // If we are still below bss_end, loop. + cmp r4, r9 + blo 1b + + // Clear mailboxes + mov r4, #0 + ldr r5, =mbox_core0 + str r4, [r5] + ldr r5, =mbox_core1 + str r4, [r5] + ldr r5, =mbox_core2 + str r4, [r5] + ldr r5, =mbox_core3 + str r4, [r5] + + // Call kernel_main + ldr r3, =kernel_main + blx r3 + +runcore1: + init_core 1 + b io_halt +runcore2: + init_core 2 + b io_halt +runcore3: + init_core 3 + b io_halt + +.globl io_halt +io_halt: + wfi + b io_halt + +.align 5 +vector: + ldr pc, reset_handler + ldr pc, undefined_handler + ldr pc, svc_handler + ldr pc, prefetch_handler + ldr pc, data_handler + ldr pc, unused_handler + ldr pc, irq_handler + ldr pc, fiq_handler + +reset_handler: .word reset +undefined_handler: .word undefined +svc_handler: .word svc +prefetch_handler: .word prefetch +data_handler: .word data +unused_handler: .word io_halt +irq_handler: .word irq +fiq_handler: .word fiq + +.section .data +.globl mbox_core0 +mbox_core0: .word 0 +.globl mbox_core1 +mbox_core1: .word 0 +.globl mbox_core2 +mbox_core2: .word 0 +.globl mbox_core3 +mbox_core3: .word 0 + +.section .bss.estacks +core_stacks 0 +core_stacks 1 +core_stacks 2 +core_stacks 3 diff --git a/kernel/cpu/irq.c b/kernel/cpu/irq.c new file mode 100644 index 0000000..f89bba9 --- /dev/null +++ b/kernel/cpu/irq.c @@ -0,0 +1,93 @@ +#include <cpu.h> +#include <cpu/irq.h> +#include <globals.h> +#include <graphics/lfb.h> +#include <symbols.h> +#include <sys/core.h> +#include <sys/schedule.h> +#include <tests/test.h> +#include <util/mutex.h> +#include <util/status.h> +#include <util/time.h> +#include <usr/main.h> + +#define CPS 1000 + +void handle_data(unsigned char); + +static unsigned long counter = 0; +unsigned long c_irq_handler(void) +{ + unsigned long source = load32(CORE0_IRQ_SOURCE); + // Check if GPU Interrupt + if (source & (1 << 8)) { + // Check if UART Interrupt + if(load32(IRQ_PENDING2) & (1 << 25)) { + // Check if UART Interrupt is Masked + if(load32(UART0_MIS) & (1<<4)) { + // Get the UART data + unsigned long data = load32(UART0_DR); + + // Handle the recieved data +#ifdef DEBUG + // Ctrl+G to output scheduler debug info + if (data == 0x7) { + uart_scheduler(); + uart_mutexes(); + } +#endif + // Add task to handle the data + { + add_thread(handle_data, (void*)data, PRIORITIES-1); + return 1; + } + } + } + // Check if System Time Compare 0 Triggered the Interrupt + if (*(volatile unsigned long*)SYS_TIMER_CS & SYS_TIMER_SC_M0) { + volatile unsigned long* timer_cs = (volatile unsigned long*)SYS_TIMER_CS; + volatile unsigned long* timer_chi = (volatile unsigned long*)SYS_TIMER_CHI; + volatile unsigned long* nexttime = (volatile unsigned long*)SYS_TIMER_C0; + 
add_thread_without_duplicate(main, 0, 0); + *nexttime = *timer_chi + USR_TIME; + *timer_cs = SYS_TIMER_SC_M0; + return 1; + } + } + // Check if CNTV triggered the interrupt + else if (source & (1 << 3)) { + // Reset the counter + write_cntv_tval(cntfrq); + counter++; + if (counter % 0x6000 == 0) + counter = 0; + } + return 0; +} + +unsigned long c_fiq_handler(void) +{ + unsigned long source = load32(CORE0_FIQ_SOURCE); + // Check if CNTV triggered the interrupt + if (source & (1 << 3)) { + write_cntv_tval(cntfrq); + } + return 0; +} + +void handle_data(unsigned char data) +{ + // Newline Case + if (data == 0x0D) { + // Backspace Case + } else if (data == 0x08 || data == 0x7F) { + } else if (data == 0x61) { + add_thread(uart_scheduler, 0, 2); + } else if (data == 0x62) { + //add_thread(test_entry, 0, 2); + } + // Draw it on the screen + { + draw_chex32(0, 9, data, 0xAA00FF); + } +} diff --git a/kernel/drivers/uart.S b/kernel/drivers/uart.S new file mode 100644 index 0000000..38957c2 --- /dev/null +++ b/kernel/drivers/uart.S @@ -0,0 +1,53 @@ +.section ".text" + +.globl uart_char +uart_char: + mov r2, #0x1000 + movt r2, #0x3f20 +1: + ldr r3, [r2, #24] + tst r3, #0b100000 + bne 1b + str r0, [r2] + bx lr + +.globl uart_string +uart_string: + push {r4, lr} + mov r4, r0 + ldrb r0, [r0] + cmp r0, #0 + popeq {r4, pc} +1: + bl uart_char + ldrb r0, [r4, #1]! + cmp r0, #0 + bne 1b + pop {r4, pc} + +.globl uart_hex +uart_hex: + push {r4, lr} + mov r2, #0x1000 + movt r2, #0x3f20 +1: + ldr r3, [r2, #24] + tst r3, #0b100000 + bne 1b + mov r3, #7 +2: + mov r1, r0 + asr r1, r3 + asr r1, r3 + asr r1, r3 + asr r1, r3 + and r1, #0xf + add r1, #0x30 + cmp r1, #0x3A + blt 3f + add r1, #7 +3: + str r1, [r2] + subs r3, #1 + bge 2b // Jump back to wait for availablilty + pop {r4, pc} diff --git a/kernel/drivers/uart.c b/kernel/drivers/uart.c new file mode 100644 index 0000000..68c70d6 --- /dev/null +++ b/kernel/drivers/uart.c @@ -0,0 +1,83 @@ +#include <drivers/uart.h> +#include <lib/kmem.h> +#include <lib/strings.h> +#include <sys/core.h> +#include <sys/schedule.h> +#include <symbols.h> +#include <util/lock.h> + +#define UART_BUFFER_SIZE 0x400 +struct UartBuffer { + char buffer[UART_BUFFER_SIZE]; + unsigned long roffset; + unsigned long woffset; + struct Lock l; +} ubuffer; + +void uart_init(void) +{ + ubuffer.roffset = 0; + ubuffer.woffset = 0; + ubuffer.l.pid = 0; + + // Disable UART0 + store32(0x0, UART0_CR); + // Setup GPIO on pin 14 and 15 + store32(0x0, (unsigned long)GPPUD); + delay(150); + store32((1 << 14) | (1 << 15), (unsigned long)GPPUDCLK0); + delay(150); + store32(0x0, (unsigned long)GPPUDCLK0); + // Clear pending interrupts + store32(0x7FF, UART0_ICR); + // Set to 3Mhz + store32(1, UART0_IBRD); + store32(40, UART0_FBRD); + // Enable FIFO and 8 bit transmission + store32((1<<4)|(1<<5)|(1<<6), UART0_LCRH); + // Mask all interrupts + store32((1<<1)|(1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)|(1<<10), UART0_IMSC); + // Enable UART0 + store32((1<<0)|(1<<8)|(1<<9), UART0_CR); +} + +// s = zero-terminated string +void* uart_print(char* s) +{ + lock(&ubuffer.l); + char* ptr = s; + while (1) { + if (*ptr == 0) + break; + ubuffer.buffer[ubuffer.woffset] = *ptr; + if ((ubuffer.woffset+1)%UART_BUFFER_SIZE == ubuffer.roffset) + return ptr; + ubuffer.woffset++; + ubuffer.woffset %= UART_BUFFER_SIZE; + ptr += 1; + } + // Low priority flush run whenever + add_thread_without_duplicate(uart_flush, 0, PRIORITIES-1); + unlock(&ubuffer.l); + return 0; +} + +void uart_flush(void) +{ + while (ubuffer.roffset != 
ubuffer.woffset) { + uart_char(ubuffer.buffer[ubuffer.roffset++]); + ubuffer.roffset %= UART_BUFFER_SIZE; + } +} + +void uart_10(unsigned long val) +{ + char* dptr = u32_to_str(val); + uart_string(dptr); +} + +void uart_hexn(unsigned long c_val) +{ + uart_hex(c_val); + uart_char('\n'); +} diff --git a/kernel/exceptions/data.S b/kernel/exceptions/data.S new file mode 100644 index 0000000..fe33215 --- /dev/null +++ b/kernel/exceptions/data.S @@ -0,0 +1,29 @@ +.section ".text.exceptions" +.globl data +data: + cpsid aif + stmfd sp!, {r0-r12,lr} + ldr r4, [lr, #-4] + // Output return address + mov r0, #80 + mov r1, #0 + mov r2, r4 + sub r2, #8 + bl draw_hex32 + // Output the data at the address + mov r0, #80 + mov r1, #1 + ldr r2, [r4, #-8] + bl draw_hex32 + // Output the Program Status + mov r0, #80 + mov r1, #2 + mrs r2, spsr + bl draw_hex32 + // Output the data-fault register + mov r0, #80 + mov r1, #3 + mrc p15, 0, r2, c5, c0, 0 //// https://developer.arm.com/documentation/ddi0464/d/System-Control/Register-descriptions/Data-Fault-Status-Register?lang=en + bl draw_hex32 + ldmfd sp!, {r0-r12,lr} + subs pc, lr, #4 // Should be 8 once I can actually handle the abort diff --git a/kernel/exceptions/fiq.S b/kernel/exceptions/fiq.S new file mode 100644 index 0000000..005ed76 --- /dev/null +++ b/kernel/exceptions/fiq.S @@ -0,0 +1,27 @@ +.section ".text.exceptions" +.globl fiq +fiq: + cpsid aif + stmfd sp!, {r0-r12,lr} + bl c_fiq_handler + cmp r0, #1 + bne 1f + // Schedule if interrupted a thread + mrs r1, spsr + and r1, r1, #0x1f + cmp r1, #0x10 + bne 1f + ldmfd sp!, {r0-r12,lr} + // Don't skip missed instruction upon return + sub lr, #4 + push {r3} + // Store the instruction in a special area for + // future processing + ldr r3, =irqlr + str lr, [r3, #0] + pop {r3} + cps #0x13 + b schedule +1: + ldmfd sp!, {r0-r12,lr} + subs pc, lr, #4 diff --git a/kernel/exceptions/irq.S b/kernel/exceptions/irq.S new file mode 100644 index 0000000..a7e78bc --- /dev/null +++ b/kernel/exceptions/irq.S @@ -0,0 +1,28 @@ +.section ".text.exceptions" +.globl irq +irq: + cpsid ai + stmfd sp!, {r0-r12,lr} + // Run IRQ handler + bl c_irq_handler + cmp r0, #1 + bne 1f + // Schedule if interrupted a thread + mrs r1, spsr + and r1, r1, #0x1f + cmp r1, #0x10 + bne 1f + ldmfd sp!, {r0-r12,lr} + // Don't skip missed instruction upon return + sub lr, #4 + push {r3} + // Store the instruction in a special area for + // future processing + ldr r3, =irqlr + str lr, [r3, #0] + pop {r3} + cps #0x13 + b schedule +1: + ldmfd sp!, {r0-r12,lr} + subs pc, lr, #4 diff --git a/kernel/exceptions/prefetch.S b/kernel/exceptions/prefetch.S new file mode 100644 index 0000000..59674bd --- /dev/null +++ b/kernel/exceptions/prefetch.S @@ -0,0 +1,13 @@ +.section ".text.exceptions" +.globl prefetch +prefetch: + cpsid aif + stmfd sp!, {r0-r12,lr} + ldr r4, [lr, #-4] + // Output return address + mov r0, #98 + mov r1, #0 + mov r2, r4 + bl draw_hex32 + ldmfd sp!, {r0-r12,lr} + subs pc, lr, #4 diff --git a/kernel/exceptions/svc.S b/kernel/exceptions/svc.S new file mode 100644 index 0000000..a24bac9 --- /dev/null +++ b/kernel/exceptions/svc.S @@ -0,0 +1,145 @@ +.section ".text.exceptions" +.globl svc +svc: + cpsid aif + stmfd sp!, {r0-r12,lr} + // Get the SVC Exception # + ldr r0, [lr, #-4] + bic r0, #0xFF000000 + // Check it is within our defined SVC + cmp r0, #7 + adrle r3, svc_table_1 + ldrle pc, [r3, r0, LSL #2] + sub r0, #8 + cmp r0, #7 + bgt svc_exit + //// Jump to the appropriate Call + adr r3, svc_table_2 + ldr pc, [r3, r0, LSL #2] +svc_000000: // 
SYS_YIELD + bl yield + ldmfd sp!, {r0-r12,lr} + b schedule +svc_000001: // SYS_TIME + mov r2, #0x3004 + movt r2, #0x3F00 + ldr r0, [r2, #4] // <- SYS_TIMER_CLO + ldr r1, [r2, #0] // <- SYS_TIMER_CHI + str r0, [sp] // Return value + str r1, [sp, #4] // Return value hi + b svc_exit +svc_000002: // Run Schedule + ldmfd sp!, {r0-r12,lr} + b schedule +svc_000003: // Add Thread + ldr r0, [sp, #0] + ldr r1, [sp, #4] + ldr r2, [sp, #8] + and r2, #0xFF + bl svc_add_thread + str r0, [sp, #0] + ldmfd sp!, {r0-r12,lr} + b schedule +svc_000004: // Lock Lock (usr_r0 = struct Lock*) + ldr r3, =scheduler + ldr r2, [r3, #0] // struct Thread* rthread + ldr r1, [r2, #0x10] // unsigned long pid + ldr r0, [sp, #0] // struct Lock* m +1: clrex + ldrex r2, [r0, #0] + cmp r2, #0 + // If it is not available, wait-queue the thread + bne svc_000004_delay_mutex + // Otherwise lock it + strexeq r2, r1, [r0, #0] + teq r2, #0 + bne 1b + dmb + b svc_exit +svc_000004_delay_mutex: // Wait-queue the current thread + // r0 = struct Lock* m + bl sched_mutex_yield + ldmfd sp!, {r0-r12,lr} + sub lr, #4 + b schedule +svc_000005: // Release Lock + ldr r0, [sp, #0] // struct Lock* m + mov r1, #0 + dmb + // Unlock + str r1, [r0, #0] + dsb + sev + // Awake any threads waiting for this lock + bl sched_mutex_resurrect + ldmfd sp!, {r0-r12,lr} + b schedule + b svc_exit +svc_000006: // Semaphore decrease + ldr r0, [sp, #0] // struct Semaphore* s +1: clrex + ldrex r2, [r0, #0] + cmp r2, #0 + beq svc_000006_delay_semaphore + sub r1, r2, #1 + strex r2, r1, [r0, #0] + teq r2, #0 + bne 1b + dmb + b svc_exit +svc_000006_delay_semaphore: + bl sched_semaphore_yield + ldmfd sp!, {r0-r12,lr} + sub lr, #4 + b schedule + b svc_exit +svc_000007: // Semaphore increase + ldr r0, [sp, #0] // struct Semaphore* s +1: clrex + ldrex r2, [r0, #0] + add r1, r2, #1 + strexeq r2, r1, [r0, #0] + teq r2, #0 + bne 1b + dmb + cmp r1, #1 + bne svc_exit + mov r1, #1 + bl sched_semaphore_resurrect + ldmfd sp!, {r0-r12,lr} + b schedule + b svc_exit +svc_000008: // Semaphore add # + ldr r0, [sp, #0] // struct Semaphore* s + ldr r3, [sp, #1] // unsigned long # times to increase +1: clrex + ldrex r2, [r0, #0] + add r1, r2, #1 + strexeq r2, r1, [r0, #0] + teq r2, #0 + bne 1b + dmb + mov r1, r3 + bl sched_semaphore_resurrect + ldmfd sp!, {r0-r12,lr} + b schedule + b svc_exit +svc_000009: // SYS_TIME_2 + mrc p15, 0, r0, c9, c13, 0 + str r0, [sp, #0] + b svc_exit +svc_exit: + ldmfd sp!, {r0-r12,pc}^ + +svc_table_1: + .word svc_000000 + .word svc_000001 + .word svc_000002 + .word svc_000003 + .word svc_000004 + .word svc_000005 + .word svc_000006 + .word svc_000007 +svc_table_2: + .word svc_000008 + .word svc_000009 diff --git a/kernel/exceptions/undefined.S b/kernel/exceptions/undefined.S new file mode 100644 index 0000000..856e30f --- /dev/null +++ b/kernel/exceptions/undefined.S @@ -0,0 +1,21 @@ +.section ".text.exceptions" +.globl undefined +undefined: + cpsid aif + stmfd sp!, {r0-r12,lr} + ldr r4, [lr, #-4] + mov r0, #62 + mov r1, #0 + mov r2, r4 + bl draw_hex32 + // Output lr + ldr r0, [sp, #0x34] + sub r2, r0, #4 + mov r0, #62 + mov r1, #1 + bl draw_hex32 + // Skip instruction for now + // In future, + // ldmfd sp!, {r0-r12,lr} // Note the lack of ^ since subs will handle it + // subs pc, lr, #4 + ldmfd sp!, {r0-r12,pc}^ diff --git a/kernel/globals.S b/kernel/globals.S new file mode 100644 index 0000000..b808053 --- /dev/null +++ b/kernel/globals.S @@ -0,0 +1,7 @@ +.section ".bss" +.globl irqlr +irqlr: + .word 0 +.globl cntfrq +cntfrq: + .word 0 diff --git 
a/kernel/globals.c b/kernel/globals.c new file mode 100644 index 0000000..5118e96 --- /dev/null +++ b/kernel/globals.c @@ -0,0 +1,24 @@ +#define GLOBALS_C +#include <sys/schedule.h> +#include <util/mutex.h> +char* os_name = "Jobbed"; +#ifndef VERSION +char* os_info_v = "?"; +#else +char* os_info_v = VERSION; +#endif + +__attribute__((section(".bss"))) unsigned long nextpid; +__attribute__((section(".bss"))) unsigned long stimel; +__attribute__((section(".bss"))) unsigned long stimeh; +__attribute__((section(".bss"))) struct Scheduler scheduler; +__attribute__((section(".bss"))) struct MutexManager mutex_manager; +__attribute__((section(".bss"))) struct Thread usrloopthread; +__attribute__((section(".bss"))) unsigned int gwidth; +__attribute__((section(".bss"))) unsigned int gheight; +__attribute__((section(".bss"))) unsigned int gpitch; +__attribute__((section(".bss"))) unsigned int gisrgb; +__attribute__((section(".bss.mutexs"))) struct Mutex mutexs[MAX_MUTEXS]; +__attribute__((section(".bss.mutexe"))) struct Entry mutex_entries[MAX_MUTEXS]; +__attribute__((section(".bss.threads"))) struct Thread threads[MAX_THREADS]; +__attribute__((section(".bss.threade"))) struct Entry thread_entries[MAX_THREADS]; diff --git a/kernel/graphics/lfb.c b/kernel/graphics/lfb.c new file mode 100644 index 0000000..8c41b1c --- /dev/null +++ b/kernel/graphics/lfb.c @@ -0,0 +1,218 @@ +#include <drivers/uart.h> +#include <globals.h> +#include <graphics/glyphs.h> +#include <graphics/lfb.h> +#include <graphics/mbox.h> +#include <lib/strings.h> + +unsigned char *lfb; /* raw frame buffer address */ + +#define SCR_WIDTH 1920 +#define SCR_HEIGHT 1080 + +/** + * Set screen resolution + */ +void lfb_init(void) +{ + mbox[0] = 35*4; + mbox[1] = MBOX_REQUEST; + + mbox[2] = 0x48003; //set phy wh + mbox[3] = 8; + mbox[4] = 8; + mbox[5] = SCR_WIDTH; //FrameBufferInfo.width + mbox[6] = SCR_HEIGHT; //FrameBufferInfo.height + + mbox[7] = 0x48004; //set virt wh + mbox[8] = 8; + mbox[9] = 8; + mbox[10] = SCR_WIDTH; //FrameBufferInfo.virtual_width + mbox[11] = SCR_HEIGHT; //FrameBufferInfo.virtual_height + + mbox[12] = 0x48009; //set virt offset + mbox[13] = 8; + mbox[14] = 8; + mbox[15] = 0; //FrameBufferInfo.x_offset + mbox[16] = 0; //FrameBufferInfo.y.offset + + mbox[17] = 0x48005; //set depth + mbox[18] = 4; + mbox[19] = 4; + mbox[20] = 32; //FrameBufferInfo.depth + + mbox[21] = 0x48006; //set pixel order + mbox[22] = 4; + mbox[23] = 4; + mbox[24] = 1; //RGB, not BGR preferably + + mbox[25] = 0x40001; //get framebuffer, gets alignment on request + mbox[26] = 8; + mbox[27] = 8; + mbox[28] = 4096; //FrameBufferInfo.pointer + mbox[29] = 0; //FrameBufferInfo.size + + mbox[30] = 0x40008; //get pitch + mbox[31] = 4; + mbox[32] = 4; + mbox[33] = 0; //FrameBufferInfo.pitch + + mbox[34] = MBOX_TAG_LAST; + + //this might not return exactly what we asked for, could be + //the closest supported resolution instead + if(mbox_call(MBOX_CH_PROP) && mbox[20]==32 && mbox[28]!=0) { + mbox[28]&=0x3FFFFFFF; //convert GPU address to ARM address + gwidth=mbox[5]; //get actual physical width + gheight=mbox[6]; //get actual physical height + gpitch=mbox[33]; //get number of bytes per line + gisrgb=mbox[24]; //get the actual channel order + lfb=(void*)((unsigned long)mbox[28]); + } else { + uart_string("Unable to set screen resolution to 1024x768x32\n"); + } +} + +void clear_screen(void) +{ + unsigned char *ptr=lfb; + for(unsigned int y = 0; y < gheight; y++) { + for(unsigned int x = 0; x < gwidth; x++) { + *(unsigned int*)ptr = 0x000000; + ptr += 4; + } 
+ } +} + +/** + * Show a picture + */ +void lfb_showpicture(void) +{ + clear_screen(); +#define FWIDTH 240 +#define FHEIGHT 80 + draw_cbox(SCR_WIDTH-FWIDTH, SCR_HEIGHT-FHEIGHT*2, FWIDTH, FHEIGHT, 0x0057b7); + draw_cbox(SCR_WIDTH-FWIDTH, SCR_HEIGHT-FHEIGHT, FWIDTH, FHEIGHT, 0xffd700); +} + +void draw_cpixel(unsigned int lx, unsigned int ly, unsigned int c) +{ + unsigned char* ptr = lfb; + ptr += (gpitch*ly+lx*4); + *((unsigned int*)ptr) = gisrgb ? (unsigned int)((c&0xFF)<<16 | (c&0xFF00) | (c&0xFF0000)>>16) : c; +} + +void draw_cbox(unsigned int lx, unsigned int ly, unsigned int dx, unsigned int dy, unsigned int c) +{ + unsigned char* ptr = lfb; + ptr += (gpitch*ly+lx*4); + for(unsigned int y = 0; y < dy; y++) { + for(unsigned int x = 0; x < dx; x++) { + *((unsigned int*)ptr) = gisrgb ? (unsigned int)((c&0xFF)<<16 | (c&0xFF00) | (c&0xFF0000)>>16) : c; + ptr += 4; + } + ptr += gpitch - dx*4; + } +} + +void draw_cbyte(unsigned int lx, unsigned int ly, unsigned char letter, unsigned int c) +{ + unsigned int x, y; + unsigned char* ptr = lfb; + ptr += (gpitch*ly*GLYPH_Y+lx*4*GLYPH_X); + unsigned char ltr = (letter & 0xF) + 0x30; + if (ltr > 0x39) { + ltr += 7; + } + for(y=0; y<GLYPH_Y; y++) { + for(x=0; x<GLYPH_X; x++) { + if((0x80 >> ((GLYPH_X-1)-x)) & glyphs[y+GLYPH_Y*(ltr)]) { + *((unsigned int*)ptr) = gisrgb ? (unsigned int)((c&0xFF)<<16 | (c&0xFF00) | (c&0xFF0000)>>16) : c; + } else { + *((unsigned int*)ptr) = 0x000000; + } + ptr += 4; + } + ptr += gpitch - GLYPH_X*4; + } +} + +void draw_byte(unsigned int lx, unsigned int ly, unsigned char letter) +{ + draw_cbyte(lx, ly, letter, 0xFFFFFF); +} + +void draw_cletter(unsigned int lx, unsigned int ly, unsigned char letter, unsigned int c) +{ + unsigned int x, y; + unsigned char* ptr = lfb; + ptr += (gpitch*ly*GLYPH_Y+lx*4*GLYPH_X); + unsigned char ltr = letter & 0x7F; + for(y=0; y<GLYPH_Y; y++) { + for(x=0; x<GLYPH_X; x++) { + if((0x80 >> ((GLYPH_X-1)-x)) & glyphs[y+GLYPH_Y*(ltr)]) { + *((unsigned int*)ptr) = gisrgb ? 
(unsigned int)((c&0xFF)<<16 | (c&0xFF00) | (c&0xFF0000)>>16) : c; + } else { + *((unsigned int*)ptr) = 0x000000; + } + ptr += 4; + } + ptr += gpitch - GLYPH_X*4; + } +} + +void draw_letter(unsigned int lx, unsigned int ly, unsigned char letter) +{ + draw_cletter(lx, ly, letter, 0xFFFFFF); +} + +void draw_cstring(unsigned int lx, unsigned int ly, char* s, unsigned int c) +{ + unsigned int x = lx % GG_MAX_X, y = ly % GG_MAX_Y; + unsigned int idx = 0; + while(s[idx] != 0) { + draw_cletter(x++, y, s[idx++], c); + if (x > GG_MAX_X) { + y += 1; + x = 0; + } + // CHECK Y EVENTUALLY + } +} + +void draw_string(unsigned int lx, unsigned int ly, char* s) +{ + draw_cstring(lx, ly, s, 0xFFFFFF); +} + +void draw_chex32(unsigned int lx, unsigned int ly, unsigned long val, unsigned int c) +{ + unsigned int x = lx % GG_MAX_X, y = ly % GG_MAX_Y; + for(unsigned int i = 0; i < GLYPH_X; i++) { + draw_cbyte(x++, y, 0xF & (val >> ((GLYPH_X-1)-i)*4), c); + if (x > GG_MAX_X) { + y += 1; + x = 0; + } + // CHECK Y EVENTUALLY + } +} + +void draw_hex32(unsigned int lx, unsigned int ly, unsigned long val) +{ + draw_chex32(lx, ly, val, 0xFFFFFF); +} + +unsigned long draw_cu10(unsigned int lx, unsigned int ly, unsigned long val, unsigned int c) +{ + string_t vals = u32_to_str(val); + unsigned long len = strlen(vals); + draw_cstring(lx, ly, vals, c); + return len; +} + +unsigned long draw_u10(unsigned int lx, unsigned int ly, unsigned long val) +{ + return draw_cu10(lx, ly, val, 0xFFFFFF); +} diff --git a/kernel/graphics/mbox.c b/kernel/graphics/mbox.c new file mode 100644 index 0000000..0dac497 --- /dev/null +++ b/kernel/graphics/mbox.c @@ -0,0 +1,37 @@ +#include <symbols.h> + +/* mailbox message buffer */ +volatile unsigned int __attribute__((aligned(16))) mbox[36]; + +#define VIDEOCORE_MBOX (MMIO_BASE+0x0000B880) +#define MBOX_READ ((volatile unsigned int*)(VIDEOCORE_MBOX+0x0)) +#define MBOX_POLL ((volatile unsigned int*)(VIDEOCORE_MBOX+0x10)) +#define MBOX_SENDER ((volatile unsigned int*)(VIDEOCORE_MBOX+0x14)) +#define MBOX_STATUS ((volatile unsigned int*)(VIDEOCORE_MBOX+0x18)) +#define MBOX_CONFIG ((volatile unsigned int*)(VIDEOCORE_MBOX+0x1C)) +#define MBOX_WRITE ((volatile unsigned int*)(VIDEOCORE_MBOX+0x20)) +#define MBOX_RESPONSE 0x80000000 +#define MBOX_FULL 0x80000000 +#define MBOX_EMPTY 0x40000000 + +/** + * Make a mailbox call. Returns 0 on failure, non-zero on success + */ +int mbox_call(unsigned char ch) +{ + unsigned int r = (((unsigned int)((unsigned long)&mbox)&~0xF) | (ch&0xF)); + /* wait until we can write to the mailbox */ + do{asm volatile("nop");}while(*MBOX_STATUS & MBOX_FULL); + /* write the address of our message to the mailbox with channel identifier */ + *MBOX_WRITE = r; + /* now wait for the response */ + while(1) { + /* is there a response? */ + do{asm volatile("nop");}while(*MBOX_STATUS & MBOX_EMPTY); + /* is it a response to our message? */ + if(r == *MBOX_READ) + /* is it a valid successful response? 
*/ + return mbox[1]==MBOX_RESPONSE; + } + return 0; +} diff --git a/kernel/lib/kmem.c b/kernel/lib/kmem.c new file mode 100644 index 0000000..9861f12 --- /dev/null +++ b/kernel/lib/kmem.c @@ -0,0 +1,38 @@ +#include <globals.h> +#include <drivers/uart.h> +#include <lib/kmem.h> + +// Output longs at address +void kmemshow32(void* data, unsigned long length) +{ + unsigned long* ptr = data; + for(unsigned long i = 0; i < length; i++) { + uart_hex(*ptr); + ptr+=1; + if (i != length-1) + uart_char(' '); + } + uart_char('\n'); +} + +// Output bytes at address +void kmemshow(void* data, unsigned long length) +{ + unsigned char* ptr = data; + for(unsigned long i = 0; i < length; i++) { + char tmp = *ptr>>4; + tmp += 0x30; + if (tmp > 0x39) + tmp += 0x7; + uart_char(tmp); + tmp = *ptr&0xF; + tmp += 0x30; + if (tmp > 0x39) + tmp += 0x7; + uart_char(tmp); + ptr+=1; + if (i != length-1) + uart_char(' '); + } + uart_char('\n'); +} diff --git a/kernel/lib/mmu.S b/kernel/lib/mmu.S new file mode 100644 index 0000000..faca3cc --- /dev/null +++ b/kernel/lib/mmu.S @@ -0,0 +1,45 @@ +.section .text +.globl mmu_start +mmu_start: + mov r2, #0 + // Invalidate Caches + mcr p15,0,r2,c7,c1,6 + // Invalidate TLB entries + mcr p15,0,r2,c8,c7,0 + // Data synchronisation barrier + mcr p15,0,r2,c7,c10,4 + + // Set all domains to 0b11 + mvn r2, #0 + bic r2, #0xC + mcr p15,0,r2,c3,c0,0 + + // Set the translation table base address (remember to align 16 KiB!) + mcr p15,0,r0,c2,c0,0 + mcr p15,0,r0,c2,c0,1 + mov r3, #0 + mcr p15,0,r3,c2,c0,2 + + // Set the bits mentioned above + mrc p15,0,r2,c1,c0,0 + orr r2,r2,r1 + mcr p15,0,r2,c1,c0,0 + bx lr + +.globl mmu_stop +mmu_stop: + mrc p15,0,r2,c1,c0,0 + bic r2,#0x1000 + bic r2,#0x0004 + bic r2,#0x0001 + mcr p15,0,r2,c1,c0,0 + bx lr + +.globl tlb_invalidate +tlb_invalidate: + mov r2, #0 + // Invalidate Entries + mcr p15, 0, r2, c8, c7, 0 + // DSB + mcr p15, 0, r2, c7, c10, 4 + bx lr diff --git a/kernel/lib/mmu.c b/kernel/lib/mmu.c new file mode 100644 index 0000000..e9dda7a --- /dev/null +++ b/kernel/lib/mmu.c @@ -0,0 +1,33 @@ +#include <lib/mmu.h> + +#define CACHABLE 0x08 +#define BUFFERABLE 0x04 +#define NO_PERMISSIONS_REQUIRED 0b11 << 10 +#define MMU_TABLE_BASE 0x00004000 + +void mmu_start(unsigned long base, unsigned long flags); + +void mmu_section(unsigned long virtual, unsigned long physical, unsigned long flags) +{ + unsigned long offset = virtual >> 20; + unsigned long* entry = (unsigned long*)(MMU_TABLE_BASE | (offset << 2)); + unsigned long physval = (physical & 0xFFF00000) | (flags & 0x7FFC) | 0x00C02; + *entry = physval; +} + +extern unsigned long __bss_end; +void mmu_init(void) +{ + for (unsigned long addr = 0x00000000;; addr += 0x00100000) { + if (addr < (unsigned long)&__bss_end + 0x00100000) { + mmu_section(addr, addr, CACHABLE | BUFFERABLE); + } else { + mmu_section(addr, addr, NO_PERMISSIONS_REQUIRED); + } + if (addr == 0x02000000) + mmu_section(addr, addr, CACHABLE | BUFFERABLE | NO_PERMISSIONS_REQUIRED); + if (addr == 0xFFF00000) + break; + } + mmu_start(MMU_TABLE_BASE,0x00000001|0x1000|0x0004); +} diff --git a/kernel/lib/queue.c b/kernel/lib/queue.c new file mode 100644 index 0000000..1fc35f6 --- /dev/null +++ b/kernel/lib/queue.c @@ -0,0 +1,55 @@ +#include <lib/queue.h> + +void push_to_queue(struct Entry* e, struct Queue* q) +{ + q->end.next->next = e; + q->end.next = e; + e->next = &q->end; +} + +void prepend_to_queue(struct Entry* e, struct Queue* q) +{ + e->next = q->start.next; + q->start.next = e; + if (e->next->entry_type == END_ENTRY) + q->end.next = e; 
+} + +struct Entry* pop_from_queue(struct Queue* q) +{ + if (q->start.next->entry_type == END_ENTRY) + return 0; + struct Entry* e = q->start.next; + q->start.next = e->next; + if (e->next->entry_type == END_ENTRY) + q->end.next = &q->start; + return e; +} + +struct Entry* remove_next_from_queue(struct Entry* e) +{ + struct Entry* prev = e; + struct Entry* remove = e->next; + struct Entry* next = remove->next; + if (remove->entry_type != VALUE_ENTRY) + return 0; + prev->next = next; + if (next->entry_type == END_ENTRY) + next->next = prev; + return remove; +} + +struct Entry* find_value(void* value, struct Queue* q) +{ + struct Entry* prev; + struct Entry* entry; + prev = &q->start; + entry = prev->next; + while (entry->entry_type != END_ENTRY) { + if (entry->value == value) + return prev; + prev = entry; + entry = prev->next; + } + return 0; +} diff --git a/kernel/lib/strings.c b/kernel/lib/strings.c new file mode 100644 index 0000000..674af19 --- /dev/null +++ b/kernel/lib/strings.c @@ -0,0 +1,119 @@ +#include <lib/kmem.h> +#include <lib/strings.h> + +unsigned long strlen(string_t s) +{ + unsigned long len = 0; + while (s[len] != 0) { + len += 1; + } + return len; +} + +void strcpy(string_t src, string_t dest) +{ + unsigned long idx = 0; + while (src[idx] != 0) { + dest[idx] = src[idx]; + idx++; + } + dest[idx] = src[idx]; +} + +unsigned char strcmp(string_t a, string_t b) +{ + unsigned long idx = 0; + while (a[idx] != 0 && b[idx] != 0) { + if (a[idx] != b[idx]) { + return 0; + } + idx += 1; + } + return a[idx] == b[idx]; +} + +unsigned char strcmpn(string_t a, string_t b, unsigned int n) +{ + unsigned long idx = 0; + while (a[idx] != 0 && b[idx] != 0 && idx+1 < n) { + if (a[idx] != b[idx]) { + return 0; + } + idx += 1; + } + return a[idx] == b[idx]; +} + +char* zhex32_to_str(unsigned long value) +{ + static char data[10]; + char tmp = 0; + char isz = -1; + for (int i = 0; i < 8; i++) { + tmp = (value >> 4*(8-i-1))&0xF; + if (isz == 0xFF && tmp != 0) + isz = i; + if(tmp > 0x9) + tmp += 7; + tmp += 0x30; + data[i] = tmp; + } + return data+isz; +} + +char* hex32_to_str(unsigned long value) +{ + static char data[10]; + char tmp = 0; + for (int i = 0; i < 8; i++) { + tmp = (value >> 4*(8-i-1))&0xF; + if(tmp > 0x9) + tmp += 7; + tmp += 0x30; + data[i] = tmp; + } + return data; +} + +char* u32_to_str(unsigned long value) +{ + unsigned long t = value; + unsigned long c; + static char data[12]; + char* dptr = data + 9; + for (int i = 0; i <= 10; i++) { + c = t%10; + *dptr = 0x30 + (c&0xF); + t /= 10; + if (t==0) + break; + dptr -= 1; + } + return dptr; +} + +char* s32_to_str(unsigned long value) +{ + long t = value; + unsigned long c; + char is_neg = 0; + if (t < 0) { + t = -t; + is_neg = 1; + } + static char data[13]; + char* dptr = data + 10; + for (int i = 0; i <= 10; i++) { + c = t%10; + *dptr = 0x30 + (c&0xF); + t /= 10; + if (t==0) + break; + dptr -= 1; + } + if (is_neg) { + dptr -= 1; + *dptr = '-'; + } + return dptr; +} diff --git a/kernel/sys/core.c b/kernel/sys/core.c new file mode 100644 index 0000000..d76b712 --- /dev/null +++ b/kernel/sys/core.c @@ -0,0 +1,58 @@ +#include <cpu/irq.h> +#include <cpu.h> +#include <drivers/uart.h> +#include <globals.h> +#include <graphics/lfb.h> +#include <lib/kmem.h> +#include <lib/mmu.h> +#include <lib/strings.h> +#include <symbols.h> +#include <sys/core.h> +#include <sys/power.h> +#include <sys/schedule.h> +#include <util/mutex.h> +#include <util/status.h> +#include <util/time.h> + +// Initialize IRQs +void sysinit(void) +{ + // Initialize System 
Globals + stimeh = *(unsigned long*)SYS_TIMER_CHI; + stimel = *(unsigned long*)SYS_TIMER_CLO; + *(unsigned long*) SYS_TIMER_C0 = 2000000 + stimeh; // 2 second trigger + uart_init(); + ///... + + // Route GPU interrupts to Core 0 + store32(0x00, GPU_INTERRUPTS_ROUTING); + + // Mask Overrun of UART0 + store32(1<<4, UART0_IMSC); + // Enable UART GPU IRQ + store32(1<<25, IRQ_ENABLE2); + // Enable Timer + //// Get the frequency + cntfrq = read_cntfrq(); + // Clear cntv interrupt and set next 1 second timer + write_cntv_tval(cntfrq); + // Route timer to core0 fiq + routing_core0cntv_to_core0fiq(); + // Enable timer + enablecntv(); + // Enable system timer + store32(SYS_TIMER_SC_M0, IRQ_ENABLE1); + + // Graphics Initialize + lfb_init(); + lfb_showpicture(); + + // Initialize Memory Management Unit + mmu_init(); + + // Initialize Mutex Manager + mutex_init(); + + // Start Scheduler + init_scheduler(); +} diff --git a/kernel/sys/kernel.S b/kernel/sys/kernel.S new file mode 100644 index 0000000..71b22a1 --- /dev/null +++ b/kernel/sys/kernel.S @@ -0,0 +1,32 @@ +.section ".text.kernel" + +.include "macros.inc" + +.globl kernel_main +kernel_main: + bl sysinit + bl status + ldr r2, =ttbr_msg + mov r0, #23 + mov r1, #0 + mov r3, #0xFF00 + bl draw_cstring + // Initialize System Cycle Counter + mov r0, #1 + mcr p15, 0, r0, c9, c14, 0 + mov r0, #1 + mcr p15, 0, r0, c9, c12, 0 + mov r0, #0x80000000 + mcr p15, 0, r0, c9, c12, 1 + + // Intentional undefined instruction + // .word 0xf7f0a000 + cpsie ai, #0x10 + svc #2 // Start scheduling! +2: + wfe + b 2b + +.section .data +ttbr_msg: + .asciz "MMU Initialized!" diff --git a/kernel/sys/power.c b/kernel/sys/power.c new file mode 100644 index 0000000..c4f12a9 --- /dev/null +++ b/kernel/sys/power.c @@ -0,0 +1,39 @@ +#include <symbols.h> +#include <sys/core.h> +#include <sys/power.h> + +//https://github.com/raspberrypi/linux/blob/aeaa2460db088fb2c97ae56dec6d7d0058c68294/drivers/watchdog/bcm2835_wdt.c +void wdt_start(void) +{ + store32(BCM2835_PERI_BASE + PM_WDOG, PM_PASSWORD | (SECS_TO_WDOG_TICS(100) & PM_WDOG_TIME_SET)); + unsigned long cur = load32(BCM2835_PERI_BASE + PM_RSTC); + store32(BCM2835_PERI_BASE + PM_RSTC, PM_PASSWORD | (cur & PM_RSTC_WRCFG_CLR) | PM_RSTC_WRCFG_FULL_RESET); +} + +void wdt_stop(void) +{ + store32(BCM2835_PERI_BASE + PM_RSTC, PM_PASSWORD | PM_RSTC_RESET); +} + +void __bcm2835_restart(unsigned char partition) +{ + unsigned long val, rsts; + rsts = (partition & 1) | ((partition & 0b10) << 1) | + ((partition & 0b100) << 2) | ((partition & 0b1000) << 3) | + ((partition & 0b10000) << 4) | ((partition & 0b100000) << 5); + val = load32(BCM2835_PERI_BASE + PM_RSTS); + val &= PM_RSTS_PARTITION_CLR; + val |= PM_PASSWORD | rsts; + store32(BCM2835_PERI_BASE + PM_RSTS, val); + store32(BCM2835_PERI_BASE + PM_WDOG, 10 | PM_PASSWORD); + val = load32(BCM2835_PERI_BASE + PM_RSTC); + val &= PM_RSTC_WRCFG_CLR; + val |= PM_PASSWORD | PM_RSTC_WRCFG_FULL_RESET; + store32(BCM2835_PERI_BASE + PM_RSTC, val); + delay(1); +} + +void bcm2835_power_off(void) +{ + __bcm2835_restart(63); // Partition 63 => Halt +} diff --git a/kernel/sys/schedule.S b/kernel/sys/schedule.S new file mode 100644 index 0000000..a47252c --- /dev/null +++ b/kernel/sys/schedule.S @@ -0,0 +1,53 @@ +.section ".text" +.globl schedule + +.include "macros.inc" + +// Assumption: Enter in SVC mode +schedule: + preserve_ctx + ldr r1, =irqlr + ldr r0, [r1] + cmp r0, #0 + beq 1f + // Replace LR with IRQ's LR + ldr r3, =scheduler + ldr r2, [r3, #0] // struct Thread* rthread + str r0, [r2, #0] // svc_lr -> 
void* pc + // Clear IRQ's LR + mov r0, #0 + str r0, [r1] +1: + bl next_thread // Thread* next -> r0 + ldr r3, =scheduler + str r0, [r3, #0] // next -> rthread + restore_ctx + subs pc, lr, #0 + +.globl cleanup +cleanup: + bl c_cleanup + // usrloop -> rthread + ldr r3, =scheduler + ldr r2, =usrloopthread + str r2, [r3, #0] + ldr sp, [r2, #4] + ldmfd sp!,{lr} + ldmfd sp!,{r0-r12} + ldr lr, =kernel_usr_task_loop + // svc sched + svc #2 +.globl kernel_usr_task_loop +kernel_usr_task_loop: + wfe + b kernel_usr_task_loop + +.globl add_thread +add_thread: + mrs r3, cpsr + and r3, #0x1F + cmp r3, #0x10 + beq 1f + b svc_add_thread +1: svc #3 + bx lr diff --git a/kernel/sys/schedule.c b/kernel/sys/schedule.c new file mode 100644 index 0000000..9b6d46e --- /dev/null +++ b/kernel/sys/schedule.c @@ -0,0 +1,468 @@ +#include <cpu.h> +#include <globals.h> +#include <graphics/lfb.h> +#include <drivers/uart.h> +#include <lib/kmem.h> +#include <sys/schedule.h> +#include <util/mutex.h> + +extern void kernel_usr_task_loop(void); + +void init_scheduler(void) +{ + // Set rthread to usrloopthread - an infinitely running thread so that the pointer will never be null + usrloopthread.pc = (void*)kernel_usr_task_loop; + usrloopthread.sp = (void*)0x5FC8; + *(unsigned long**)usrloopthread.sp = (unsigned long*)kernel_usr_task_loop; + usrloopthread.sp_base = -1; + usrloopthread.mptr = 0; + usrloopthread.pid = -1; + usrloopthread.priority = -1; + usrloopthread.old_priority = -1; + usrloopthread.status = THREAD_READY; + usrloopthread.offset = -1; + scheduler.rthread = &usrloopthread; + + // Initialize Scheduling Queues + for (unsigned long p = 0; p < PRIORITIES; p++) { + // Ready Init + scheduler.ready[p].start.value = 0; + scheduler.ready[p].start.next = &scheduler.ready[p].end; + scheduler.ready[p].start.entry_type = START_ENTRY; + scheduler.ready[p].end.value = 0; + scheduler.ready[p].end.next = &scheduler.ready[p].start; + scheduler.ready[p].end.entry_type = END_ENTRY; + // Mutex Wait Init + scheduler.mwait[p].start.value = 0; + scheduler.mwait[p].start.next = &scheduler.mwait[p].end; + scheduler.mwait[p].start.entry_type = START_ENTRY; + scheduler.mwait[p].end.value = 0; + scheduler.mwait[p].end.next = &scheduler.mwait[p].start; + scheduler.mwait[p].end.entry_type = END_ENTRY; + // Signal Wait Init + scheduler.swait[p].start.value = 0; + scheduler.swait[p].start.next = &scheduler.swait[p].end; + scheduler.swait[p].start.entry_type = START_ENTRY; + scheduler.swait[p].end.value = 0; + scheduler.swait[p].end.next = &scheduler.swait[p].start; + scheduler.swait[p].end.entry_type = END_ENTRY; + } + + // Initialize nextpid + nextpid = FIRST_AVAIL_PID; + + // Initialize Threads - Stack Base and Offsets + for (unsigned long i = 0; i < MAX_THREADS; i++) { + struct Thread* t = &threads[i]; + t->offset = i; + t->sp_base = 0x20000000 - STACK_SIZE*i; + thread_entries[i].value = t; + thread_entries[i].next = &thread_entries[(i+1)]; + thread_entries[i].entry_type = VALUE_ENTRY; + } + // Initialize the free queue + scheduler.free_threads.start.value = 0; + scheduler.free_threads.start.entry_type = START_ENTRY; + scheduler.free_threads.end.value = 0; + scheduler.free_threads.end.entry_type = END_ENTRY; + scheduler.free_threads.start.next = &thread_entries[0]; + scheduler.free_threads.end.next = &thread_entries[MAX_THREADS-1]; + thread_entries[MAX_THREADS-1].next = &scheduler.free_threads.end; +} + +void push_thread_to_queue(struct Thread* t, unsigned char type, unsigned char priority) +{ + struct Entry* entry = 
&thread_entries[t->offset]; + struct Queue* queue; + if (type == THREAD_READY) { + queue = &scheduler.ready[priority]; + } else if (type == THREAD_MWAIT) { + queue = &scheduler.mwait[priority]; + } else if (type == THREAD_SWAIT) { + queue = &scheduler.swait[priority]; + } else { + return; + } + push_to_queue(entry, queue); + //queue->end.next->next = entry; + //queue->end.next = entry; + //entry->next = &queue->end; +} + +void prepend_thread_to_queue(struct Thread* t, unsigned char type, unsigned char priority) +{ + struct Entry* entry = &thread_entries[t->offset]; + struct Queue* queue; + if (type == THREAD_READY) { + queue = &scheduler.ready[priority]; + } else if (type == THREAD_MWAIT) { + queue = &scheduler.mwait[priority]; + } else if (type == THREAD_SWAIT) { + queue = &scheduler.swait[priority]; + } else { + return; + } + prepend_to_queue(entry, queue); +} + +struct Entry* pop_thread_from_queue(unsigned char type, unsigned char priority) +{ + struct Entry* entry = 0; + struct Queue* queue; + if (type == THREAD_READY) { + queue = &scheduler.ready[priority]; + } else if (type == THREAD_MWAIT) { + queue = &scheduler.mwait[priority]; + } else if (type == THREAD_SWAIT) { + queue = &scheduler.swait[priority]; + } else { + return entry; + } + return pop_from_queue(queue); +} + +struct Entry* find_pid(unsigned long pid) +{ + for (unsigned char p = 0; p < PRIORITIES; p++) { + struct Queue* queue; + struct Entry* prev; + struct Entry* entry; + + queue = &scheduler.ready[p]; + prev = &queue->start; + entry = prev->next; + while (entry->entry_type != END_ENTRY) { + if (((struct Thread*)entry->value)->pid == pid) + return prev; + prev = entry; + entry = entry->next; + } + + queue = &scheduler.mwait[p]; + prev = &queue->start; + entry = prev->next; + while (entry->entry_type != END_ENTRY) { + if (((struct Thread*)entry->value)->pid == pid) + return prev; + prev = entry; + entry = entry->next; + } + + queue = &scheduler.swait[p]; + prev = &queue->start; + entry = prev->next; + while (entry->entry_type != END_ENTRY) { + if (((struct Thread*)entry->value)->pid == pid) + return prev; + prev = entry; + entry = entry->next; + } + } + return 0; +} + +struct Entry* find_mutex_wait_next(void* m) +{ + for (unsigned char p = 0; p < PRIORITIES; p++) { + struct Queue* queue = &scheduler.mwait[p]; + struct Entry* prev = &queue->start; + struct Entry* entry = prev->next; + while (entry->entry_type != END_ENTRY) { + if (((struct Thread*)entry->value)->mptr == m) + return prev; + prev = entry; + entry = entry->next; + } + } + return 0; +} + +struct Entry* find_signal_wait_next(void* s) +{ + for (unsigned char p = 0; p < PRIORITIES; p++) { + struct Queue* queue = &scheduler.swait[p]; + struct Entry* prev = &queue->start; + struct Entry* entry = prev->next; + while (entry->entry_type != END_ENTRY) { + if (((struct Thread*)entry->value)->mptr == s) + return prev; + prev = entry; + entry = entry->next; + } + } + return 0; +} + +struct Entry* get_unused_thread(void) +{ + struct Queue* q = &scheduler.free_threads; + // If we have no available free threads + // return null pointer + if (q->start.next->entry_type == END_ENTRY) + return 0; + // Otherwise, get the next thread + return pop_from_queue(q); +} + +unsigned char find_duplicate(void* pc) +{ + for (unsigned char p = 0; p < PRIORITIES; p++) { + struct Queue* queue = &scheduler.ready[p]; + struct Entry* entry = queue->start.next; + while (entry->entry_type == VALUE_ENTRY) { + if (((struct Thread*)entry->value)->pc == pc) { + return 1; + } + } + } + return 0; +} + 
+unsigned char add_thread_without_duplicate(void* pc, void* arg, unsigned char priority) +{ + if (!find_duplicate(pc)) { + return add_thread(pc, arg, priority); + } + return 1; +} + +unsigned char svc_add_thread(void* pc, void* arg, unsigned char priority) +{ + struct Entry* thread_entry = get_unused_thread(); + // The only point-of-failure is not having a thread available + if (thread_entry == 0) + return 1; + struct Thread* thread = thread_entry->value; + /// Thread Setup + thread->pc = pc; + unsigned long* argp = (void*)thread->sp_base; + argp -= 13; + *argp = (unsigned long)arg; // Set r0 to the argument + argp -= 1; + *(unsigned long**)argp = (unsigned long*)cleanup; // Set lr to the cleanup function + thread->sp = argp; + thread->status = THREAD_READY; + thread->mptr = (void*)0; + thread->pid = nextpid++; + // Reset next pid on overflow + if (nextpid < FIRST_AVAIL_PID) { + nextpid = FIRST_AVAIL_PID; + } + // Cap Priority Level + if (priority >= PRIORITIES) + thread->priority = PRIORITIES - 1; + else + thread->priority = priority; + // This thread is new + thread->old_priority = -1; + // Reserved for non-preemptible tasking + thread->preempt = 0; + /// Add Thread to Scheduler + push_thread_to_queue(thread, THREAD_READY, thread->priority); + return 0; +} + +void uart_scheduler(void) +{ + uart_string("Scheduler Info\n==============\nCurrent\n"); + uart_hex((unsigned long)scheduler.rthread); + uart_char(' '); + kmemshow32((void*)scheduler.rthread, 9); + unsigned long length; + for(int p = 0; p < PRIORITIES; p++) { + uart_string("Priority "); + uart_10(p); + uart_char('\n'); + struct Queue* queue; + struct Entry* entry; + + queue = &scheduler.ready[p]; + uart_string("Ready Queue\n"); + entry = queue->start.next; + length = 0; + while (entry->entry_type != END_ENTRY) { + uart_hex((unsigned long)entry->value); + uart_char(' '); + kmemshow32((void*)entry->value, 9); + entry = entry->next; + length++; + } + uart_hexn(length); + + queue = &scheduler.mwait[p]; + uart_string("Mutex Wait Queue\n"); + entry = queue->start.next; + length = 0; + while (entry->entry_type != END_ENTRY) { + uart_hex((unsigned long)entry->value); + uart_char(' '); + kmemshow32((void*)entry->value, 9); + entry = entry->next; + length++; + } + uart_hexn(length); + + queue = &scheduler.swait[p]; + uart_string("Signal Wait Queue\n"); + entry = queue->start.next; + length = 0; + while (entry->entry_type != END_ENTRY) { + uart_hex((unsigned long)entry->value); + uart_char(' '); + kmemshow32((void*)entry->value, 9); + entry = entry->next; + length++; + } + uart_hexn(length); + } + // Count number of free threads + struct Queue* queue = &scheduler.free_threads; + struct Entry* entry = queue->start.next; + while (entry->entry_type != END_ENTRY) { + entry = entry->next; + length++; + } + uart_hexn(length); + uart_string("==============\n"); +} + +struct Thread* next_thread(void) +{ + // Recurse through all priorities to try to find a ready thread + for (int p = 0; p < PRIORITIES; p++) { + struct Queue* rq = &scheduler.ready[p]; + if (rq->start.next->entry_type == END_ENTRY) + continue; + return rq->start.next->value; + } + // No thread found, use basic usrloopthread while waiting for new thread + return &usrloopthread; +} + +void c_cleanup(void) +{ + struct Thread* rt = scheduler.rthread; + struct Entry* e = pop_thread_from_queue(THREAD_READY, rt->priority); + // Add to free threads + push_to_queue(e, &scheduler.free_threads); +} + +void yield(void) +{ + struct Thread* rthread = scheduler.rthread; + // usrloopthread should not be 
yielded + if (rthread == &usrloopthread) + return; + // Put current thread at the end of its ready queue, + // thus any threads of the same priority can be run first + unsigned char priority = rthread->priority; + struct Entry* tq; + // Remove from top of queue + tq = pop_thread_from_queue(THREAD_READY, priority); + if (tq != 0) { + // Add to bottom of queue + push_thread_to_queue(tq->value, THREAD_READY, priority); + } +} + +void sched_mutex_yield(void* m) +{ + struct Thread* rthread = scheduler.rthread; + // usrloopthread should not be yielded + if (rthread == &usrloopthread) + return; + unsigned char priority = rthread->priority; + // Signify which lock this thread is waiting for + rthread->mptr = m; + struct Entry* rt; + // Remove from top of running queue + rt = pop_thread_from_queue(THREAD_READY, priority); + if (rt != 0) + // Push to bottom of wait queue + push_thread_to_queue(rt->value, THREAD_MWAIT, priority); + // Find the thread that has the mutex locked + struct Mutex* mtex = m; + struct Entry* mutex_next = find_pid(mtex->pid); + if (mutex_next == 0) + return; + // The next thread is the one with the lock + struct Entry* mutex_thread_entry = mutex_next->next; + // Check if it is lower priority + if (((struct Thread*)mutex_thread_entry->value)->priority > priority) { + // Remove it from the old priority queue + remove_next_from_queue(mutex_next); + struct Thread* t = mutex_thread_entry->value; + // Preserve the old priority + if (t->old_priority == 0xFF) + t->old_priority = t->priority; + t->priority = priority; + // Add it to the higher priority queue + push_thread_to_queue(t, THREAD_READY, priority); + } +} + +void sched_semaphore_yield(void* s) +{ + struct Thread* rthread = scheduler.rthread; + // usrloopthread should not be yielded + if (rthread == &usrloopthread) + return; + unsigned char priority = rthread->priority; + // Signify which lock this thread is waiting for + rthread->mptr = s; + struct Entry* rt; + // Remove from top of running queue + rt = pop_thread_from_queue(THREAD_READY, priority); + if (rt != 0) + // Push to bottom of wait queue + push_thread_to_queue(rt->value, THREAD_SWAIT, priority); +} + +void sched_mutex_resurrect(void* m) +{ + // Find any mutex to resurrect + struct Entry* prev = find_mutex_wait_next(m); + if (prev == 0) + return; + struct Entry* entry = prev->next; + struct Thread* thread = entry->value; + // Resurrect the thread + thread->mptr = 0; + // Remove from wait queue + entry = remove_next_from_queue(prev); + if (entry == 0) + return; + // Add to ready queue + push_thread_to_queue(entry->value, THREAD_READY, ((struct Thread*)entry->value)->priority); + // Demote current thread + struct Thread* rthread = scheduler.rthread; + unsigned long p = rthread->priority; + unsigned long op = rthread->old_priority; + // Restore the original priority level + if (op != 0xFF) { + struct Entry* tentry = pop_thread_from_queue(THREAD_READY, p); + ((struct Thread*)tentry->value)->priority = op; + ((struct Thread*)tentry->value)->old_priority = 0xFF; + prepend_thread_to_queue(tentry->value, THREAD_READY, op); + } +} + +void sched_semaphore_resurrect(void* s, unsigned long count) +{ + while (count--) { + // Find any signal/ semaphore to resurrect + struct Entry* prev = find_signal_wait_next(s); + if (prev == 0) + return; + struct Entry* entry = prev->next; + struct Thread* thread = entry->value; + // Resurrect the thread + thread->mptr = 0; + // Remove from wait queue + entry = remove_next_from_queue(prev); + if (entry == 0) + return; + // Add to ready queue + 
push_thread_to_queue(entry->value, THREAD_READY, ((struct Thread*)entry->value)->priority); + } +} diff --git a/kernel/tests/test.S b/kernel/tests/test.S new file mode 100644 index 0000000..e80b6be --- /dev/null +++ b/kernel/tests/test.S @@ -0,0 +1,31 @@ +.section .text + +a.btest: + push {lr} + mov r0, #5 + cmp r0, #4 + pop {pc} + +.globl atest +atest: + push {lr} + ldr r0, =a.btest + mov r1, #0 + mov r2, #0 + bl add_thread + mov r0, #5 + subs r0, #5 + svc #0 + beq 1f + mov r0, #0 + mov r1, #11 + mov r2, #0x4E + mov r3, #0xFF0000 + bl draw_cletter + pop {pc} +1: mov r0, #0 + mov r1, #11 + mov r2, #0x59 + mov r3, #0xFF00 + bl draw_cletter + pop {pc} diff --git a/kernel/tests/test.c b/kernel/tests/test.c new file mode 100644 index 0000000..d954ade --- /dev/null +++ b/kernel/tests/test.c @@ -0,0 +1,545 @@ +#include <cpu.h> +//#include <drivers/uart.h> +#include <globals.h> +#include <graphics/lfb.h> +#include <lib/kmem.h> +#include <sys/core.h> +#include <sys/schedule.h> +#include <util/lock.h> +#include <util/mutex.h> +#include <util/status.h> + +extern void atest(void); +void qualitative_tests(void); + +void nooptest(void) {} + +void mutex_contention_helper(struct Mutex* m) +{ + lock_mutex(m); + sys0(SYS_YIELD); + unlock_mutex(m); +} + +static int x = 0; +static int y = 13; +#define TEST_STR_CLR " " +#define TEST_RESULT_WIDTH 15 +#define TEST_COUNT 4096 +#define TEST_BIN_COUNT 32 +void test_entry(void) +{ + x = 0; + draw_hex32(0, y-1, nextpid); + draw_string(0, y+4, "Starting tests"); + unsigned long long ti, tf, dt=0,len; + unsigned int tidx = 0; + unsigned long bins[TEST_BIN_COUNT]; + for (int i = 0; i < TEST_BIN_COUNT; i++) { + bins[i] = 0; + } + + // Test 1: Trace Time + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + sys0_64(SYS_TIME, &ti); + sys0_64(SYS_TIME, &tf); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 2: Yield Time + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + sys0_64(SYS_TIME, &ti); + sys0(SYS_YIELD); + sys0_64(SYS_TIME, &tf); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 3: Add Thread, Lower Priority + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + sys0_64(SYS_TIME, &ti); + add_thread(nooptest, 0, 3); + sys0_64(SYS_TIME, &tf); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 4: Add Thread, Higher Priority + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + sys0_64(SYS_TIME, &ti); + add_thread(nooptest, 0, 0); + 
sys0_64(SYS_TIME, &tf); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 5: Create Mutex + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + sys0_64(SYS_TIME, &ti); + struct Mutex* m = create_mutex(0); + sys0_64(SYS_TIME, &tf); + delete_mutex(m); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 6: Delete Mutex + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + struct Mutex* m = create_mutex(0); + sys0_64(SYS_TIME, &ti); + delete_mutex(m); + sys0_64(SYS_TIME, &tf); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 7: Lock Mutex + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + struct Mutex* m = create_mutex(0); + sys0_64(SYS_TIME, &ti); + lock_mutex(m); + sys0_64(SYS_TIME, &tf); + delete_mutex(m); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 7a: Lock Contended Mutex + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + struct Mutex* m = create_mutex(0); + add_thread(mutex_contention_helper, m, 2); + sys0(SYS_YIELD); + sys0_64(SYS_TIME, &ti); + lock_mutex(m); + sys0_64(SYS_TIME, &tf); + delete_mutex(m); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 8: Unlock Mutex + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + struct Mutex* m = create_mutex(0); + lock_mutex(m); + sys0_64(SYS_TIME, &ti); + unlock_mutex(m); + sys0_64(SYS_TIME, &tf); + delete_mutex(m); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + 
draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Semaphore + static unsigned long sem = 0; + + // Test 9: Semaphore Decrease + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + sem = 1; + sys0_64(SYS_TIME, &ti); + sys1(SYS_SEMAPHORE_P, &sem); + sys0_64(SYS_TIME, &tf); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 10: Semaphore Increase + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + sem = 0; + sys0_64(SYS_TIME, &ti); + sys1(SYS_SEMAPHORE_V, &sem); + sys0_64(SYS_TIME, &tf); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + + // Test 10a: Semaphore Increase - No Schedule + dt = 0; + for(int i = 0; i < TEST_COUNT; i++) { + sem = 1; + sys0_64(SYS_TIME, &ti); + sys1(SYS_SEMAPHORE_V, &sem); + sys0_64(SYS_TIME, &tf); + dt += tf - ti; + if ((tf-ti) < TEST_BIN_COUNT) + bins[(tf-ti)]++; + } + for (int i = 0; i < TEST_BIN_COUNT; i++) { + draw_hex32(tidx, y+6+i, i); + draw_string(tidx+9, y+6+i, TEST_STR_CLR); + draw_u10(tidx+9, y+6+i, bins[i]); + bins[i] = 0; + } + draw_string(tidx, y+5, " "); + len = draw_u10(tidx, y+5, dt/TEST_COUNT); + draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); + tidx += TEST_RESULT_WIDTH; + draw_hex32(0, y-1, nextpid); + +// // Test 7: Tick Latency +//#define DELAY_TIME 512000 +// unsigned long center = 0; +// sys0_64(SYS_TIME, &ti); +// delay(DELAY_TIME); +// sys0_64(SYS_TIME, &tf); +// center = (tf - ti - 10); +// if (10 > (tf-ti)) +// center = 0; +// dt = 0; +// unsigned long j = 0; +// for(int i = 0; i < TEST_COUNT; i++) { +// sys0_64(SYS_TIME, &ti); +// delay(DELAY_TIME); +// sys0_64(SYS_TIME, &tf); +// dt += tf - ti; +// if ((tf-ti-center) < TEST_BIN_COUNT) +// bins[(tf-ti)-center]++; +// else +// j++; +// } +// for (int i = 0; i < TEST_BIN_COUNT; i++) { +// draw_hex32(tidx, y+6+i, i); +// draw_string(tidx+9, y+6+i, TEST_STR_CLR); +// draw_u10(tidx+9, y+6+i, bins[i]); +// bins[i] = 0; +// } +// draw_hex32(tidx, y+4, j); +// draw_string(tidx, y+5, " "); +// len = draw_u10(tidx, y+5, dt/TEST_COUNT); +// draw_u10(tidx+len+1, y+5, dt%TEST_COUNT); +// tidx += TEST_RESULT_WIDTH; +// draw_hex32(0, y-1, nextpid); + + add_thread(qualitative_tests, 0, 4); +} + +//static struct Mutex testm = {.addr = 0, .pid = 0}; +static struct Lock testm = {.pid = 0}; + +void priority_inversion_test1(void); +void priority_inversion_test2(void); +void priority_inversion_test3(void); +void priority_inversion_test4(void); + +void priority_inversion_test1(void) +{ + draw_cletter(x++, y+2, 'S', 0xFF0000); + // Try Lock + draw_cletter(x++, y+2, 'T', 0xFF0000); + lock(&testm); + // Lock Acquired + draw_cletter(x++, y+2, 'L', 0xFF0000); + // Add Thread to Assist with Priority Inversion + // Check + // - Show that this thread gets temporarily + // promoted + add_thread(priority_inversion_test3, 
+	// Unlock
+	draw_cletter(x++, y+2, 'U', 0xFF0000);
+	unlock(&testm);
+	draw_cletter(x++, y+2, 'F', 0xFF0000);
+}
+
+void priority_inversion_test2(void)
+{
+	draw_cletter(x++, y+0, 'S', 0x0000FF);
+	// Add Thread to Assist with Priority Inversion
+	// Check
+	// - Show that Thread 1 is Prepended To Queue
+	add_thread(priority_inversion_test4, 0, 3);
+	// Try Lock
+	draw_cletter(x++, y+0, 'T', 0x0000FF);
+	lock(&testm);
+	// Lock Acquired
+	draw_cletter(x++, y+0, 'L', 0x0000FF);
+	// Unlock
+	draw_cletter(x++, y+0, 'U', 0x0000FF);
+	unlock(&testm);
+	draw_cletter(x++, y+0, 'F', 0x0000FF);
+}
+
+void priority_inversion_test3(void)
+{
+	draw_cletter(x++, y+1, 'S', 0x00FF00);
+	// Add thread to Assist with Priority Inversion
+	// Check
+	// - Add high priority thread that will try
+	//   to lock the mutex
+	add_thread(priority_inversion_test2, 0, 1);
+	draw_cletter(x++, y+1, 'F', 0x00FF00);
+}
+
+void priority_inversion_test4(void)
+{
+	draw_cletter(x++, y+2, 'S', 0xAFAF00);
+	// Do nothing,
+	// just show that this is executed last
+	draw_cletter(x++, y+2, 'F', 0xAFAF00);
+}
+
+static unsigned long test_semaphore = 0;
+
+void semaphore_test1(void)
+{
+	draw_cletter(x++, y+1, ' ', 0xFF0000);
+	draw_cletter(x++, y+1, 'S', 0xFF0000);
+	// Try to decrement semaphore
+	draw_cletter(x++, y+1, 'T', 0xFF0000);
+	sys1(SYS_SEMAPHORE_P, &test_semaphore);
+	// Semaphore decremented
+	draw_cletter(x++, y+1, 'P', 0xFF0000);
+	draw_cletter(x++, y+1, 'V', 0xFF0000);
+	sys1(SYS_SEMAPHORE_V, &test_semaphore);
+	draw_cletter(x++, y+1, 'V', 0xFF0000);
+	sys1(SYS_SEMAPHORE_V, &test_semaphore);
+	draw_cletter(x++, y+1, 'V', 0xFF0000);
+	sys1(SYS_SEMAPHORE_V, &test_semaphore);
+	// Try to decrement semaphore
+	draw_cletter(x++, y+1, 'T', 0xFF0000);
+	sys1(SYS_SEMAPHORE_P, &test_semaphore);
+	// Semaphore decremented
+	draw_cletter(x++, y+1, 'P', 0xFF0000);
+	// Try to decrement semaphore
+	draw_cletter(x++, y+1, 'T', 0xFF0000);
+	sys1(SYS_SEMAPHORE_P, &test_semaphore);
+	// Semaphore decremented
+	draw_cletter(x++, y+1, 'P', 0xFF0000);
+	// Try to decrement semaphore
+	draw_cletter(x++, y+1, 'T', 0xFF0000);
+	sys1(SYS_SEMAPHORE_P, &test_semaphore);
+	// Semaphore decremented
+	draw_cletter(x++, y+1, 'P', 0xFF0000);
+	// Try to decrement semaphore
+	draw_cletter(x++, y+1, 'T', 0xFF0000);
+	sys1(SYS_SEMAPHORE_P, &test_semaphore);
+	// Semaphore decremented
+	draw_cletter(x++, y+1, 'P', 0xFF0000);
+	draw_cletter(x++, y+1, 'F', 0xFF0000);
+}
+
+void semaphore_test2(void)
+{
+	draw_cletter(x++, y+2, 'S', 0xFF00);
+	// Increment semaphore
+	draw_cletter(x++, y+2, 'V', 0xFF00);
+	sys1(SYS_SEMAPHORE_V, &test_semaphore);
+	// Increment semaphore
+	draw_cletter(x++, y+2, 'V', 0xFF00);
+	sys1(SYS_SEMAPHORE_V, &test_semaphore);
+	draw_cletter(x++, y+2, 'F', 0xFF00);
+}
+
+static struct Mutex* dead1 = 0;
+static struct Mutex* dead2 = 0;
+
+void deadlock_test2(void)
+{
+	draw_cletter(x++, y+1, 'S', 0xFF0000);
+	// Try Lock 1
+	draw_cletter(x++, y+1, 'T', 0xFF0000);
+	lock_mutex(dead1);
+	// Lock 1 Acquired
+	draw_cletter(x++, y+1, 'L', 0xFF0000);
+	// Try Lock 2
+	draw_cletter(x++, y+1, 't', 0xFF0000);
+	lock_mutex(dead2);
+	// Lock 2 Acquired
+	draw_cletter(x++, y+1, 'l', 0xFF0000);
+	// Unlock Locks
+	draw_cletter(x++, y+1, 'u', 0xFF0000);
+	unlock_mutex(dead2);
+	draw_cletter(x++, y+1, 'U', 0xFF0000);
+	unlock_mutex(dead1);
+	draw_cletter(x++, y+1, 'F', 0xFF0000);
+}
+
+void deadlock_test1(void)
+{
+	draw_cletter(x++, y+2, ' ', 0xFF00);
+	draw_cletter(x++, y+2, 'S', 0xFF00);
+	dead1 = create_mutex((void*)0xDEADBEEF);
+	dead2 = create_mutex((void*)0x12345678);
+	// Try Lock 2
+	draw_cletter(x++, y+2, 't', 0xFF00);
+	lock_mutex(dead2);
+	// Lock 2 Acquired
+	draw_cletter(x++, y+2, 'l', 0xFF00);
+	// Create Higher priority thread to
+	// check deadlock condition
+	draw_cletter(x++, y+2, 'A', 0xFF00);
+	add_thread(deadlock_test2, 0, 4);
+	// Try Lock 1 - This would deadlock
+	// if no mechanism is in place to
+	// prevent it
+	draw_cletter(x++, y+2, 'T', 0xFF00);
+	lock_mutex(dead1);
+	// Lock 1 Acquired - Deadlock condition
+	// properly handled
+	draw_cletter(x++, y+2, 'L', 0xFF00);
+	// Unlock Locks
+	draw_cletter(x++, y+2, 'u', 0xFF00);
+	unlock_mutex(dead2);
+	draw_cletter(x++, y+2, 'U', 0xFF00);
+	unlock_mutex(dead1);
+	delete_mutex(dead1);
+	delete_mutex(dead2);
+	draw_cletter(x++, y+2, 'F', 0xFF00);
+}
+
+void qualitative_tests(void)
+{
+	draw_string(0, y+0, " ");
+	draw_string(0, y+1, " ");
+	draw_string(0, y+2, " ");
+	draw_string(0, y+3, " ");
+	x = 0;
+	add_thread(atest, 0, 0);
+	add_thread(priority_inversion_test1, 0, 3);
+	add_thread(deadlock_test1, 0, 5);
+	add_thread(semaphore_test1, 0, 6);
+	add_thread(semaphore_test2, 0, 7);
+	add_thread(time_status, 0, 8);
+}
diff --git a/kernel/util/lock.c b/kernel/util/lock.c
new file mode 100644
index 0000000..c9fe654
--- /dev/null
+++ b/kernel/util/lock.c
@@ -0,0 +1,20 @@
+#include <cpu.h>
+#include <cpu/atomic/swap.h>
+#include <util/mutex.h>
+#include <util/lock.h>
+
+void lock(struct Lock* l)
+{
+	unsigned long mode = getmode() & 0x1F;
+	if (mode == 0x10) {
+		sys1(SYS_LOCK, l);
+	}
+}
+
+void unlock(struct Lock* l)
+{
+	unsigned long mode = getmode() & 0x1F;
+	if (mode == 0x10) {
+		sys1(SYS_UNLOCK, l);
+	}
+}
diff --git a/kernel/util/mutex.c b/kernel/util/mutex.c
new file mode 100644
index 0000000..8e85f8f
--- /dev/null
+++ b/kernel/util/mutex.c
@@ -0,0 +1,110 @@
+#include <cpu.h>
+#include <drivers/uart.h>
+#include <util/mutex.h>
+#include <util/lock.h>
+#include <globals.h>
+
+void mutex_init(void)
+{
+	for (unsigned long m = 0; m < MAX_MUTEXS; m++) {
+		mutexs[m].pid = 0;
+		mutexs[m].addr = 0;
+		mutex_entries[m].value = &mutexs[m];
+		mutex_entries[m].entry_type = VALUE_ENTRY;
+		mutex_entries[m].next = &mutex_entries[m+1];
+	}
+	// Initialize Free Mutexes
+	mutex_manager.free.start.value = 0;
+	mutex_manager.free.start.next = &mutex_entries[0];
+	mutex_manager.free.start.entry_type = START_ENTRY;
+	mutex_manager.free.end.value = 0;
+	mutex_manager.free.end.next = &mutex_entries[MAX_MUTEXS-1];
+	mutex_entries[MAX_MUTEXS-1].next = &mutex_manager.free.end;
+	mutex_manager.free.end.entry_type = END_ENTRY;
+	// Initialize In-use Mutexes
+	mutex_manager.used.start.value = 0;
+	mutex_manager.used.start.next = &mutex_manager.used.end;
+	mutex_manager.used.start.entry_type = START_ENTRY;
+	mutex_manager.used.end.value = 0;
+	mutex_manager.used.end.next = &mutex_manager.used.start;
+	mutex_manager.used.end.entry_type = END_ENTRY;
+}
+
+struct Mutex* create_mutex(void* addr)
+{
+	struct Entry* e = pop_from_queue(&mutex_manager.free);
+	if (e == 0)
+		return 0;
+	struct Mutex* m = e->value;
+	m->pid = 0;
+	m->addr = addr;
+	push_to_queue(e, &mutex_manager.used);
+	return e->value;
+}
+
+unsigned char delete_mutex(struct Mutex* m)
+{
+	struct Entry* entry = find_value(m, &mutex_manager.used);
+	if (entry == 0)
+		return 1;
+	// Remove it from the queue
+	struct Entry* theentry = remove_next_from_queue(entry);
+	// Add it to the free queue
+	prepend_to_queue(theentry, &mutex_manager.free);
+	return 0;
+}
+
+void uart_mutexes(void)
+{
+	struct Entry* entry = mutex_manager.used.start.next;
+	while (entry->entry_type == VALUE_ENTRY)
+	{
+		struct Mutex* m = entry->value;
+		uart_hex((unsigned long)m);
+		uart_char(' ');
+		uart_hex(m->pid);
+		uart_char(' ');
+		uart_hexn((unsigned long)m->addr);
+		entry = entry->next;
+	}
+	unsigned long count = 0;
+	entry = mutex_manager.free.start.next;
+	while (entry->entry_type == VALUE_ENTRY) {
+		count++;
+		entry = entry->next;
+	}
+	uart_hexn(count);
+}
+
+void lock_mutex(struct Mutex* m)
+{
+	struct Thread* rthread = scheduler.rthread;
+	unsigned long rpid = rthread->pid;
+	unsigned long mode = getmode() & 0x1F;
+	if (mode == 0x10) {
+		// Find this mutex
+		struct Entry* mentry = find_value(m, &mutex_manager.used);
+		// If it is not a managed mutex, break away
+		if (mentry == 0)
+			return;
+		struct Entry* entry = mutex_manager.used.start.next;
+		// Ensure this thread locks all mutexes sequentially
+		// to avoid a deadlock
+		while (entry->entry_type == VALUE_ENTRY) {
+			struct Mutex* vmutex = entry->value;
+			// If this thread has locked it,
+			// toggle the lock to prevent deadlock
+			if (vmutex->pid == rpid) {
+				sys1(SYS_UNLOCK, vmutex);
+				sys1(SYS_LOCK, vmutex);
+			}
+			entry = entry->next;
+		}
+		sys1(SYS_LOCK, m);
+	}
+}
+
+void unlock_mutex(struct Mutex* m)
+{
+	unlock((struct Lock*)m);
+}
diff --git a/kernel/util/status.c b/kernel/util/status.c
new file mode 100644
index 0000000..456e89d
--- /dev/null
+++ b/kernel/util/status.c
@@ -0,0 +1,133 @@
+#include <cpu.h>
+#include <globals.h>
+#include <graphics/lfb.h>
+#include <symbols.h>
+#include <lib/strings.h>
+#include <lib/kmem.h>
+#include <sys/core.h>
+#include <sys/schedule.h>
+#include <util/mutex.h>
+#include <util/status.h>
+#include <util/time.h>
+
+void output_irq_status(void)
+{
+	// Basic IRQ
+	unsigned long ib_val = load32(IRQ_BASIC_ENABLE);
+	// IRQ 1
+	unsigned long i1_val = load32(IRQ_ENABLE1);
+	// IRQ 2
+	unsigned long i2_val = load32(IRQ_ENABLE2);
+	// FIQ
+	unsigned long f_val = load32(FIQ_CONTROL);
+
+	// Check GPU Interrupt Routing
+	unsigned long g_val = load32(GPU_INTERRUPTS_ROUTING);
+	draw_cletter(0, 1, (g_val & 0b11) + 0x30, 0x1EA1A1);
+	draw_cletter(1, 1, ((g_val >> 2) & 0b11) + 0x30, 0x1EA1A1);
+
+	draw_chex32(4, 1, ib_val, 0x1EA1A1);
+	draw_chex32(4+9, 1, i1_val, 0x1EA1A1);
+	draw_chex32(4+9*2, 1, i2_val, 0x1EA1A1);
+	draw_chex32(4+9*3, 1, f_val, 0x1EA1A1);
+
+	// Check UART IRQ
+	if (i2_val & (1<<25)) {
+		draw_cstring(0, 2, "UART", 0x00FF00);
+	} else if (f_val == 57) {
+		draw_cstring(0, 2, "UART", 0xFFA500);
+	} else {
+		draw_cstring(0, 2, "UART", 0xFF0000);
+	}
+
+	// Check System Timer Compare IRQ
+	if (i1_val & (1<<0)) {
+		draw_cstring(5, 2, "STIMERCMP", 0x00FF00);
+	} else if (f_val == 1) {
+		draw_cstring(5, 2, "STIMERCMP", 0xFFA500);
+	} else {
+		draw_cstring(5, 2, "STIMERCMP", 0xFF0000);
+	}
+
+	if (load32(CORE0_TIMER_IRQCNTL) & 0xF) {
+		draw_cstring(4+9+2, 2, "LTIMER", 0x00FF00);
+	} else if (load32(CORE0_TIMER_IRQCNTL) & 0xF0) {
+		draw_cstring(4+9+2, 2, "LTIMER", 0xFFA500);
+	} else {
+		draw_cstring(4+9+2, 2, "LTIMER", 0xFF0000);
+	}
+}
+
+void time_status(void)
+{
+	// Report Sys Timer Status
+	unsigned long systime;
+	draw_string(0, 8, "Sys Timer Status");
+	systime = *(volatile unsigned long*)SYS_TIMER_CS;
+	draw_hex32(17, 8, systime);
+	draw_string(17+8, 8, ":");
+	unsigned long long tval = get_time();
+	draw_hex32(17+8, 8, (tval >> 32));
+	draw_hex32(17+8+8, 8, tval);
+	systime = *(volatile unsigned long*)SYS_TIMER_C0;
+	draw_hex32(19+14+8+1, 8, systime);
+	draw_string(19+14+9+8, 8, "|");
+	draw_string(19+14+18, 8, " ");
+	draw_u10(19+14+18, 8, ((unsigned long)tval)/1000000);
+}
+
+void status(void)
+{
+	// OS Info
+	draw_cstring(7, 0, "v", 0x00FFFF);
+	draw_cstring(0, 0, os_name, 0xFF0000);
+	draw_cstring(8, 0, os_info_v, 0x00FFFF);
+
+	// GPU IRQ Statuses
+	output_irq_status();
+
+	// Timer Status
+	draw_cstring(0, 3, "TIMER", 0x00FF00);
+	// Output the frequency
+	draw_string(6, 3, "@");
+	unsigned long frq = read_cntfrq()/1000;
+	unsigned long fs_len = draw_u10(8, 3, frq) + 1;
+	draw_string(8+fs_len, 3, "kHz");
+	// Output the value
+	unsigned long v = read_cntv_tval();
+	unsigned long vs_len = draw_u10(8+fs_len+4, 3, v)+1;
+	draw_string(8+fs_len+4 +vs_len, 3, " ");
+	draw_letter(8+fs_len+4 +vs_len+1, 3, '|');
+	draw_hex32(8+fs_len+7+vs_len, 3, v);
+
+	// Video Status
+	draw_cstring(0, 4, "VIDEO", 0x00FF00);
+	unsigned long gw_len = draw_u10(6, 4, gwidth);
+	unsigned long gh_len = draw_u10(6+gw_len+1, 4, gheight) + 1;
+	draw_letter(6+gw_len, 4, 'x');
+	if(gisrgb)
+		draw_string(6+gw_len+gh_len + 1, 4, "RGB");
+	else
+		draw_string(6+gw_len+gh_len + 1, 4, "BGR");
+
+	// Core Stacks
+	draw_string(0, 5, "SVC IRQ FIQ User/SYS\n");
+	unsigned long sp = (unsigned long)getsvcstack();
+	draw_hex32(0, 6, sp);
+	sp = (unsigned long)getirqstack();
+	draw_hex32(9, 6, sp);
+	sp = (unsigned long)getfiqstack();
+	draw_hex32(9*2, 6, sp);
+	sp = (unsigned long)getsysstack();
+	draw_hex32(9*3, 6, sp);
+
+	// Report Core that updated status
+	unsigned long coren;
+	asm volatile (
+		"mrc p15, #0, %0, c0, c0, #5\n"
+		"and %0, %0, #3" : "=r"(coren) :: "cc");
+	draw_string(0, 7, "Status Updated by Core #");
+	draw_hex32(24, 7, coren);
+
+	time_status();
+}
diff --git a/kernel/util/time.c b/kernel/util/time.c
new file mode 100644
index 0000000..abb9c8d
--- /dev/null
+++ b/kernel/util/time.c
@@ -0,0 +1,76 @@
+#include <symbols.h>
+#include <sys/core.h>
+
+// CCNT - Cycle Timer (Close to ns resolution)
+
+void routing_core0cntv_to_core0fiq(void)
+{
+	store32(0x80, CORE0_TIMER_IRQCNTL);
+}
+
+void routing_core0cntv_to_core0irq(void)
+{
+	store32(0x08, CORE0_TIMER_IRQCNTL);
+}
+
+unsigned long read_core0timer_pending(void)
+{
+	unsigned long tmp;
+	tmp = load32(CORE0_IRQ_SOURCE);
+	return tmp;
+}
+
+unsigned long long read_cntvct(void)
+{
+	unsigned long long val;
+	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (val));
+	return (val);
+}
+
+unsigned long long read_cntvoff(void)
+{
+	unsigned long long val;
+	asm volatile("mrrc p15, 4, %Q0, %R0, c14" : "=r" (val));
+	return (val);
+}
+
+unsigned long read_cntv_tval(void)
+{
+	unsigned long val;
+	asm volatile ("mrc p15, 0, %0, c14, c3, 0" : "=r"(val) );
+	return val;
+}
+
+void write_cntv_tval(unsigned long val)
+{
+	asm volatile ("mcr p15, 0, %0, c14, c3, 0" :: "r"(val) );
+	return;
+}
+
+unsigned long read_cntfrq(void)
+{
+	unsigned long val;
+	asm volatile ("mrc p15, 0, %0, c14, c0, 0" : "=r"(val) );
+	return val;
+}
+
+unsigned long long get_time(void)
+{
+	union {
+		unsigned long long tval;
+		struct {
+			unsigned long low;	// low word first: little-endian layout
+			unsigned long high;	// so the words overlay tval correctly
+		} tvalb;
+	} t;
+	t.tvalb.low = *(unsigned long*)SYS_TIMER_CLO;
+	t.tvalb.high = *(unsigned long*)SYS_TIMER_CHI;
+	return t.tval;
+}
+
+void wait_msec(unsigned int n)
+{
+	unsigned long start = *(volatile unsigned long*)SYS_TIMER_CLO;	// 1 MHz system timer
+	while (*(volatile unsigned long*)SYS_TIMER_CLO - start < n * 1000)	// n ms = n*1000 ticks
+		asm volatile("nop");
+}
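
A side note on get_time() above: it reads SYS_TIMER_CLO and SYS_TIMER_CHI with two separate loads, so a low-word wrap between the two reads can still produce a torn value. A common guard is to re-read the high word until it is stable. This is only a sketch, not part of the commit; the name get_time_safe is hypothetical, while the SYS_TIMER_* symbols are the ones already used in kernel/util/time.c.

#include <symbols.h>

unsigned long long get_time_safe(void)
{
	unsigned long hi, lo;
	do {
		// Read high, then low; retry if the high word changed,
		// meaning the low word wrapped between the two loads.
		hi = *(volatile unsigned long*)SYS_TIMER_CHI;
		lo = *(volatile unsigned long*)SYS_TIMER_CLO;
	} while (hi != *(volatile unsigned long*)SYS_TIMER_CHI);
	return ((unsigned long long)hi << 32) | lo;
}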
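For readers following the tests in kernel/tests/test.c, here is a minimal usage sketch of the primitives this commit exercises. It is not part of the commit: create_mutex/lock_mutex/unlock_mutex, add_thread and the sys1(SYS_SEMAPHORE_P/V, ...) calls are taken from the diff above, the header locations are assumed from the includes seen here, and every demo_* name, producer, consumer and semaphore_demo are hypothetical.

#include <cpu.h>
#include <sys/schedule.h>
#include <util/mutex.h>

static struct Mutex* demo_mutex;        // guards demo_counter (hypothetical)
static unsigned long demo_counter = 0;
static unsigned long demo_ready = 0;    // counting semaphore, starts at 0

static void consumer(void)
{
	sys1(SYS_SEMAPHORE_P, &demo_ready); // block until the producer signals
	lock_mutex(demo_mutex);             // then read the shared value under the mutex
	unsigned long seen = demo_counter;
	unlock_mutex(demo_mutex);
	(void)seen;
}

static void producer(void)
{
	lock_mutex(demo_mutex);             // update the shared value
	demo_counter++;
	unlock_mutex(demo_mutex);
	sys1(SYS_SEMAPHORE_V, &demo_ready); // wake the blocked consumer
}

void semaphore_demo(void)
{
	demo_mutex = create_mutex(&demo_counter);
	// Same spawning pattern as qualitative_tests(): a lower number is a
	// higher priority, so the consumer should run first and block in P.
	add_thread(consumer, 0, 2);
	add_thread(producer, 0, 3);
}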