diff --git a/boot.s b/boot.s
new file mode 100644
index 0000000..0cb7ffd
--- /dev/null
+++ b/boot.s
@@ -0,0 +1,212 @@
+/* Declare constants for the multiboot header. */
+.set ALIGN, 1<<0 /* align loaded modules on page boundaries */
+.set MEMINFO, 1<<1 /* provide memory map */
+.set FLAGS, ALIGN | MEMINFO /* this is the Multiboot 'flag' field */
+.set MAGIC, 0x1BADB002 /* 'magic number' lets bootloader find the header */
+.set CHECKSUM, -(MAGIC + FLAGS) /* checksum of above, to prove we are multiboot */
+
+/*
+Declare a multiboot header that marks the program as a kernel. These are magic
+values that are documented in the multiboot standard. The bootloader will
+search for this signature in the first 8 KiB of the kernel file, aligned at a
+32-bit boundary. The signature is in its own section so the header can be
+forced to be within the first 8 KiB of the kernel file.
+*/
+.section .multiboot
+.align 4
+.long MAGIC
+.long FLAGS
+.long CHECKSUM
+
+/*
+The multiboot standard does not define the value of the stack pointer register
+(esp) and it is up to the kernel to provide a stack. This allocates room for a
+small stack by creating a symbol at the bottom of it, then allocating 16384
+bytes for it, and finally creating a symbol at the top. The stack grows
+downwards on x86. The stack is in its own section so it can be marked nobits,
+which means the kernel file is smaller because it does not contain an
+uninitialized stack. The stack on x86 must be 16-byte aligned according to the
+System V ABI standard and de-facto extensions. The compiler will assume the
+stack is properly aligned and failure to align the stack will result in
+undefined behavior.
+*/
+.section .bss
+.align 16
+stack_bottom:
+.skip 16384 # 16 KiB
+stack_top:
+
+/*
+The linker script specifies _start as the entry point to the kernel and the
+bootloader will jump to this position once the kernel has been loaded. It
+doesn't make sense to return from this function as the bootloader is gone.
+*/
+.section .text
+
+.global _start
+.global _kernel_early
+/*.global loadPageDirectory
+.global enablePaging*/
+
+.type _start, @function
+
+/*load_page_directory:
+	push %ebp
+	mov %esp, %ebp
+	mov 8(%esp), %eax
+	mov %eax, %cr3
+	mov %ebp, %esp
+	pop %ebp
+	ret
+
+enable_paging:
+	push %ebp
+	mov %esp, %ebp
+	mov %cr0, %eax
+	or $0x80000000, %eax
+	mov %eax, %cr0
+	mov %ebp, %esp
+	pop %ebp
+	ret*/
+
+enable_sse_asm:
+	push %eax
+	push %ebx
+	push %ecx
+	push %edx
+
+	# Check CPUID support by toggling the ID bit (bit 21) in EFLAGS
+	pushf
+	pop %eax
+	mov %eax, %ecx
+	xor $0x200000, %eax
+	push %eax
+	popf
+	pushf
+	pop %eax
+	xor %ecx, %eax
+	jz .no_cpuid
+
+	# Check for SSE (CPUID.1:EDX bit 25)
+	mov $1, %eax
+	cpuid
+	test $0x02000000, %edx
+	jz .no_sse
+
+	# Enable SSE
+	mov %cr0, %eax
+	and $~0x4, %eax # Clear EM (bit 2)
+	or $0x2, %eax # Set MP (bit 1)
+	mov %eax, %cr0
+
+	mov %cr4, %eax
+	or $0x600, %eax # Set OSFXSR | OSXMMEXCPT
+	mov %eax, %cr4
+
+	# Record for the C side that SSE is available
+	lea sse_initialized, %ebx
+	movl $1, (%ebx)
+
+.no_sse:
+.no_cpuid:
+	pop %edx
+	pop %ecx
+	pop %ebx
+	pop %eax
+	ret
+
+_kernel_early:
+	call _init
+
+	/*
+	TODO: add more stuff here that needs to be run before the main kernel code.
+	*/
+
+	ret
+
+_start:
+	/*
+	The bootloader has loaded us into 32-bit protected mode on an x86
+	machine. Interrupts are disabled. Paging is disabled. The processor
+	state is as defined in the multiboot standard. The kernel has full
+	control of the CPU. The kernel can only make use of hardware features
+	and any code it provides as part of itself. There's no printf
+	function, unless the kernel provides its own <stdio.h> header and a
+	printf implementation. There are no security restrictions, no
+	safeguards, no debugging mechanisms, only what the kernel provides
+	itself. It has absolute and complete power over the machine.
+	*/
+
+	/*
+	To set up a stack, we set the esp register to point to the top of the
+	stack (as it grows downwards on x86 systems). This is necessarily done
+	in assembly as languages such as C cannot function without a stack.
+	*/
+	movl $stack_top, %esp
+	andl $0xFFFFFFF0, %esp
+	movl %esp, %ebp
+
+	/*
+	This is a good place to initialize crucial processor state before the
+	high-level kernel is entered. It's best to minimize the early
+	environment where crucial features are offline. Note that the
+	processor is not fully initialized yet: features such as floating
+	point instructions and instruction set extensions are not initialized
+	yet. The GDT should be loaded here. Paging should be enabled here.
+	C++ features such as global constructors and exceptions will require
+	runtime support to work as well.
+	*/
+
+	cli /* Just in case */
+
+	call enable_sse_asm
+
+	/*
+	Preserve the multiboot registers on the stack. As pushed here they
+	line up as the first two cdecl arguments if kernel_main takes them;
+	the 8 bytes of padding keep the stack 16-byte aligned for the calls
+	below.
+	*/
+	subl $8, %esp  /* padding so the two pushes below preserve alignment */
+	push %eax      /* multiboot magic value */
+	push %ebx      /* multiboot information pointer */
+
+	/*
+	Call _kernel_early; early low-level initialization happens there.
+	Note that while _kernel_early is written in assembly, kernel_early is
+	written in C: _kernel_early is the assembly wrapper that will
+	eventually call the C-level kernel_early, so don't confuse the two.
+	*/
+	call _kernel_early
+
+	/*
+	Enter the high-level kernel. The ABI requires the stack is 16-byte
+	aligned at the time of the call instruction (which afterwards pushes
+	the return pointer of size 4 bytes). The stack was 16-byte aligned
+	above and we have since pushed a multiple of 16 bytes (8 bytes of
+	padding plus the two multiboot values), so the alignment has been
+	preserved and the call is well defined.
+	*/
+	call kernel_main
+
+	/*
+	If the system has nothing more to do, put the computer into an
+	infinite loop. To do that:
+	1) Disable interrupts with cli (clear interrupt enable in eflags).
+	   They are already disabled by the bootloader, so this is not needed.
+	   Mind that you might later enable interrupts and return from
+	   kernel_main (which is sort of nonsensical to do).
+	2) Wait for the next interrupt to arrive with hlt (halt instruction).
+	   Since they are disabled, this will lock up the computer.
+	3) Jump to the hlt instruction if it ever wakes up due to a
+	   non-maskable interrupt occurring or due to system management mode.
+	*/
+	cli
+1:	hlt
+	jmp 1b
+
+/*
+Set the size of the _start symbol to the current location '.' minus its start.
+This is useful when debugging or when you implement call tracing.
+*/
+.size _start, . - _start
+
+.section .data
+
+.global sse_initialized
+
+sse_initialized: .long 0 /* written as a 32-bit value by enable_sse_asm */
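Note: boot.s pushes EBX (the Multiboot information pointer) and EAX (the loader magic, 0x2BADB002) before calling into C, so they line up as the first and second cdecl arguments of kernel_main. The C side is not part of this diff, so the following is only a sketch of a signature that would match what boot.s sets up; the parameter names and the error handling are assumptions.

#include <stdint.h>

/* Set to 1 by enable_sse_asm in boot.s (stored as a 32-bit value). */
extern uint32_t sse_initialized;

/* A matching entry point: first argument comes from EBX, second from EAX. */
void kernel_main(void *multiboot_info, uint32_t magic)
{
    if (magic != 0x2BADB002) {
        /* Not started by a Multiboot-compliant loader; halt or report. */
    }

    if (sse_initialized) {
        /* SSE instructions (and compiler-generated SSE code) are usable now. */
    }

    (void)multiboot_info;
}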
diff --git a/crti.s b/crti.s
new file mode 100644
index 0000000..cebea3e
--- /dev/null
+++ b/crti.s
@@ -0,0 +1,15 @@
+.section .init
+.global _init
+.type _init, @function
+_init:
+	push %ebp
+	movl %esp, %ebp
+	/* gcc will nicely put the contents of crtbegin.o's .init section here. */
+
+.section .fini
+.global _fini
+.type _fini, @function
+_fini:
+	push %ebp
+	movl %esp, %ebp
+	/* gcc will nicely put the contents of crtbegin.o's .fini section here. */
diff --git a/gdt.asm b/gdt.asm
new file mode 100644
index 0000000..9b9fa94
--- /dev/null
+++ b/gdt.asm
@@ -0,0 +1,15 @@
+global gdt_flush
+
+gdt_flush:
+	mov eax, [esp + 4]
+	lgdt [eax]
+	; Reload segment registers
+	mov ax, 0x10 ; Kernel data segment
+	mov ds, ax
+	mov es, ax
+	mov fs, ax
+	mov gs, ax
+	mov ss, ax
+	jmp 0x08:.flush ; Kernel code segment
+.flush:
+	ret
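Note: gdt_flush above expects [esp + 4] to hold the address of a 6-byte lgdt operand (16-bit limit, 32-bit base), and the 0x08/0x10 selectors assume the kernel code and data descriptors are GDT entries 1 and 2. The C-side declarations are not in this diff; the sketch below shows what a caller might look like, with gdt_ptr, gdt_install and the parameter names as assumed identifiers.

#include <stdint.h>

/* 6-byte operand for lgdt: limit (table size minus 1) and linear base. */
struct gdt_ptr {
    uint16_t limit;
    uint32_t base;
} __attribute__((packed));

extern void gdt_flush(uint32_t gdt_ptr_addr); /* defined in gdt.asm */

/* Load a GDT whose entry 1 is kernel code (selector 0x08) and entry 2 is
 * kernel data (selector 0x10), matching the selectors gdt_flush reloads. */
void gdt_install(uint64_t *gdt_entries, uint16_t num_entries)
{
    struct gdt_ptr gp;

    gp.limit = (uint16_t)(num_entries * sizeof(uint64_t) - 1);
    gp.base = (uint32_t)gdt_entries;

    gdt_flush((uint32_t)&gp);
}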
diff --git a/grub.cfg b/grub.cfg
new file mode 100644
index 0000000..e2f6a63
--- /dev/null
+++ b/grub.cfg
@@ -0,0 +1,3 @@
+menuentry "Espresso" {
+	multiboot /boot/espresso.bin
+}
diff --git a/idt.asm b/idt.asm
new file mode 100644
index 0000000..4bdeb86
--- /dev/null
+++ b/idt.asm
@@ -0,0 +1,40 @@
+[bits 32]
+global idt_load
+global common_isr_stub
+
+extern isr_handler
+extern _push_regs
+extern _pop_regs
+
+idt_load:
+	mov eax, [esp + 4]
+	lidt [eax]
+	ret
+
+common_isr_stub:
+	call _push_regs ; Push all general-purpose registers
+	push ds
+	push es
+	push fs
+	push gs
+
+	mov ax, 0x10 ; Load kernel data segment
+	mov ds, ax
+	mov es, ax
+	mov fs, ax
+	mov gs, ax
+
+	push esp ; Pass pointer to the register state
+	call isr_handler ; Call your C ISR handler
+
+	add esp, 4 ; Clean up stack from push esp
+	pop gs
+	pop fs
+	pop es
+	pop ds
+
+	call _pop_regs ; restore all general-purpose registers
+
+	add esp, 8 ; Remove int_no and err_code
+	sti
+	iret
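Note: common_isr_stub hands isr_handler a pointer to the saved state. From the lowest address upward that is the four segment registers the stub pushes itself, then whatever _push_regs saved, then the interrupt number and error code (presumably pushed by per-vector ISR stubs, which are not in this diff), and finally the EIP/CS/EFLAGS frame pushed by the CPU. Since _push_regs and the per-vector stubs are not shown, the struct below is only a hypothetical layout that would have to be kept in exact agreement with them; registers_t and the field names are assumptions.

#include <stdint.h>

/* Hypothetical saved-state layout, lowest address first. The gs..ds fields
 * mirror the pushes done by common_isr_stub; the block between them and
 * int_no depends on what _push_regs actually saves and must match it. */
typedef struct {
    uint32_t gs, fs, es, ds;          /* pushed by common_isr_stub */
    uint32_t edi, esi, ebp, esp_dummy,
             ebx, edx, ecx, eax;      /* assuming a pusha-style _push_regs */
    uint32_t int_no, err_code;        /* assumed pushed by per-vector stubs */
    uint32_t eip, cs, eflags;         /* pushed by the CPU on interrupt entry */
} registers_t;

/* Called from common_isr_stub with a pointer to the frame above. */
void isr_handler(registers_t *regs)
{
    /* Dispatch on regs->int_no; for CPU exceptions that supply one,
     * regs->err_code holds the hardware error code. */
    (void)regs;
}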