Use the `iretq` method of jumping to 32-bit code so that VirtualBox (and maybe
QEMU/KVM too?) can cope and does not get its `ljmp <mem48>` emulation bug(s) tickled.
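Background on the trick: in long mode, `iretq` pops new values for `%rip`, `%cs`, `%rflags`, `%rsp`, and `%ss` off the stack, so building that five-quadword frame by hand and executing `iretq` reloads the code segment without `ljmp` and its m16:32 (`<mem48>`) memory operand ever being decoded. The frame the new `multiboot2_entry` constructs looks like this (slot annotations are descriptive, not symbols from the file):

        /*
         * Frame consumed by iretq, top of stack first:
         *   [%rsp + 0]   new %rip     (newcs)
         *   [%rsp + 8]   new %cs      (GDTSEL_CODE)
         *   [%rsp + 16]  new %rflags
         *   [%rsp + 24]  new %rsp
         *   [%rsp + 32]  new %ss      (GDTSEL_DATA)
         */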
Before (the `ljmp` version):

/*
 * Parts copyright Michael Brown <mbrown@fensystems.co.uk>
 *
 * Copyright (c) 2019, Joyent, Inc.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )

/* CR0: protection enabled */
#define CR0_PE ( 1 << 0 )

/* CR0: paging */
#define CR0_PG ( 1 << 31 )

/* CR4: page size extensions, physical address extension, page global enable */
#define CR4_PSE ( 1 << 4 )
#define CR4_PAE ( 1 << 5 )
#define CR4_PGE ( 1 << 7 )

/* Extended feature enable MSR (EFER) */
#define MSR_EFER 0xc0000080

/* EFER: long mode enable */
#define EFER_LME ( 1 << 8 )

[...]
         * %ebx -> multiboot info pointer (physical)
         */
        .align 16
        .globl multiboot2_entry

multiboot2_entry:
        cli

        movq    %rsi, %rbx /* mb2 infop */
        movq    %rdx, %rsi /* entry address */

        /* Load the mb2-mandated code and data segments. */
        leaq    entry_gdt_base(%rip), %rcx
        leaq    entry_gdt(%rip), %rax
        movq    %rax, (%rcx)    /* store &entry_gdt as the GDTR base */

        leaq    entry_gdtr(%rip), %rax
        lgdt    (%rax)

        /* Load our new %cs. */
        ljmp    *newcs_vector   /* m16:32 indirect far jump (<mem48>) */

        .code32
newcs:

        movw    $GDTSEL_DATA, %ax
        movw    %ax, %ds
        movw    %ax, %es
        movw    %ax, %fs
        movw    %ax, %gs
        movw    %ax, %ss

        /* Disable paging */
        movl    %cr0, %eax
        andl    $~CR0_PG, %eax
        movl    %eax, %cr0

        /* Drop PSE/PAE/PGE */
        movl    %cr4, %eax
        andb    $~(CR4_PAE | CR4_PGE | CR4_PSE), %al
        movl    %eax, %cr4

[...]

        /* %ebx still has our infop */
        movl    %edi, %eax      /* %eax = the MB2 magic for the kernel */
        jmp     *%esi

        /*
         * %rdi -> struct mb2 *
         * %rsi -> stack pointer to switch to
         * %rdx -> &multiboot2_enter_kernel
         */
        .align 16
        .code64
        .globl multiboot2_bounce

multiboot2_bounce:
        movq    %rsi, %rsp      /* switch to the provided stack */
        jmp     *%rdx           /* tail-call multiboot2_enter_kernel */
        .data

newcs_vector:
        .long   newcs, GDTSEL_CODE
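The `<mem48>` in the commit message refers to this six-byte memory operand: `ljmp *newcs_vector` reads an m16:32 far pointer, a 32-bit offset followed by a 16-bit selector, so only the low word of the second `.long` is used. Spelled out, the vector amounts to:

newcs_vector:
        .long   newcs           /* 32-bit offset */
        .word   GDTSEL_CODE     /* 16-bit code segment selector */
        .word   0               /* high half of the .long; ignored */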
        .align 16
entry_gdt:
        /* null entry */
        .word   0x0, 0x0
        .byte   0x0, 0x0, 0x0, 0x0

        /* 32 bit protected mode code segment */
        .word   0xffff, 0x0
        .byte   0x0, 0x9f, 0xcf, 0x0

        /* 32 bit protected mode data segment */
        .word   0xffff, 0x0
        .byte   0x0, 0x93, 0xcf, 0x0

entry_gdt_end:
        .equ    entry_gdt_length, entry_gdt_end - entry_gdt

        .align 16
entry_gdtr:
        .word entry_gdt_length - 1
After (the `iretq` version):

/*
 * Parts copyright Michael Brown <mbrown@fensystems.co.uk>
 *
 * Copyright 2020 Joyent, Inc.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )

/* CR0: protection enabled */
#define CR0_PE ( 1 << 0 )

/* CR0: paging */
#define CR0_PG ( 1 << 31 )

/* CR4: page size extensions, physical address extension, page global enable */
#define CR4_PSE ( 1 << 4 )
#define CR4_PAE ( 1 << 5 )
#define CR4_PGE ( 1 << 7 )

/* Extended feature enable MSR (EFER) */
#define MSR_EFER 0xc0000080

/* EFER: long mode enable */
#define EFER_LME ( 1 << 8 )

[...]
         * %ebx -> multiboot info pointer (physical)
         */
        .align 16
        .globl multiboot2_entry

multiboot2_entry:
        cli

        movq    %rsi, %rbx /* mb2 infop */
        movq    %rdx, %rsi /* entry address */

        /* Load the mb2-mandated code and data segments. */
        leaq    entry_gdt_base(%rip), %rcx
        leaq    entry_gdt(%rip), %rax
        movq    %rax, (%rcx)    /* store &entry_gdt as the GDTR base */

        leaq    entry_gdtr(%rip), %rax
        lgdt    (%rax)
        /* Load our new %cs by building an iretq frame. */
        movq    %rsp, %rax
        pushq   $GDTSEL_DATA    /* new %ss */
        pushq   %rax            /* new %rsp (unchanged) */
        pushf                   /* assembles as pushfq: new %rflags */
        pushq   $GDTSEL_CODE    /* new %cs */
        lea     newcs(%rip), %rax
        pushq   %rax            /* new %rip */
        iretq
        .code32
newcs:

        movw    $GDTSEL_DATA, %ax
        movw    %ax, %ds
        movw    %ax, %es
        movw    %ax, %fs
        movw    %ax, %gs
        movw    %ax, %ss

        /* Disable paging */
        movl    %cr0, %eax
        andl    $~CR0_PG, %eax
        movl    %eax, %cr0

        /* Drop PSE/PAE/PGE */
        movl    %cr4, %eax
        andb    $~(CR4_PAE | CR4_PGE | CR4_PSE), %al
        movl    %eax, %cr4

[...]
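The elided lines above presumably finish the drop out of long mode by clearing EFER.LME, which is what the `MSR_EFER` and `EFER_LME` definitions at the top of the file are for. A minimal sketch of that step, using those definitions:

        movl    $MSR_EFER, %ecx         /* select the EFER MSR */
        rdmsr                           /* EFER -> %edx:%eax */
        andl    $~EFER_LME, %eax        /* clear long mode enable */
        wrmsr                           /* write EFER back */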

        /* %ebx still has our infop */
        movl    %edi, %eax      /* %eax = the MB2 magic for the kernel */
        jmp     *%esi

        /*
         * %rdi -> struct mb2 *
         * %rsi -> stack pointer to switch to
         * %rdx -> &multiboot2_enter_kernel
         */
        .align 16
        .code64
        .globl multiboot2_bounce

multiboot2_bounce:
        movq    %rsi, %rsp      /* switch to the provided stack */
        jmp     *%rdx           /* tail-call multiboot2_enter_kernel */

        .data
        .align 16
entry_gdt:
        /* null entry */
        .word   0x0, 0x0
        .byte   0x0, 0x0, 0x0, 0x0

        /* 32 bit protected mode code segment */
        .word   0xffff, 0x0
        .byte   0x0, 0x9f, 0xcf, 0x0

        /* 32 bit protected mode data segment */
        .word   0xffff, 0x0
        .byte   0x0, 0x93, 0xcf, 0x0

entry_gdt_end:
        .equ    entry_gdt_length, entry_gdt_end - entry_gdt
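A note on the descriptor bytes, which are easy to misread: in each 8-byte entry, the fifth byte is the access byte and the sixth packs the flag nibble with limit[19:16]. Decoding the values used here:

        /*
         * 0x9f access byte: present, DPL 0, code segment
         *      (conforming, readable, accessed)
         * 0x93 access byte: present, DPL 0, data segment
         *      (writable, accessed)
         * 0xcf flags/limit: G=1 (4 KiB granularity), D/B=1 (32-bit),
         *      limit[19:16] = 0xf, giving flat 4 GiB segments
         */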

        .align 16
entry_gdtr:
        .word entry_gdt_length - 1
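The excerpt is cut off here. For the `lgdt` in `multiboot2_entry` to work, `entry_gdtr` must be followed by a base field, and the `movq %rax, (%rcx)` near the top stores `&entry_gdt` through the `entry_gdt_base` symbol. The truncated tail presumably reads:

entry_gdt_base:
        .quad   0               /* patched at runtime with &entry_gdt */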