Ticket #6310: x86_64_kernel_sources.unfinished.patch

File x86_64_kernel_sources.unfinished.patch, 76.1 KB (added by mmlr, 14 years ago)
  • src/system/kernel/lib/arch/x86_64/arch_string.S

     
     1/*
     2 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
     3 * Distributed under the terms of the MIT License.
     4 *
     5 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
     6 * Distributed under the terms of the NewOS License.
     7*/
     8
     9#if !_BOOT_MODE
     10#   include "asm_offsets.h"
     11#endif
     12
     13#include <asm_defs.h>
     14
     15
     16// We don't need the indirection in the boot loader.
     17#if _BOOT_MODE
     18#   define memcpy_generic   memcpy
     19#   define memset_generic   memset
     20#endif
     21
     22
     23.align 4
     24FUNCTION(memcpy_generic):
     25    push    %rsi
     26    push    %rdi
      27    mov 24(%rsp),%rdi   /* dest */
      28    mov %rdi,%rax   /* save dest ptr as return value */
      29    mov 32(%rsp),%rsi   /* source */
      30    mov 40(%rsp),%rcx   /* count */
     31
     32    /* move by words */
     33    // TODO: The addresses might not be aligned!
     34    cld
     35    shr $2,%rcx
     36    rep
     37    movsl
     38
     39    /* move any remaining data by bytes */
      40    mov 40(%rsp),%rcx
     41    and $3,%rcx
     42    rep
     43    movsb
     44
     45    pop %rdi
     46    pop %rsi
     47    ret
     48FUNCTION_END(memcpy_generic)
     49SYMBOL(memcpy_generic_end):
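      // For comparison: when called from C, the kernel's memcpy() receives its
      // arguments in registers per the AMD64 SysV ABI (%rdi = dest,
      // %rsi = source, %rdx = count) rather than on the stack as above. A
      // minimal sketch of an ABI-conforming variant, moving quadwords instead
      // of 4-byte lwords (an editorial sketch, not part of this patch):
      //
      //  FUNCTION(memcpy_sysv_sketch):
      //      mov     %rdi, %rax      /* return value is the dest pointer */
      //      mov     %rdx, %rcx
      //      cld
      //      shr     $3, %rcx        /* move by quadwords first ... */
      //      rep     movsq
      //      mov     %rdx, %rcx
      //      and     $7, %rcx        /* ... then the remaining 0-7 bytes */
      //      rep     movsb
      //      ret
      //  FUNCTION_END(memcpy_sysv_sketch)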
     50
     51
     52/* void *memset(void *dest, int value, size_t length); */
     53.align 4
     54FUNCTION(memset_generic):
     55    push        %rbp
     56    mov     %rsp, %rbp
     57
      58    // %rax, %rcx, and %rdx are scratch registers -- we only have to save %rdi
     59    push        %rdi
     60
     61    // get the parameters
      62    mov     32(%rbp), %rcx
      63    mov     24(%rbp), %rax
      64    mov     16(%rbp), %rdi
     65
     66    // When touching less than 12 bytes, we just do it bytewise. We might be
     67    // able to process one or two lwords lwordwise, but the additional overhead
     68    // isn't worth it.
     69    cmp     $12, %rcx
     70    jl      2f
     71
     72    // buffer address lword-aligned?
     73    mov     %rdi, %rdx
     74    and     $0x3, %rdx
     75    jz      1f
     76
     77    // the buffer is unaligned -- copy the first bytes bytewise
     78    mov     $4, %rcx
     79    sub     %rdx, %rcx
     80    rep     stosb
     81
      82    mov     32(%rbp), %rcx
     83    sub     $4, %rcx
     84    add     %rdx, %rcx
     85
     861:  // lwordwise
     87    // prepare %eax -- the low byte must be copied to the other bytes
     88    mov     %al, %ah
     89    mov     %rax, %rdx
     90    shl     $16, %rax
     91    mov     %dx, %ax
     92
      93    // get the unaligned remainder into %rdx
     94    mov     %rcx, %rdx
     95    and     $0x3, %rdx
     96
      97    // write lwords
     98    shr     $2, %rcx
     99    rep     stosl
     100
     101    mov     %rdx, %rcx
     102
     1032:  // bytewise (remaining bytes)
     104    rep     stosb
     105
     106    pop     %rdi
     107
     108    // return value is the value passed in
      109    mov     16(%rbp), %rax
     110
     111    mov     %rbp, %rsp
     112    pop     %rbp
     113    ret
     114FUNCTION_END(memset_generic)
     115SYMBOL(memset_generic_end):
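      // The same ABI caveat applies to memset_generic: under the AMD64 SysV
      // ABI the arguments arrive as %rdi = dest, %esi = value, %rdx = length.
      // A register-argument sketch (editorial, not part of this patch) that
      // widens the fill byte to a full quadword so rep stosq can be used:
      //
      //  FUNCTION(memset_sysv_sketch):
      //      mov     %rdi, %r8       /* save dest for the return value */
      //      movzbl  %sil, %eax
      //      movabs  $0x0101010101010101, %rsi
      //      imul    %rsi, %rax      /* replicate the byte into all 8 lanes */
      //      mov     %rdx, %rcx
      //      cld
      //      shr     $3, %rcx        /* fill by quadwords first ... */
      //      rep     stosq
      //      mov     %rdx, %rcx
      //      and     $7, %rcx        /* ... then the remaining 0-7 bytes */
      //      rep     stosb
      //      mov     %r8, %rax
      //      ret
      //  FUNCTION_END(memset_sysv_sketch)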
     116
     117
     118#if !_BOOT_MODE
     119
     120.align 4
     121FUNCTION(memcpy):
     122//  jmp     *(gOptimizedFunctions + X86_OPTIMIZED_FUNCTIONS_memcpy)
     123FUNCTION_END(memcpy)
     124
     125FUNCTION(memset):
     126//  jmp     *(gOptimizedFunctions + X86_OPTIMIZED_FUNCTIONS_memset)
     127FUNCTION_END(memset)
     128
     129#endif  // !_BOOT_MODE
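      // Once gOptimizedFunctions and the asm_offsets.h constants exist for the
      // x86_64 build, the indirection commented out above would most naturally
      // become a RIP-relative indirect jump (a sketch, assuming the x86
      // X86_OPTIMIZED_FUNCTIONS_* offsets carry over):
      //
      //  FUNCTION(memcpy):
      //      jmp *(gOptimizedFunctions + X86_OPTIMIZED_FUNCTIONS_memcpy)(%rip)
      //  FUNCTION_END(memcpy)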
  • src/system/kernel/arch/x86_64/arch_x86_64.S

     
     1/*
     2 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
     3 * Distributed under the terms of the MIT License.
     4 *
     5 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
     6 * Copyright 2002, Michael Noisternig. All rights reserved.
     7 * Distributed under the terms of the NewOS License.
     8 */
     9
     10
     11#include <asm_defs.h>
     12
     13#include <arch/x86_64/descriptors.h>
     14
     15
     16.text
     17
     18/*! \fn void arch_cpu_user_TLB_invalidate()
      19    Invalidates all non-global TLB entries by reloading CR3. Must be called with interrupts disabled.
     20*/
     21FUNCTION(arch_cpu_user_TLB_invalidate):
     22    mov %cr3, %rax
     23    mov %rax, %cr3
     24    ret
     25FUNCTION_END(arch_cpu_user_TLB_invalidate)
     26
     27/* status_t arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
     28FUNCTION(arch_cpu_user_memcpy):
     29    push    %rsi
     30    push    %rdi
      31    mov 24(%rsp),%rdi   /* dest */
      32    mov 32(%rsp),%rsi   /* source */
      33    mov 40(%rsp),%rcx   /* count */
      34
      35    /* set the fault handler */
      36    mov 48(%rsp),%rdx   /* fault handler */
      37    mov (%rdx),%rax
      38    movq    $.L_user_memcpy_error, (%rdx)
     39
     40    /* move by words */
     41    cld
     42    shr $2,%rcx
     43    rep
     44    movsl
     45
     46    /* move any remaining data by bytes */
      47    mov 40(%rsp),%rcx
     48    and $3,%rcx
     49    rep
     50    movsb
     51
     52    /* restore the old fault handler */
     53    mov %rax,(%rdx)
     54    xor %rax,%rax
     55
     56    pop %rdi
     57    pop %rsi
     58    ret
     59
     60    /* error condition */
     61.L_user_memcpy_error:
     62    /* restore the old fault handler */
     63    mov %rax,(%rdx)
     64    mov $-1,%rax    /* return a generic error, the wrapper routine will deal with it */
     65    pop %rdi
     66    pop %rsi
     67    ret
     68FUNCTION_END(arch_cpu_user_memcpy)
     69
     70/* uint64 x86_64_read_rbp(); */
     71FUNCTION(x86_64_read_rbp):
     72    mov %rbp, %rax
     73    ret
     74FUNCTION_END(x86_64_read_rbp)
     75
     76
     77/*! \fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
     78        jmp_buf jumpBuffer, void (*function)(void*), void* parameter)
     79
     80    Called by debug_call_with_fault_handler() to do the dirty work of setting
     81    the fault handler and calling the function. If the function causes a page
     82    fault, the arch_debug_call_with_fault_handler() calls longjmp() with the
     83    given \a jumpBuffer. Otherwise it returns normally.
     84
     85    debug_call_with_fault_handler() has already saved the CPU's fault_handler
     86    and fault_handler_stack_pointer and will reset them later, so
     87    arch_debug_call_with_fault_handler() doesn't need to care about it.
     88
     89    \param cpu The \c cpu_ent for the current CPU.
     90    \param jumpBuffer Buffer to be used for longjmp().
     91    \param function The function to be called.
     92    \param parameter The parameter to be passed to the function to be called.
     93*/
     94FUNCTION(arch_debug_call_with_fault_handler):
     95    ret
     96FUNCTION_END(arch_debug_call_with_fault_handler)
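      // The body above is still missing. A rough sketch of what it might look
      // like, modeled on the x86 implementation, assuming AMD64 SysV register
      // arguments (%rdi = cpu, %rsi = jumpBuffer, %rdx = function,
      // %rcx = parameter) and that asm_offsets.h provides
      // CPU_ENT_fault_handler/CPU_ENT_fault_handler_stack_pointer as on x86:
      //
      //  FUNCTION(arch_debug_call_with_fault_handler):
      //      push    %rbp
      //      mov     %rsp, %rbp
      //      push    %rbx                // callee-saved; holds the jump buffer
      //      mov     %rsi, %rbx
      //
      //      // set fault handler address and stack pointer in the cpu_ent
      //      lea     1f(%rip), %rax
      //      mov     %rax, CPU_ENT_fault_handler(%rdi)
      //      mov     %rbp, CPU_ENT_fault_handler_stack_pointer(%rdi)
      //
      //      // call the given function with the given parameter
      //      mov     %rcx, %rdi
      //      call    *%rdx
      //
      //      pop     %rbx
      //      pop     %rbp
      //      ret
      //
      //  1:  // a fault occurred -- the fault handler reset %rsp to the value
      //      // saved above; return via longjmp(jumpBuffer, 1)
      //      mov     %rbx, %rdi
      //      mov     $1, %esi
      //      call    longjmp
      //  FUNCTION_END(arch_debug_call_with_fault_handler)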
     97
     98
     99/* void x86_64_fxrstor(const void *fpu_state); */
     100FUNCTION(x86_64_fxrstor):
     101    mov 8(%rsp), %rax
     102    fxrstor (%rax)
     103    ret
     104FUNCTION_END(x86_64_fxrstor)
     105
     106/* void x86_64_frstor(const void *fpu_state); */
     107FUNCTION(x86_64_frstor):
     108    mov 8(%rsp), %rax
     109    frstor  (%rax)
     110    ret
     111FUNCTION_END(x86_64_frstor)
     112
     113/* void x86_64_fxsave(void *fpu_state); */
     114FUNCTION(x86_64_fxsave):
     115    mov 8(%rsp), %rax
     116    fxsave  (%rax)
     117    ret
     118FUNCTION_END(x86_64_fxsave)
     119
     120/* void x86_64_fnsave(void *fpu_state); */
     121FUNCTION(x86_64_fnsave):
     122    mov 8(%rsp), %rax
     123    fnsave  (%rax)
     124    ret
     125FUNCTION_END(x86_64_fnsave)
     126
      127/* void x86_64_fnsave_swap(void *old_fpu_state, const void *new_fpu_state); */
     128FUNCTION(x86_64_fnsave_swap):
     129    mov 8(%rsp),%rax
     130    fnsave  (%rax)
     131    mov 16(%rsp),%rax
     132    frstor  (%rax)
     133    ret
     134FUNCTION_END(x86_64_fnsave_swap)
     135
     136/* void x86_64_fxsave_swap(void *old_fpu_state, const void *new_fpu_state); */
     137FUNCTION(x86_64_fxsave_swap):
     138    mov 8(%rsp),%rax
     139    fxsave  (%rax)
     140    mov 16(%rsp),%rax
     141    fxrstor (%rax)
     142    ret
     143FUNCTION_END(x86_64_fxsave_swap)
     144
     145/* uint64 x86_64_read_cr0(); */
     146FUNCTION(x86_64_read_cr0):
     147    mov %cr0, %rax
     148    ret
     149FUNCTION_END(x86_64_read_cr0)
     150
     151/* void x86_64_write_cr0(uint64 value); */
     152FUNCTION(x86_64_write_cr0):
     153    mov 8(%rsp), %rax
     154    mov %rax, %cr0
     155    ret
     156FUNCTION_END(x86_64_write_cr0)
     157
     158/* uint64 x86_64_read_cr4(); */
     159FUNCTION(x86_64_read_cr4):
     160    mov %cr4, %rax
     161    ret
     162FUNCTION_END(x86_64_read_cr4)
     163
     164/* void x86_64_write_cr4(uint64 value); */
     165FUNCTION(x86_64_write_cr4):
     166    mov 8(%rsp), %rax
     167    mov %rax, %cr4
     168    ret
     169FUNCTION_END(x86_64_write_cr4)
     170
      171/* uint64 x86_64_read_msr(uint32 register); */
      172FUNCTION(x86_64_read_msr):
      173    mov 8(%rsp), %rcx
     174    rdmsr
     175    ret
     176FUNCTION_END(x86_64_read_msr)
     177
      178/* void x86_64_write_msr(uint32 register, uint64 value); */
     179FUNCTION(x86_64_write_msr):
     180    ret
     181FUNCTION_END(x86_64_write_msr)
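      // rdmsr/wrmsr transfer the MSR value through the %edx:%eax pair with the
      // MSR index in %ecx, so the two routines above still need the 64-bit
      // combining/splitting. A sketch, keeping the stack-argument convention
      // used throughout this file:
      //
      //  FUNCTION(x86_64_read_msr):
      //      mov     8(%rsp), %rcx       // MSR index
      //      rdmsr                       // result in %edx:%eax
      //      shl     $32, %rdx
      //      or      %rdx, %rax          // combine into the uint64 return value
      //      ret
      //  FUNCTION_END(x86_64_read_msr)
      //
      //  FUNCTION(x86_64_write_msr):
      //      mov     8(%rsp), %rcx       // MSR index
      //      mov     16(%rsp), %rax      // value
      //      mov     %rax, %rdx
      //      shr     $32, %rdx           // split into %edx:%eax
      //      wrmsr
      //      ret
      //  FUNCTION_END(x86_64_write_msr)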
     182
     183/* void x86_64_context_switch(struct arch_thread* oldState,
     184    struct arch_thread* newState); */
     185FUNCTION(x86_64_context_switch):
     186    ret
     187FUNCTION_END(x86_64_context_switch)
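      // x86_64_context_switch() is still a stub. On AMD64 only %rbp, %rbx and
      // %r12-%r15 are callee-saved, so a minimal sketch (assuming SysV register
      // arguments and that arch_thread's saved stack pointer is its first
      // member, as on x86) could look like this:
      //
      //  FUNCTION(x86_64_context_switch):
      //      push    %rbp                // save the callee-saved registers ...
      //      push    %rbx
      //      push    %r12
      //      push    %r13
      //      push    %r14
      //      push    %r15
      //      movq    %rsp, (%rdi)        // ... and the old thread's stack pointer
      //      movq    (%rsi), %rsp        // switch to the new thread's stack
      //      pop     %r15
      //      pop     %r14
      //      pop     %r13
      //      pop     %r12
      //      pop     %rbx
      //      pop     %rbp
      //      ret
      //  FUNCTION_END(x86_64_context_switch)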
     188
     189/* void x86_64_swap_pgdir(uint64 newPageDir); */
     190FUNCTION(x86_64_swap_pgdir):
     191    mov 8(%rsp),%rax
     192    mov %rax,%cr3
     193    ret
     194FUNCTION_END(x86_64_swap_pgdir)
     195
     196/* thread exit stub - is copied to the userspace stack in arch_thread_enter_uspace() */
     197    .align 4
     198FUNCTION(x86_64_userspace_thread_exit):
     199FUNCTION_END(x86_64_userspace_thread_exit)
     200SYMBOL(x86_64_end_userspace_thread_exit):
     201
     202
     203/* void x86_64_enter_userspace(addr_t entry, addr_t stackTop); */
     204FUNCTION(x86_64_enter_userspace):
      205    iretq
     206FUNCTION_END(x86_64_enter_userspace)
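      // x86_64_enter_userspace() has to build a full iretq frame (SS:RSP,
      // RFLAGS, CS:RIP) before the iretq above can work. A sketch, assuming
      // SysV register arguments (%rdi = entry, %rsi = stackTop) and
      // USER_CODE_SEG/USER_DATA_SEG selector macros from descriptors.h as on
      // x86:
      //
      //  FUNCTION(x86_64_enter_userspace):
      //      mov     $USER_DATA_SEG, %rax
      //      mov     %ax, %ds
      //      mov     %ax, %es
      //      push    %rax                // ss
      //      push    %rsi                // userland stack top
      //      push    $(1 << 9)           // rflags -- interrupts enabled
      //      push    $USER_CODE_SEG      // cs
      //      push    %rdi                // rip = entry
      //      iretq
      //  FUNCTION_END(x86_64_enter_userspace)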
  • src/system/kernel/arch/x86_64/arch_user_debugger.cpp

     
     1/*
     2 * Copyright 2010, Nathan Mentley, nathanmentley@gmail.com.
     3 * Distributed under the terms of the MIT License.
     4 *
     5 * Copyright 2005-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
     6 * Distributed under the terms of the MIT License.
     7 */
     8
     9
     10#include <arch/user_debugger.h>
     11
     12#include <string.h>
     13
     14#include <debugger.h>
     15#include <driver_settings.h>
     16#include <int.h>
     17#include <team.h>
     18#include <thread.h>
     19#include <util/AutoLock.h>
     20
     21
     22//#define TRACE_ARCH_USER_DEBUGGER
     23#ifdef TRACE_ARCH_USER_DEBUGGER
     24#   define TRACE(x) dprintf x
     25#else
     26#   define TRACE(x) ;
     27#endif
     28
     29#define B_NO_MORE_BREAKPOINTS               B_BUSY
     30#define B_NO_MORE_WATCHPOINTS               B_BUSY
     31#define B_BAD_WATCHPOINT_ALIGNMENT          B_BAD_VALUE
     32#define B_WATCHPOINT_TYPE_NOT_SUPPORTED     B_NOT_SUPPORTED
     33#define B_WATCHPOINT_LENGTH_NOT_SUPPORTED   B_NOT_SUPPORTED
     34#define B_BREAKPOINT_NOT_FOUND              B_NAME_NOT_FOUND
     35#define B_WATCHPOINT_NOT_FOUND              B_NAME_NOT_FOUND
     36    // TODO: Make those real error codes.
     37
     38// The software breakpoint instruction (int3).
     39const uint8 kX86SoftwareBreakpoint[1] = { 0xcc };
     40
     41// maps breakpoint slot index to LEN_i LSB number
     42static const uint32 sDR7Len[4] = {
     43    X86_DR7_LEN0_LSB, X86_DR7_LEN1_LSB, X86_DR7_LEN2_LSB, X86_DR7_LEN3_LSB
     44};
     45
     46// maps breakpoint slot index to R/W_i LSB number
     47static const uint32 sDR7RW[4] = {
     48    X86_DR7_RW0_LSB, X86_DR7_RW1_LSB, X86_DR7_RW2_LSB, X86_DR7_RW3_LSB
     49};
     50
     51// maps breakpoint slot index to L_i bit number
     52static const uint32 sDR7L[4] = {
     53    X86_DR7_L0, X86_DR7_L1, X86_DR7_L2, X86_DR7_L3
     54};
     55
     56// maps breakpoint slot index to G_i bit number
     57static const uint32 sDR7G[4] = {
     58    X86_DR7_G0, X86_DR7_G1, X86_DR7_G2, X86_DR7_G3
     59};
     60
     61// maps breakpoint slot index to B_i bit number
     62static const uint32 sDR6B[4] = {
     63    X86_DR6_B0, X86_DR6_B1, X86_DR6_B2, X86_DR6_B3
     64};
     65
     66// Enables a hack to make single stepping work under qemu. Set via kernel
     67// driver settings.
     68static bool sQEmuSingleStepHack = false;
     69
     70
     71static void
     72get_iframe_registers(struct iframe *frame, debug_cpu_state *cpuState)
     73{
     74    cpuState->gs = frame->gs;
     75    cpuState->fs = frame->fs;
     76    cpuState->es = frame->es;
     77    cpuState->ds = frame->ds;
     78    cpuState->rdi = frame->rdi;
     79    cpuState->rsi = frame->rsi;
     80    cpuState->rbp = frame->rbp;
     81    cpuState->rsp = frame->rsp;
     82    cpuState->rbx = frame->rbx;
     83    cpuState->rdx = frame->orig_rdx;
     84    cpuState->rcx = frame->rcx;
     85    cpuState->rax = frame->orig_rax;
     86    cpuState->r8 = frame->r8;
     87    cpuState->r9 = frame->r9;
     88    cpuState->r10 = frame->r10;
     89    cpuState->r11 = frame->r11;
     90    cpuState->r12 = frame->r12;
     91    cpuState->r13 = frame->r13;
     92    cpuState->r14 = frame->r14;
     93    cpuState->r15 = frame->r15;
     94    cpuState->vector = frame->vector;
     95    cpuState->error_code = frame->error_code;
     96    cpuState->rip = frame->rip;
     97    cpuState->cs = frame->cs;
     98    cpuState->eflags = frame->flags;
     99    cpuState->user_rsp = frame->user_rsp;
     100    cpuState->user_ss = frame->user_ss;
     101}
     102
     103
     104static inline void
     105install_breakpoints(const arch_team_debug_info &teamInfo)
     106{
      107    // In long mode moves to debug registers must use 64-bit operands.
      108    // set breakpoints
      109    asm("mov %0, %%dr0" : : "r"(teamInfo.breakpoints[0].address));
      110    asm("mov %0, %%dr1" : : "r"(teamInfo.breakpoints[1].address));
      111    asm("mov %0, %%dr2" : : "r"(teamInfo.breakpoints[2].address));
      112//  asm("mov %0, %%dr3" : : "r"(teamInfo.breakpoints[3].address));
      113        // DR3 is used to hold the current struct thread*.
      114
      115    // enable breakpoints
      116    asm("mov %0, %%dr7" : : "r"((uint64)teamInfo.dr7));
     117}
     118
     119
     120/*! Sets a break-/watchpoint in the given team info.
     121    Interrupts must be disabled and the team debug info lock be held.
     122*/
     123static inline status_t
     124set_breakpoint(arch_team_debug_info &info, void *address, uint32 type,
     125    uint32 length, bool setGlobalFlag)
     126{
      127    // check if there is already a breakpoint at that address
     128    bool alreadySet = false;
     129    for (int32 i = 0; i < X86_BREAKPOINT_COUNT; i++) {
     130        if (info.breakpoints[i].address == address
     131            && info.breakpoints[i].type == type) {
     132            alreadySet = true;
     133            break;
     134        }
     135    }
     136
     137    if (!alreadySet) {
     138        // find a free slot
     139        int32 slot = -1;
     140        for (int32 i = 0; i < X86_BREAKPOINT_COUNT; i++) {
     141            if (!info.breakpoints[i].address) {
     142                slot = i;
     143                break;
     144            }
     145        }
     146
     147        // init the breakpoint
     148        if (slot >= 0) {
     149            info.breakpoints[slot].address = address;
     150            info.breakpoints[slot].type = type;
     151            info.breakpoints[slot].length = length;
     152
     153            info.dr7 |= (length << sDR7Len[slot])
     154                | (type << sDR7RW[slot])
     155                | (1 << sDR7L[slot]);
     156            if (setGlobalFlag)
     157                info.dr7 |= (1 << sDR7G[slot]);
     158        } else {
     159            if (type == X86_INSTRUCTION_BREAKPOINT)
     160                return B_NO_MORE_BREAKPOINTS;
     161            else
     162                return B_NO_MORE_WATCHPOINTS;
     163        }
     164    }
     165
     166    return B_OK;
     167}
     168
     169
     170/*! Clears a break-/watchpoint in the given team info.
     171    Interrupts must be disabled and the team debug info lock be held.
     172*/
     173static inline status_t
     174clear_breakpoint(arch_team_debug_info &info, void *address, bool watchpoint)
     175{
     176    // find the breakpoint
     177    int32 slot = -1;
     178    for (int32 i = 0; i < X86_BREAKPOINT_COUNT; i++) {
     179        if (info.breakpoints[i].address == address
     180            && (watchpoint
     181                != (info.breakpoints[i].type == X86_INSTRUCTION_BREAKPOINT))) {
     182            slot = i;
     183            break;
     184        }
     185    }
     186
     187    // clear the breakpoint
     188    if (slot >= 0) {
     189        info.breakpoints[slot].address = NULL;
     190
     191        info.dr7 &= ~((0x3 << sDR7Len[slot])
     192            | (0x3 << sDR7RW[slot])
     193            | (1 << sDR7L[slot])
     194            | (1 << sDR7G[slot]));
     195    } else {
     196        if (watchpoint)
     197            return B_WATCHPOINT_NOT_FOUND;
     198        else
     199            return B_BREAKPOINT_NOT_FOUND;
     200    }
     201
     202    return B_OK;
     203}
     204
     205
     206static status_t
     207set_breakpoint(void *address, uint32 type, uint32 length)
     208{
     209    if (!address)
     210        return B_BAD_VALUE;
     211
     212    struct thread *thread = thread_get_current_thread();
     213
     214    cpu_status state = disable_interrupts();
     215    GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
     216
     217    status_t error = set_breakpoint(thread->team->debug_info.arch_info, address,
     218        type, length, false);
     219
     220    RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
     221    restore_interrupts(state);
     222
     223    return error;
     224}
     225
     226
     227static status_t
     228clear_breakpoint(void *address, bool watchpoint)
     229{
     230    if (!address)
     231        return B_BAD_VALUE;
     232
     233    struct thread *thread = thread_get_current_thread();
     234
     235    cpu_status state = disable_interrupts();
     236    GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
     237
     238    status_t error = clear_breakpoint(thread->team->debug_info.arch_info,
     239        address, watchpoint);
     240
     241    RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
     242    restore_interrupts(state);
     243
     244    return error;
     245}
     246
     247
     248static inline status_t
     249check_watch_point_parameters(void* address, uint32 type, int32 length,
     250    uint32& archType, uint32& archLength)
     251{
     252    // check type
     253    switch (type) {
     254        case B_DATA_WRITE_WATCHPOINT:
     255            archType = X86_DATA_WRITE_BREAKPOINT;
     256            break;
     257        case B_DATA_READ_WRITE_WATCHPOINT:
     258            archType = X86_DATA_READ_WRITE_BREAKPOINT;
     259            break;
     260        case B_DATA_READ_WATCHPOINT:
     261        default:
     262            return B_WATCHPOINT_TYPE_NOT_SUPPORTED;
     263            break;
     264    }
     265
     266    // check length and alignment
     267    switch (length) {
     268        case 1:
     269            archLength = X86_BREAKPOINT_LENGTH_1;
     270            break;
     271        case 2:
     272            if ((uint64)address & 0x1)
     273                return B_BAD_WATCHPOINT_ALIGNMENT;
     274            archLength = X86_BREAKPOINT_LENGTH_2;
     275            break;
     276        case 4:
     277            if ((uint64)address & 0x3)
     278                return B_BAD_WATCHPOINT_ALIGNMENT;
     279            archLength = X86_BREAKPOINT_LENGTH_4;
     280            break;
     281        default:
     282            return B_WATCHPOINT_LENGTH_NOT_SUPPORTED;
     283    }
     284
     285    return B_OK;
     286}
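      // On x86-64 the debug registers additionally support 8-byte-wide
      // watchpoints (DR7 LEN encoding 0b10), so the switch above could grow a
      // matching case. X86_BREAKPOINT_LENGTH_8 is hypothetical here -- the x86
      // headers only define the 1-, 2- and 4-byte constants:
      //
      //      case 8:
      //          if ((uint64)address & 0x7)
      //              return B_BAD_WATCHPOINT_ALIGNMENT;
      //          archLength = X86_BREAKPOINT_LENGTH_8;
      //          break;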
     287
     288
     289void
     290arch_clear_team_debug_info(struct arch_team_debug_info *info)
     291{
     292    for (int32 i = 0; i < X86_BREAKPOINT_COUNT; i++)
     293        info->breakpoints[i].address = NULL;
     294
     295    info->dr7 = X86_BREAKPOINTS_DISABLED_DR7;
     296}
     297
     298
     299void
     300arch_destroy_team_debug_info(struct arch_team_debug_info *info)
     301{
     302    arch_clear_team_debug_info(info);
     303}
     304
     305
     306void
     307arch_clear_thread_debug_info(struct arch_thread_debug_info *info)
     308{
     309    info->flags = 0;
     310}
     311
     312
     313void
     314arch_destroy_thread_debug_info(struct arch_thread_debug_info *info)
     315{
     316    arch_clear_thread_debug_info(info);
     317}
     318
     319
     320void
     321arch_update_thread_single_step()
     322{
     323    if (struct iframe* frame = x86_64_get_user_iframe()) {
     324        struct thread* thread = thread_get_current_thread();
     325
     326        // set/clear TF in EFLAGS depending on if single stepping is desired
     327        if (thread->debug_info.flags & B_THREAD_DEBUG_SINGLE_STEP)
     328            frame->flags |= (1 << X86_EFLAGS_TF);
     329        else
     330            frame->flags &= ~(1 << X86_EFLAGS_TF);
     331    }
     332}
     333
     334
     335void
     336arch_set_debug_cpu_state(const debug_cpu_state *cpuState)
     337{
     338    if (struct iframe *frame = x86_64_get_user_iframe()) {
     339
      340        // Since fxrstor requires 16-byte alignment and this isn't
      341        // guaranteed for the passed buffer, we use our thread's fpu_state
      342        // field as a temporary buffer. We need to disable interrupts to
      343        // make use of it.
      344        struct thread* thread = thread_get_current_thread();
      345        InterruptsLocker locker;
      346        memcpy(thread->arch_info.fpu_state, &cpuState->extended_registers,
      347            sizeof(cpuState->extended_registers));
     348        x86_64_fxrstor(thread->arch_info.fpu_state);
     349
     350
     351//      frame->gs = cpuState->gs;
     352//      frame->fs = cpuState->fs;
     353//      frame->es = cpuState->es;
     354//      frame->ds = cpuState->ds;
     355        frame->rdi = cpuState->rdi;
     356        frame->rsi = cpuState->rsi;
     357        frame->rbp = cpuState->rbp;
     358//      frame->rsp = cpuState->rsp;
     359        frame->rbx = cpuState->rbx;
     360        frame->rdx = cpuState->rdx;
     361        frame->rcx = cpuState->rcx;
     362        frame->rax = cpuState->rax;
     363        frame->r8 = cpuState->r8;
     364        frame->r9 = cpuState->r9;
     365        frame->r10 = cpuState->r10;
     366        frame->r11 = cpuState->r11;
     367        frame->r12 = cpuState->r12;
     368        frame->r13 = cpuState->r13;
     369        frame->r14 = cpuState->r14;
     370        frame->r15 = cpuState->r15;
     371//      frame->vector = cpuState->vector;
     372//      frame->error_code = cpuState->error_code;
     373        frame->rip = cpuState->rip;
     374//      frame->cs = cpuState->cs;
     375        frame->flags = (frame->flags & ~X86_EFLAGS_USER_SETTABLE_FLAGS)
     376            | (cpuState->eflags & X86_EFLAGS_USER_SETTABLE_FLAGS);
     377        frame->user_rsp = cpuState->user_rsp;
     378//      frame->user_ss = cpuState->user_ss;
     379    }
     380}
     381
     382
     383void
     384arch_get_debug_cpu_state(debug_cpu_state *cpuState)
     385{
     386    if (struct iframe *frame = x86_64_get_user_iframe()) {
     387
      388        // Since fxsave requires 16-byte alignment and this isn't guaranteed
      389        // for the passed buffer, we use our thread's fpu_state field as a
      390        // temporary buffer. We need to disable interrupts to make use of it.
      391        struct thread* thread = thread_get_current_thread();
      392        InterruptsLocker locker;
      393        x86_64_fxsave(thread->arch_info.fpu_state);
      394            // unlike fnsave, fxsave doesn't reinit the FPU state
      395        memcpy(&cpuState->extended_registers, thread->arch_info.fpu_state,
      396            sizeof(cpuState->extended_registers));
     397        get_iframe_registers(frame, cpuState);
     398    }
     399}
     400
     401
     402status_t
     403arch_set_breakpoint(void *address)
     404{
     405    return set_breakpoint(address, X86_INSTRUCTION_BREAKPOINT,
     406        X86_BREAKPOINT_LENGTH_1);
     407}
     408
     409
     410status_t
     411arch_clear_breakpoint(void *address)
     412{
     413    return clear_breakpoint(address, false);
     414}
     415
     416
     417status_t
     418arch_set_watchpoint(void *address, uint32 type, int32 length)
     419{
     420    uint32 archType, archLength;
     421    status_t error = check_watch_point_parameters(address, type, length,
     422        archType, archLength);
     423    if (error != B_OK)
     424        return error;
     425
     426    return set_breakpoint(address, archType, archLength);
     427}
     428
     429
     430status_t
     431arch_clear_watchpoint(void *address)
     432{
     433    return clear_breakpoint(address, true);
     434}
     435
     436bool
     437arch_has_breakpoints(struct arch_team_debug_info *info)
     438{
      439    // Reading info->dr7 is atomic, so we don't need to lock. The caller
      440    // has to ensure that the info doesn't go away.
     441    return (info->dr7 != X86_BREAKPOINTS_DISABLED_DR7);
     442}
     443
     444
     445static void
     446install_breakpoints_per_cpu(void* /*cookie*/, int cpu)
     447{
     448    struct team* kernelTeam = team_get_kernel_team();
     449
     450    GRAB_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
     451
     452    install_breakpoints(kernelTeam->debug_info.arch_info);
     453
     454    RELEASE_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
     455}
     456
     457
     458// #pragma mark - kernel debugger commands
     459
     460#if KERNEL_BREAKPOINTS
     461
     462static int
     463debugger_breakpoints(int argc, char** argv)
     464{
     465    struct team* kernelTeam = team_get_kernel_team();
     466    arch_team_debug_info& info = kernelTeam->debug_info.arch_info;
     467
     468    for (int32 i = 0; i < X86_BREAKPOINT_COUNT; i++) {
     469        kprintf("breakpoint[%ld] ", i);
     470
     471        if (info.breakpoints[i].address != NULL) {
     472            kprintf("%p ", info.breakpoints[i].address);
     473            switch (info.breakpoints[i].type) {
     474                case X86_INSTRUCTION_BREAKPOINT:
     475                    kprintf("instruction");
     476                    break;
     477                case X86_IO_READ_WRITE_BREAKPOINT:
     478                    kprintf("io read/write");
     479                    break;
     480                case X86_DATA_WRITE_BREAKPOINT:
     481                    kprintf("data write");
     482                    break;
     483                case X86_DATA_READ_WRITE_BREAKPOINT:
     484                    kprintf("data read/write");
     485                    break;
     486            }
     487
     488            int length = 1;
     489            switch (info.breakpoints[i].length) {
     490                case X86_BREAKPOINT_LENGTH_1:
     491                    length = 1;
     492                    break;
     493                case X86_BREAKPOINT_LENGTH_2:
     494                    length = 2;
     495                    break;
     496                case X86_BREAKPOINT_LENGTH_4:
     497                    length = 4;
     498                    break;
     499            }
     500
     501            if (info.breakpoints[i].type != X86_INSTRUCTION_BREAKPOINT)
     502                kprintf(" %d byte%s", length, (length > 1 ? "s" : ""));
     503        } else
     504            kprintf("unused");
     505
     506        kprintf("\n");
     507    }
     508
     509    return 0;
     510}
     511
     512
     513static int
     514debugger_breakpoint(int argc, char** argv)
     515{
     516    // get arguments
     517
     518    if (argc < 2 || argc > 3)
     519        return print_debugger_command_usage(argv[0]);
     520
     521    addr_t address = strtoul(argv[1], NULL, 0);
     522    if (address == 0)
     523        return print_debugger_command_usage(argv[0]);
     524
     525    bool clear = false;
     526    if (argc == 3) {
     527        if (strcmp(argv[2], "clear") == 0)
     528            clear = true;
     529        else
     530            return print_debugger_command_usage(argv[0]);
     531    }
     532
     533    // set/clear breakpoint
     534
     535    arch_team_debug_info& info = team_get_kernel_team()->debug_info.arch_info;
     536
     537    status_t error;
     538
     539    if (clear) {
     540        error = clear_breakpoint(info, (void*)address, false);
     541    } else {
     542        error = set_breakpoint(info, (void*)address, X86_INSTRUCTION_BREAKPOINT,
     543            X86_BREAKPOINT_LENGTH_1, true);
     544    }
     545
     546    if (error == B_OK)
     547        call_all_cpus_sync(install_breakpoints_per_cpu, NULL);
     548    else
     549        kprintf("Failed to install breakpoint: %s\n", strerror(error));
     550
     551    return 0;
     552}
     553
     554
     555static int
     556debugger_watchpoint(int argc, char** argv)
     557{
     558    // get arguments
     559
     560    if (argc < 2 || argc > 4)
     561        return print_debugger_command_usage(argv[0]);
     562
     563    addr_t address = strtoul(argv[1], NULL, 0);
     564    if (address == 0)
     565        return print_debugger_command_usage(argv[0]);
     566
     567    bool clear = false;
     568    bool readWrite = false;
     569    int argi = 2;
     570    int length = 1;
     571    if (argc >= 3) {
     572        if (strcmp(argv[argi], "clear") == 0) {
     573            clear = true;
     574            argi++;
     575        } else if (strcmp(argv[argi], "rw") == 0) {
     576            readWrite = true;
     577            argi++;
     578        }
     579
     580        if (!clear && argi < argc)
     581            length = strtoul(argv[argi++], NULL, 0);
     582
     583        if (length == 0 || argi < argc)
     584            return print_debugger_command_usage(argv[0]);
     585    }
     586
     587    // set/clear breakpoint
     588
     589    arch_team_debug_info& info = team_get_kernel_team()->debug_info.arch_info;
     590
     591    status_t error;
     592
     593    if (clear) {
     594        error = clear_breakpoint(info, (void*)address, true);
     595    } else {
     596        uint32 type = readWrite ? B_DATA_READ_WRITE_WATCHPOINT
     597            : B_DATA_WRITE_WATCHPOINT;
     598
     599        uint32 archType, archLength;
     600        error = check_watch_point_parameters((void*)address, type, length,
     601            archType, archLength);
     602
     603        if (error == B_OK) {
     604            error = set_breakpoint(info, (void*)address, archType, archLength,
     605                true);
     606        }
     607    }
     608
     609    if (error == B_OK)
     610        call_all_cpus_sync(install_breakpoints_per_cpu, NULL);
     611    else
     612        kprintf("Failed to install breakpoint: %s\n", strerror(error));
     613
     614    return 0;
     615}
     616
     617
     618static int
     619debugger_single_step(int argc, char** argv)
     620{
     621    // TODO: Since we need an iframe, this doesn't work when KDL wasn't entered
     622    // via an exception.
     623
     624    struct iframe* frame = x86_64_get_current_iframe();
     625    if (frame == NULL) {
     626        kprintf("Failed to get the current iframe!\n");
     627        return 0;
     628    }
     629
     630    frame->flags |= (1 << X86_EFLAGS_TF);
     631
     632    return B_KDEBUG_QUIT;
     633}
     634
     635
     636#endif  // KERNEL_BREAKPOINTS
     637
     638
     639void
     640x86_64_init_user_debug()
     641{
     642    // get debug settings
     643    if (void *handle = load_driver_settings("kernel")) {
      644        sQEmuSingleStepHack = get_driver_boolean_parameter(handle,
      645            "qemu_single_step_hack", false, false);
     646
     647        unload_driver_settings(handle);
     648    }
     649
     650#if KERNEL_BREAKPOINTS
     651    // install debugger commands
     652    add_debugger_command_etc("breakpoints", &debugger_breakpoints,
     653        "Lists current break-/watchpoints",
     654        "\n"
     655        "Lists the current kernel break-/watchpoints.\n", 0);
     656    add_debugger_command_alias("watchpoints", "breakpoints", NULL);
     657    add_debugger_command_etc("breakpoint", &debugger_breakpoint,
     658        "Set/clears a breakpoint",
     659        "<address> [ clear ]\n"
     660        "Sets respectively clears the breakpoint at address <address>.\n", 0);
     661    add_debugger_command_etc("watchpoint", &debugger_watchpoint,
     662        "Set/clears a watchpoint",
     663        "<address> <address> ( [ rw ] [ <size> ] | clear )\n"
     664        "Sets respectively clears the watchpoint at address <address>.\n"
     665        "If \"rw\" is given the new watchpoint is a read/write watchpoint\n"
     666        "otherwise a write watchpoint only.\n", 0);
     667    add_debugger_command_etc("step", &debugger_single_step,
     668        "Single-steps to the next instruction",
     669        "\n"
     670        "Single-steps to the next instruction.\n", 0);
     671#endif
     672}
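// For reference, the QEMU hack read above can be enabled by adding the line
//
//      qemu_single_step_hack true
//
// to the "kernel" driver settings file (on a Haiku installation under
// ~/config/settings/kernel/drivers/).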
     673
     674
     675/**
     676 *  Interrupts are disabled and will possibly be enabled by the function.
     677 */
     678void
     679x86_64_handle_debug_exception(struct iframe *frame)
     680{
     681    struct thread* thread = thread_get_current_thread();
     682
     683    // Get dr6 and dr7. If the given iframe is a userland frame, the exception
     684    // obviously occurred in userland. In that case
     685    // x86_64_exit_user_debug_at_kernel_entry() has already been invoked and dr6
     686    // and dr7 are stored in the cpu info. Otherwise we need to fetch the
     687    // current values from the registers.
      688    uint64 dr6;
      689    uint64 dr7;
      690    if (IFRAME_IS_USER(frame)) {
      691        dr6 = thread->cpu->arch.dr6;
      692        dr7 = thread->cpu->arch.dr7;
      693    } else {
      694        asm("mov %%dr6, %0" : "=r"(dr6));
      695        asm("mov %%dr7, %0" : "=r"(dr7));
      696    }
     697
     698    TRACE(("x86_64_handle_debug_exception(): DR6: %lx, DR7: %lx\n", dr6, dr7));
     699
      700    // check which exception condition applies
     701    if (dr6 & X86_DR6_BREAKPOINT_MASK) {
     702        // breakpoint
     703
     704        // check which breakpoint was taken
     705        bool watchpoint = true;
     706        for (int32 i = 0; i < X86_BREAKPOINT_COUNT; i++) {
     707            if (dr6 & (1 << sDR6B[i])) {
     708                uint32 type = (dr7 >> sDR7RW[i]) & 0x3;
     709                if (type == X86_INSTRUCTION_BREAKPOINT)
     710                    watchpoint = false;
     711            }
     712        }
     713
     714        if (IFRAME_IS_USER(frame)) {
     715            // enable interrupts and notify the debugger
     716            enable_interrupts();
     717
     718            if (watchpoint)
     719                user_debug_watchpoint_hit();
     720            else
     721                user_debug_breakpoint_hit(false);
     722        } else {
     723            panic("hit kernel %spoint: dr6: 0x%lx, dr7: 0x%lx",
     724                watchpoint ? "watch" : "break", dr6, dr7);
     725        }
     726    } else if (dr6 & (1 << X86_DR6_BD)) {
     727        // general detect exception
      728    // Occurs only if GD in DR7 is set (which we don't do) and someone
     729        // tries to write to the debug registers.
     730        if (IFRAME_IS_USER(frame)) {
     731            dprintf("x86_64_handle_debug_exception(): ignoring spurious general "
     732                "detect exception\n");
     733
     734            enable_interrupts();
     735        } else
     736            panic("spurious general detect exception in kernel mode");
     737    } else if ((dr6 & (1 << X86_DR6_BS)) || sQEmuSingleStepHack) {
     738        // single step
     739
     740        if (IFRAME_IS_USER(frame)) {
     741            // enable interrupts and notify the debugger
     742            enable_interrupts();
     743
     744            user_debug_single_stepped();
     745        } else {
     746            // Disable single-stepping -- the next "step" command will re-enable
     747            // it, but we don't want it when continuing otherwise.
     748            frame->flags &= ~(1 << X86_EFLAGS_TF);
     749
     750            // Determine whether the exception occurred at a syscall/trap
     751            // kernel entry or whether this is genuine kernel single-stepping.
     752            bool inKernel = true;
     753            if (thread->team != team_get_kernel_team()
     754                && x86_64_get_user_iframe() == NULL) {
     755                // TODO: This is not yet fully correct, since a newly created
      756                // thread that hasn't entered userland yet also has this
     757                // property.
     758                inKernel = false;
     759            }
     760
     761            if (inKernel) {
     762                panic("kernel single step");
     763            } else {
     764                // The thread is a userland thread and it just entered the
     765                // kernel when the single-step exception occurred. This happens
     766                // e.g. when sysenter is called with single-stepping enabled.
     767                // We need to ignore the exception now and send a single-step
     768                // notification later, when the thread wants to return from the
     769                // kernel.
     770                InterruptsSpinLocker threadLocker(gThreadSpinlock);
     771
     772                // Check whether the team is still being debugged and set
     773                // the B_THREAD_DEBUG_NOTIFY_SINGLE_STEP and
     774                // B_THREAD_DEBUG_STOP flags, so that the thread will be
     775                // stopped when it is going to leave the kernel and notify the
     776                // debugger about the single-step event.
     777                int32 teamDebugFlags
     778                    = atomic_get(&thread->team->debug_info.flags);
     779                if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
     780                    atomic_or(&thread->debug_info.flags,
     781                        B_THREAD_DEBUG_NOTIFY_SINGLE_STEP
     782                            | B_THREAD_DEBUG_STOP);
     783                }
     784            }
     785        }
     786    } else if (dr6 & (1 << X86_DR6_BT)) {
     787        // task switch
      788    // Occurs only if T in EFLAGS is set (which we don't do).
     789        if (IFRAME_IS_USER(frame)) {
     790            dprintf("x86_64_handle_debug_exception(): ignoring spurious task switch "
     791                "exception\n");
     792
     793            enable_interrupts();
     794        } else
     795            panic("spurious task switch exception in kernel mode");
     796    } else {
     797        if (IFRAME_IS_USER(frame)) {
     798            TRACE(("x86_64_handle_debug_exception(): ignoring spurious debug "
     799                "exception (no condition recognized)\n"));
     800
     801            enable_interrupts();
     802        } else {
     803            panic("spurious debug exception in kernel mode (no condition "
     804                "recognized)");
     805        }
     806    }
     807}
     808
     809
     810/**
     811 *  Interrupts are disabled and will possibly be enabled by the function.
     812 */
     813void
     814x86_64_handle_breakpoint_exception(struct iframe *frame)
     815{
     816    TRACE(("x86_64_handle_breakpoint_exception()\n"));
     817
     818    // reset rip to the int3 instruction
     819    frame->rip--;
     820
     821    if (!IFRAME_IS_USER(frame)) {
     822        panic("breakpoint exception in kernel mode");
     823        return;
     824    }
     825
     826    enable_interrupts();
     827
     828    user_debug_breakpoint_hit(true);
     829}
     830
     831
     832static inline void
     833disable_breakpoints()
     834{
     835    asm("mov %0, %%dr7" : : "r"((uint64)X86_BREAKPOINTS_DISABLED_DR7));
     836}
     837
     838
     839/**
     840 *  Interrupts are disabled.
     841 */
     842void
     843x86_64_exit_user_debug_at_kernel_entry()
     844{
     845    struct thread *thread = thread_get_current_thread();
     846
     847    if (!(thread->flags & THREAD_FLAGS_BREAKPOINTS_INSTALLED))
     848        return;
     849
     850    // We need to save the current values of dr6 and dr7 in the CPU structure,
     851    // since in case of a debug exception we might overwrite them before
     852    // x86_64_handle_debug_exception() is called.
     853//  asm("mov %%dr6, %0" : "=r"(thread->cpu->arch.dr6));
     854//  asm("mov %%dr7, %0" : "=r"(thread->cpu->arch.dr7));
     855
     856    GRAB_THREAD_LOCK();
     857
     858    // disable user breakpoints
     859    disable_breakpoints();
     860
     861    // install kernel breakpoints
     862    struct team* kernelTeam = team_get_kernel_team();
     863    GRAB_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
     864    install_breakpoints(kernelTeam->debug_info.arch_info);
     865    RELEASE_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
     866
     867    atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_INSTALLED);
     868
     869    RELEASE_THREAD_LOCK();
     870}
     871
     872
     873/**
     874 *  Interrupts are disabled. \a frame is unused, i.e. can be \c NULL.
     875 */
     876void
     877x86_64_init_user_debug_at_kernel_exit(struct iframe *frame)
     878{
     879    struct thread *thread = thread_get_current_thread();
     880
     881    if (!(thread->flags & THREAD_FLAGS_BREAKPOINTS_DEFINED))
     882        return;
     883
     884    // disable kernel breakpoints
     885    disable_breakpoints();
     886
     887    GRAB_THREAD_LOCK();
     888    GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
     889
     890    arch_team_debug_info &teamInfo = thread->team->debug_info.arch_info;
     891
     892    // install the user breakpoints
     893    install_breakpoints(teamInfo);
     894
     895    atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_INSTALLED);
     896
     897    RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
     898    RELEASE_THREAD_LOCK();
     899}
  • src/system/kernel/arch/x86_64/entry.S

     
     1/*
     2 * Copyright 2010, Nathan Mentley, nathanmentley@gmail.com.
     3 * Distributed under the terms of the MIT License.
     4*/
     5
     6
     7#include <asm_defs.h>
     8
     9.code32
     10
     11FUNCTION(init_long_mode):
     12
     13    jmp jumptolong
     14.code64
     15jumptolong:
     16    call _start         // Jump to main kernel code
     17FUNCTION_END(init_long_mode)
     18
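      // Note that a plain jmp cannot switch the CPU into long mode: before the
      // .code64 section is reachable, PAE paging and EFER.LME have to be set
      // up and a far jump through a 64-bit code segment descriptor performed.
      // A rough sketch of the canonical sequence (the selector and PML4
      // symbols are placeholders, not part of this patch):
      //
      //  .code32
      //      mov     $pml4_base, %eax        // physical address of the PML4
      //      mov     %eax, %cr3
      //      mov     %cr4, %eax
      //      or      $(1 << 5), %eax         // CR4.PAE
      //      mov     %eax, %cr4
      //      mov     $0xc0000080, %ecx       // EFER
      //      rdmsr
      //      or      $(1 << 8), %eax         // EFER.LME
      //      wrmsr
      //      mov     %cr0, %eax
      //      or      $0x80000000, %eax       // CR0.PG -- activates long mode
      //      mov     %eax, %cr0
      //      ljmp    $KERNEL_CODE_SEG64, $jumptolong
      //  .code64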
  • src/system/kernel/arch/x86_64/cpuid.S

     
     1/*
     2 * Copyright 2004, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
     3 * Distributed under the terms of the MIT License.
     4 *
     5 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
     6 * Distributed under the terms of the NewOS License.
     7 */
     8
     9#include <asm_defs.h>
     10
     11
     12.text
     13
     14/* void get_current_cpuid(cpuid_info *info, uint32 eaxRegister) */
     15FUNCTION(get_current_cpuid):
     16    ret
     17FUNCTION_END(get_current_cpuid)
     18
     19
     20/* unsigned int get_eflags(void) */
     21FUNCTION(get_eflags):
     22    ret
     23FUNCTION_END(get_eflags)
     24
     25
     26/* void set_eflags(unsigned int val) */
     27FUNCTION(set_eflags):
     28    ret
     29FUNCTION_END(set_eflags)
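      // All three routines above are still stubs. get_eflags()/set_eflags()
      // are one-liners on x86-64; get_current_cpuid() is sketched assuming the
      // cpuid_info layout of the x86 version (%eax, %ebx, %edx, %ecx stored in
      // that order) and the stack-argument convention used elsewhere in this
      // patch:
      //
      //  FUNCTION(get_current_cpuid):
      //      push    %rbx                    // %rbx is callee-saved
      //      mov     16(%rsp), %rdi          // cpuid_info*
      //      mov     24(%rsp), %rax          // eaxRegister
      //      cpuid
      //      mov     %eax, 0(%rdi)
      //      mov     %ebx, 4(%rdi)
      //      mov     %edx, 8(%rdi)
      //      mov     %ecx, 12(%rdi)
      //      pop     %rbx
      //      ret
      //  FUNCTION_END(get_current_cpuid)
      //
      //  FUNCTION(get_eflags):
      //      pushf
      //      pop     %rax
      //      ret
      //  FUNCTION_END(get_eflags)
      //
      //  FUNCTION(set_eflags):
      //      mov     8(%rsp), %rax
      //      push    %rax
      //      popf
      //      ret
      //  FUNCTION_END(set_eflags)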
  • src/system/kernel/arch/x86_64/paging/X86_64PagingMethod.cpp

     
     1/*
     2 * Copyright 2010, Nathan Mentley, nathanmentley@gmail.com.
     3 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
     4 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
     5 * Distributed under the terms of the MIT License.
     6 *
     7 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
     8 * Distributed under the terms of the NewOS License.
     9 */
     10
     11
     12#include "paging/X86_64PagingMethod.h"
     13
     14#include <stdlib.h>
     15#include <string.h>
     16
     17#include <AutoDeleter.h>
     18
     19#include <boot/kernel_args.h>
     20#include <util/AutoLock.h>
     21#include <vm/vm.h>
     22#include <vm/vm_page.h>
     23#include <vm/VMAddressSpace.h>
     24
     25#include "paging/paging.h"
     26#include "paging/X86_64PagingStructures.h"
     27#include "paging/X86_64VMTranslationMap.h"
     28#include "paging/x86_64_physical_page_mapper.h"
     29#include "paging/x86_64_physical_page_mapper_large_memory.h"
     30
     31
     32//#define TRACE_X86_64_PAGING_METHOD
     33#ifdef TRACE_X86_64_PAGING_METHOD
     34#   define TRACE(x...) dprintf(x)
     35#else
     36#   define TRACE(x...) ;
     37#endif
     38
     39
     40using X86_64LargePhysicalPageMapper::PhysicalPageSlot;
     41
     42
      43// number of pages with physical addresses below 4 GB that will be cached
     44static const page_num_t kMaxFree32BitPagesCount = 32;
     45
     46
     47// #pragma mark - PhysicalPageSlotPool
     48
     49
     50struct X86_64PagingMethod::PhysicalPageSlotPool
     51    : X86_64LargePhysicalPageMapper::PhysicalPageSlotPool {
     52public:
     53    virtual                     ~PhysicalPageSlotPool();
     54
     55            status_t            InitInitial(X86_64PagingMethod* method,
     56                                    kernel_args* args);
     57            status_t            InitInitialPostArea(kernel_args* args);
     58
     59            void                Init(area_id dataArea,
     60                                    page_table_entry* pageTable,
     61                                    area_id virtualArea, addr_t virtualBase);
     62
     63    virtual status_t            AllocatePool(
     64                                    X86_64LargePhysicalPageMapper
     65                                        ::PhysicalPageSlotPool*& _pool);
     66    virtual void                Map(phys_addr_t physicalAddress,
     67                                    addr_t virtualAddress);
     68
     69public:
     70    static  PhysicalPageSlotPool sInitialPhysicalPagePool;
     71
     72private:
     73            area_id             fDataArea;
     74            area_id             fVirtualArea;
     75            addr_t              fVirtualBase;
     76            page_table_entry* fPageTable;
     77};
     78
     79
     80X86_64PagingMethod::PhysicalPageSlotPool
     81    X86_64PagingMethod::PhysicalPageSlotPool::sInitialPhysicalPagePool;
     82
     83
     84X86_64PagingMethod::PhysicalPageSlotPool::~PhysicalPageSlotPool()
     85{
     86}
     87
     88
     89status_t
     90X86_64PagingMethod::PhysicalPageSlotPool::InitInitial(
     91    X86_64PagingMethod* method, kernel_args* args)
     92{
     93    // allocate a virtual address range for the pages to be mapped into
     94    addr_t virtualBase = vm_allocate_early(args, kPageTableRange, 0, 0,
     95        kPageTableRange);
     96    if (virtualBase == 0) {
     97        panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
     98            "physical page pool space in virtual address space!");
     99        return B_ERROR;
     100    }
     101
     102    // allocate memory for the page table and data
     103    size_t areaSize = B_PAGE_SIZE
     104        + sizeof(PhysicalPageSlot[kPageTableEntryCount]);
     105    page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(
     106        args, areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
     107
     108    // clear the page table and put it in the page dir
     109    memset(pageTable, 0, B_PAGE_SIZE);
     110
     111    phys_addr_t physicalTable = 0;
     112    method->_EarlyQuery((addr_t)pageTable, &physicalTable);
     113
     114    page_directory_entry* entry = PageDirEntryForAddress(
     115        method->KernelVirtualPageDirs(), virtualBase);
     116    PutPageTableInPageDir(entry, physicalTable,
     117        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
     118
     119    // init the pool structure and add the initial pool
     120    Init(-1, pageTable, -1, (addr_t)virtualBase);
     121
     122    return B_OK;
     123}
     124
     125
     126status_t
     127X86_64PagingMethod::PhysicalPageSlotPool::InitInitialPostArea(
     128    kernel_args* args)
     129{
     130    // create an area for the (already allocated) data
     131    size_t areaSize = B_PAGE_SIZE
     132        + sizeof(PhysicalPageSlot[kPageTableEntryCount]);
     133    void* temp = fPageTable;
     134    area_id area = create_area("physical page pool", &temp,
     135        B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
     136        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
     137    if (area < B_OK) {
     138        panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
     139            "create area for physical page pool.");
     140        return area;
     141    }
     142    fDataArea = area;
     143
     144    // create an area for the virtual address space
     145    temp = (void*)fVirtualBase;
     146    area = vm_create_null_area(VMAddressSpace::KernelID(),
     147        "physical page pool space", &temp, B_EXACT_ADDRESS,
     148        kPageTableRange, 0);
     149    if (area < B_OK) {
     150        panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
     151            "create area for physical page pool space.");
     152        return area;
     153    }
     154    fVirtualArea = area;
     155
     156    return B_OK;
     157}
     158
     159
     160void
     161X86_64PagingMethod::PhysicalPageSlotPool::Init(area_id dataArea,
     162    page_table_entry* pageTable, area_id virtualArea, addr_t virtualBase)
     163{
     164    fDataArea = dataArea;
     165    fVirtualArea = virtualArea;
     166    fVirtualBase = virtualBase;
     167    fPageTable = pageTable;
     168
     169    // init slot list
     170    fSlots = (PhysicalPageSlot*)(fPageTable + kPageTableEntryCount);
     171    addr_t slotAddress = virtualBase;
     172    for (uint32 i = 0; i < kPageTableEntryCount;
     173            i++, slotAddress += B_PAGE_SIZE) {
     174        PhysicalPageSlot* slot = &fSlots[i];
     175        slot->next = slot + 1;
     176        slot->pool = this;
     177        slot->address = slotAddress;
     178    }
     179
     180    fSlots[kPageTableEntryCount - 1].next = NULL;
     181        // terminate list
     182}
     183
     184
     185void
     186X86_64PagingMethod::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
     187    addr_t virtualAddress)
     188{
     189    page_table_entry& pte = fPageTable[
     190        (virtualAddress - fVirtualBase) / B_PAGE_SIZE];
     191    pte = (physicalAddress & X86_64_PTE_ADDRESS_MASK)
     192        | X86_64_PTE_WRITABLE | X86_64_PTE_GLOBAL | X86_64_PTE_PRESENT;
     193
     194    invalidate_TLB(virtualAddress);
     195}
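// For illustration, the long-mode PTE fields combined above (the bit
// positions are architectural; the concrete mask values in the patch's
// paging.h are assumptions):
//
//      X86_64_PTE_PRESENT      (1ULL << 0)     // entry is valid
//      X86_64_PTE_WRITABLE     (1ULL << 1)     // writes allowed
//      X86_64_PTE_GLOBAL       (1ULL << 8)     // survives CR3 reloads
//      X86_64_PTE_ADDRESS_MASK 0x000ffffffffff000ULL   // phys. bits 12-51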
     196
     197
     198status_t
     199X86_64PagingMethod::PhysicalPageSlotPool::AllocatePool(
     200    X86_64LargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
     201{
     202    // create the pool structure
     203    PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
     204    if (pool == NULL)
     205        return B_NO_MEMORY;
     206    ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);
     207
     208    // create an area that can contain the page table and the slot
     209    // structures
     210    size_t areaSize = B_PAGE_SIZE
     211        + sizeof(PhysicalPageSlot[kPageTableEntryCount]);
     212    void* data;
     213    virtual_address_restrictions virtualRestrictions = {};
     214    virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
     215    physical_address_restrictions physicalRestrictions = {};
     216    area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
     217        PAGE_ALIGN(areaSize), B_FULL_LOCK,
     218        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
     219        &virtualRestrictions, &physicalRestrictions, &data);
     220    if (dataArea < 0)
     221        return dataArea;
     222
     223    // create the null area for the virtual address space
     224    void* virtualBase;
     225    area_id virtualArea = vm_create_null_area(
     226        VMAddressSpace::KernelID(), "physical page pool space",
     227        &virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, kPageTableRange,
     228        CREATE_AREA_PRIORITY_VIP);
     229    if (virtualArea < 0) {
     230        delete_area(dataArea);
     231        return virtualArea;
     232    }
     233
     234    // prepare the page table
     235    memset(data, 0, B_PAGE_SIZE);
     236
     237    // get the page table's physical address
     238    phys_addr_t physicalTable;
     239    X86_64VMTranslationMap* map = static_cast<X86_64VMTranslationMap*>(
     240        VMAddressSpace::Kernel()->TranslationMap());
     241    uint32 dummyFlags;
     242    cpu_status state = disable_interrupts();
     243    map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
     244    restore_interrupts(state);
     245
     246    // put the page table into the page directory
     247    page_directory_entry* pageDirEntry
     248        = X86_64PagingMethod::PageDirEntryForAddress(
     249            map->PagingStructures()->VirtualPageDirs(), (addr_t)virtualBase);
     250    PutPageTableInPageDir(pageDirEntry, physicalTable,
     251        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
     252
     253    // init the pool structure
     254    pool->Init(dataArea, (page_table_entry*)data, virtualArea,
     255        (addr_t)virtualBase);
     256    poolDeleter.Detach();
     257    _pool = pool;
     258    return B_OK;
     259}
     260
     261
     262// #pragma mark - X86_64PagingMethod
     263
     264
     265X86_64PagingMethod::X86_64PagingMethod()
     266    :
     267    fPhysicalPageMapper(NULL),
     268    fKernelPhysicalPageMapper(NULL),
     269    fFreePages(NULL),
     270    fFreePagesCount(0)
     271{
     272    mutex_init(&fFreePagesLock, "x86_64 free pages");
     273}
     274
     275
     276X86_64PagingMethod::~X86_64PagingMethod()
     277{
     278}
     279
     280
     281status_t
     282X86_64PagingMethod::Init(kernel_args* args,
     283    VMPhysicalPageMapper** _physicalPageMapper)
     284{
     285    // create the initial pool for the physical page mapper
     286    PhysicalPageSlotPool* pool
     287        = new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
     288            PhysicalPageSlotPool;
     289    status_t error = pool->InitInitial(this, args);
     290    if (error != B_OK) {
     291        panic("X86_64PagingMethod::Init(): Failed to create initial pool "
     292            "for physical page mapper!");
     293        return error;
     294    }
     295
     296    // create physical page mapper
     297    large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
     298        fKernelPhysicalPageMapper);
     299
     300    *_physicalPageMapper = fPhysicalPageMapper;
     301    return B_OK;
     302}
     303
     304
     305status_t
     306X86_64PagingMethod::InitPostArea(kernel_args* args)
     307{
     308    // wrap the kernel paging structures in an area
     309    area_id area = create_area("kernel paging structs", &fEarlyPageStructures,
     310        B_EXACT_ADDRESS, fEarlyPageStructuresSize, B_ALREADY_WIRED,
     311        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
     312    if (area < B_OK)
     313        return area;
     314
     315    // let the initial page pool create areas for its structures
     316    status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool
     317        .InitInitialPostArea(args);
     318    if (error != B_OK)
     319        return error;
     320
     321    // The early physical page mapping mechanism is no longer needed. Unmap the
     322    // slot.
     323    *fFreeVirtualSlotPTE = 0;
     324    invalidate_TLB(fFreeVirtualSlot);
     325
     326    fFreeVirtualSlotPTE = NULL;
     327    fFreeVirtualSlot = 0;
     328
     329    return B_OK;
     330}
     331
     332
     333status_t
     334X86_64PagingMethod::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
     335{
     336    X86_64VMTranslationMap* map = new(std::nothrow) X86_64VMTranslationMap;
     337    if (map == NULL)
     338        return B_NO_MEMORY;
     339
     340    status_t error = map->Init(kernel);
     341    if (error != B_OK) {
     342        delete map;
     343        return error;
     344    }
     345
     346    *_map = map;
     347    return B_OK;
     348}
     349
     350
     351status_t
     352X86_64PagingMethod::MapEarly(kernel_args* args, addr_t virtualAddress,
     353    phys_addr_t physicalAddress, uint8 attributes,
     354    phys_addr_t (*get_free_page)(kernel_args*))
     355{
     356    // check to see if a page table exists for this range
     357    page_directory_entry* pageDirEntry = PageDirEntryForAddress(
     358        fKernelVirtualPageDirs, virtualAddress);
     359    page_table_entry* pageTable;
     360    if ((*pageDirEntry & X86_64_PDE_PRESENT) == 0) {
     361        // we need to allocate a page table
     362        phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE;
     363
     364        TRACE("X86_64PagingMethod::MapEarly(): asked for free page for "
     365            "page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);
     366
     367        // put it in the page dir
     368        PutPageTableInPageDir(pageDirEntry, physicalPageTable, attributes);
     369
     370        // zero it out
     371        pageTable = _EarlyGetPageTable(physicalPageTable);
     372        memset(pageTable, 0, B_PAGE_SIZE);
     373    } else {
     374        // table already exists -- map it
     375        pageTable = _EarlyGetPageTable(
     376            *pageDirEntry & X86_64_PDE_ADDRESS_MASK);
     377    }
     378
     379    page_table_entry* entry = pageTable
     380        + virtualAddress / B_PAGE_SIZE % kPageTableEntryCount;
     381
     382    ASSERT_PRINT(
     383        (*entry & X86_64_PTE_PRESENT) == 0,
     384        "virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx64
     385        ", existing pte: %#" B_PRIx64, virtualAddress, *pageDirEntry, *entry);
     386
      387    // now, fill in the page table entry
     388    PutPageTableEntryInTable(entry, physicalAddress, attributes, 0,
     389        IS_KERNEL_ADDRESS(virtualAddress));
     390
     391    return B_OK;
     392}
     393
     394
     395bool
     396X86_64PagingMethod::IsKernelPageAccessible(addr_t virtualAddress,
     397    uint32 protection)
     398{
     399    // we can't check much without the physical page mapper
     400    if (fPhysicalPageMapper == NULL)
     401        return false;
     402
     403    // We only trust the kernel team's page directories. So switch to the
     404    // kernel PDPT first. Always set it to make sure the TLBs don't contain
     405    // obsolete data.
      406    phys_addr_t physicalPDPT;
     407    read_cr3(physicalPDPT);
     408    write_cr3(fKernelPhysicalPageDirPointerTable);
     409
     410    // get the PDPT entry for the address
     411    page_directory_pointer_table_entry pdptEntry = 0;
     412    if (physicalPDPT == fKernelPhysicalPageDirPointerTable) {
     413        pdptEntry = fKernelVirtualPageDirPointerTable[
     414            virtualAddress / kPageDirRange];
     415    } else {
     416        // map the original PDPT and get the entry
     417        void* handle;
     418        addr_t virtualPDPT;
     419        status_t error = fPhysicalPageMapper->GetPageDebug(physicalPDPT,
     420            &virtualPDPT, &handle);
     421        if (error == B_OK) {
     422            pdptEntry = ((page_directory_pointer_table_entry*)
     423                virtualPDPT)[virtualAddress / kPageDirRange];
     424            fPhysicalPageMapper->PutPageDebug(virtualPDPT, handle);
     425        }
     426    }
     427
     428    // map the page dir and get the entry
     429    page_directory_entry pageDirEntry = 0;
     430    if ((pdptEntry & X86_64_PDPTE_PRESENT) != 0) {
     431        void* handle;
     432        addr_t virtualPageDir;
     433        status_t error = fPhysicalPageMapper->GetPageDebug(
     434            pdptEntry & X86_64_PDPTE_ADDRESS_MASK, &virtualPageDir, &handle);
     435        if (error == B_OK) {
     436            pageDirEntry = ((page_directory_entry*)virtualPageDir)[
     437                virtualAddress / kPageTableRange % kPageDirEntryCount];
     438            fPhysicalPageMapper->PutPageDebug(virtualPageDir, handle);
     439        }
     440    }
     441
     442    // map the page table and get the entry
     443    page_table_entry pageTableEntry = 0;
     444    if ((pageDirEntry & X86_64_PDE_PRESENT) != 0) {
     445        void* handle;
     446        addr_t virtualPageTable;
     447        status_t error = fPhysicalPageMapper->GetPageDebug(
     448            pageDirEntry & X86_64_PDE_ADDRESS_MASK, &virtualPageTable,
     449            &handle);
     450        if (error == B_OK) {
     451            pageTableEntry = ((page_table_entry*)virtualPageTable)[
     452                virtualAddress / B_PAGE_SIZE % kPageTableEntryCount];
     453            fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
     454        }
     455    }
     456
     457    // switch back to the original page directory
     458    if (physicalPDPT != fKernelPhysicalPageDirPointerTable)
     459        write_cr3(physicalPDPT);
     460
     461    if ((pageTableEntry & X86_64_PTE_PRESENT) == 0)
     462        return false;
     463
     464    // present means kernel-readable, so check for writable
     465    return (protection & B_KERNEL_WRITE_AREA) == 0
     466        || (pageTableEntry & X86_64_PTE_WRITABLE) != 0;
     467}
     468
     469
     470/*static*/ void
     471X86_64PagingMethod::PutPageTableInPageDir(page_directory_entry* entry,
     472    phys_addr_t physicalTable, uint32 attributes)
     473{
     474    *entry = (physicalTable & X86_64_PDE_ADDRESS_MASK)
     475        | X86_64_PDE_PRESENT
     476        | X86_64_PDE_WRITABLE
     477        | X86_64_PDE_USER;
     478        // TODO: We ignore the attributes of the page table -- for compatibility
     479        // with BeOS we allow having user accessible areas in the kernel address
     480        // space. This is currently being used by some drivers, mainly for the
     481        // frame buffer. Our current real time data implementation makes use of
     482        // this fact, too.
     483        // We might want to get rid of this possibility one day, especially if
     484        // we intend to port it to a platform that does not support this.
     485}
     486
     487
     488/*static*/ void
     489X86_64PagingMethod::PutPageTableEntryInTable(page_table_entry* entry,
     490    phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
     491    bool globalPage)
     492{
     493    page_table_entry page = (physicalAddress & X86_64_PTE_ADDRESS_MASK)
     494        | X86_64_PTE_PRESENT | (globalPage ? X86_64_PTE_GLOBAL : 0)
     495        | MemoryTypeToPageTableEntryFlags(memoryType);
     496
     497    // if the page is user accessible, it's automatically
     498    // accessible in kernel space, too (but with the same
     499    // protection)
     500    if ((attributes & B_USER_PROTECTION) != 0) {
     501        page |= X86_64_PTE_USER;
     502        if ((attributes & B_WRITE_AREA) != 0)
     503            page |= X86_64_PTE_WRITABLE;
     504    } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
     505        page |= X86_64_PTE_WRITABLE;
     506
     507    // put it in the page table
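    // (an aligned 64 bit store is atomic on x86_64, so other CPUs never see
    // a half-written entry)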
     508    *(volatile page_table_entry*)entry = page;
     509}
     510
     511
     512void*
     513X86_64PagingMethod::Allocate32BitPage(phys_addr_t& _physicalAddress,
     514    void*& _handle)
     515{
     516    // get a free page
     517    MutexLocker locker(fFreePagesLock);
     518    vm_page* page;
     519    if (fFreePages != NULL) {
     520        page = fFreePages;
     521        fFreePages = page->cache_next;
     522        fFreePagesCount--;
     523        locker.Unlock();
     524    } else {
     525        // no pages -- allocate one
     526        locker.Unlock();
     527
     528        physical_address_restrictions restrictions = {};
     529        restrictions.high_address = 0x100000000LL;
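        // the page must lie within the first 4 GB of physical memory, so
        // that its address fits into 32 bits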
     530        page = vm_page_allocate_page_run(PAGE_STATE_UNUSED, 1, &restrictions,
     531            VM_PRIORITY_SYSTEM);
     532        if (page == NULL)
     533            return NULL;
     534
     535        DEBUG_PAGE_ACCESS_END(page);
     536    }
     537
     538    // map the page
     539    phys_addr_t physicalAddress
     540        = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
     541    addr_t virtualAddress;
     542    if (fPhysicalPageMapper->GetPage(physicalAddress, &virtualAddress, &_handle)
     543            != B_OK) {
     544        // mapping failed -- free page
     545        locker.Lock();
     546        page->cache_next = fFreePages;
     547        fFreePages = page;
     548        fFreePagesCount++;
     549        return NULL;
     550    }
     551
     552    _physicalAddress = physicalAddress;
     553    return (void*)virtualAddress;
     554}
     555
     556
     557void
     558X86_64PagingMethod::Free32BitPage(void* address, phys_addr_t physicalAddress,
     559    void* handle)
     560{
     561    // unmap the page
     562    fPhysicalPageMapper->PutPage((addr_t)address, handle);
     563
     564    // free it
     565    vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
     566    MutexLocker locker(fFreePagesLock);
     567    if (fFreePagesCount < kMaxFree32BitPagesCount) {
     568        // cache not full yet -- cache it
     569        page->cache_next = fFreePages;
     570        fFreePages = page;
     571        fFreePagesCount++;
     572    } else {
     573        // cache full -- free it
     574        locker.Unlock();
     575        DEBUG_PAGE_ACCESS_START(page);
     576        vm_page_free(NULL, page);
     577    }
     578}
     579
     580
     581bool
     582X86_64PagingMethod::_EarlyQuery(addr_t virtualAddress,
     583    phys_addr_t* _physicalAddress)
     584{
     585    page_directory_entry* pageDirEntry = PageDirEntryForAddress(
     586        fKernelVirtualPageDirs, virtualAddress);
     587    if ((*pageDirEntry & X86_64_PDE_PRESENT) == 0) {
     588        // no pagetable here
     589        return false;
     590    }
     591
     592    page_table_entry* entry = _EarlyGetPageTable(
     593            *pageDirEntry & X86_64_PDE_ADDRESS_MASK)
     594        + virtualAddress / B_PAGE_SIZE % kPageTableEntryCount;
     595    if ((*entry & X86_64_PTE_PRESENT) == 0) {
     596        // page mapping not valid
     597        return false;
     598    }
     599
     600    *_physicalAddress = *entry & X86_64_PTE_ADDRESS_MASK;
     601    return true;
     602}
     603
     604
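/*! Maps the given physical page at the reserved early virtual slot and
    returns its virtual address. There is only a single slot, so a previous
    mapping is replaced.
*/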
     605page_table_entry*
     606X86_64PagingMethod::_EarlyGetPageTable(phys_addr_t address)
     607{
     608    *fFreeVirtualSlotPTE = (address & X86_64_PTE_ADDRESS_MASK)
     609        | X86_64_PTE_PRESENT | X86_64_PTE_WRITABLE | X86_64_PTE_GLOBAL;
     610
     611    invalidate_TLB(fFreeVirtualSlot);
     612
     613    return (page_table_entry*)fFreeVirtualSlot;
     614}
  • src/system/kernel/arch/x86_64/paging/X86_64PagingMethod.h

     
     1/*
     2 * Copyright 2010, Nathan Mentley, nathanmentley@gmail.com.
     3 * Distributed under the terms of the MIT License.
     4 *
     5 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
     6 * Distributed under the terms of the MIT License.
     7 */
     8#ifndef KERNEL_ARCH_X86_64_PAGING_X86_64_PAGING_METHOD_H
     9#define KERNEL_ARCH_X86_64_PAGING_X86_64_PAGING_METHOD_H
     10
     11
     12#include <KernelExport.h>
     13
     14#include <lock.h>
     15#include <vm/vm_types.h>
     16
     17#include <vm/VMTranslationMap.h>
     18
     19#include "paging/paging.h"
      20
     21#include "paging/X86_64PagingStructures.h"
     22
     23class TranslationMapPhysicalPageMapper;
     24class X86_64PhysicalPageMapper;
     25
     26
     27class X86_64PagingMethod {
     28public:
     29                        X86_64PagingMethod();
     30    virtual                 ~X86_64PagingMethod();
     31
     32    virtual status_t            Init(kernel_args* args, VMPhysicalPageMapper** _physicalPageMapper);
     33    virtual status_t            InitPostArea(kernel_args* args);
     34
     35    virtual status_t            CreateTranslationMap(bool kernel,
     36                                    VMTranslationMap** _map);
     37
     38    virtual status_t            MapEarly(kernel_args* args,
     39                                    addr_t virtualAddress,
     40                                    phys_addr_t physicalAddress,
     41                                    uint8 attributes,
     42                                    phys_addr_t (*get_free_page)(kernel_args*));
     43
     44    virtual bool                IsKernelPageAccessible(addr_t virtualAddress,
     45                                    uint32 protection);
     46
     47    void*                   Allocate32BitPage(
     48                                    phys_addr_t& _physicalAddress,
     49                                    void*& _handle);
     50    void                    Free32BitPage(void* address,
     51                                    phys_addr_t physicalAddress, void* handle);
     52
     53    inline  X86_64PhysicalPageMapper*   PhysicalPageMapper() const
     54                                    { return fPhysicalPageMapper; }
     55    inline  TranslationMapPhysicalPageMapper*
     56                        KernelPhysicalPageMapper() const
     57                                    { return fKernelPhysicalPageMapper; }
     58    inline  page_directory_pointer_table_entry*
     59                                    KernelVirtualPageDirPointerTable() const;
     60    inline  phys_addr_t         KernelPhysicalPageDirPointerTable() const;
     61    inline  page_directory_entry* const* KernelVirtualPageDirs() const
     62                                    { return fKernelVirtualPageDirs; }
     63    inline  const phys_addr_t*  KernelPhysicalPageDirs() const
     64                                    { return fKernelPhysicalPageDirs; }
     65
     66    static  X86_64PagingMethod* Method();
     67
     68    static  void                PutPageTableInPageDir(
     69                                    page_directory_entry* entry,
     70                                    phys_addr_t physicalTable,
     71                                    uint32 attributes);
     72    static  void                PutPageTableEntryInTable(
     73                                    page_table_entry* entry,
     74                                    phys_addr_t physicalAddress,
     75                                    uint32 attributes, uint32 memoryType,
     76                                    bool globalPage);
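    // The following atomically modify a page table entry and return its
    // previous value.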
     77    static  page_table_entry SetPageTableEntry(page_table_entry* entry,
     78                                    page_table_entry newEntry);
     79    static  page_table_entry SetPageTableEntryFlags(
     80                                    page_table_entry* entry, uint64 flags);
     81    static  page_table_entry TestAndSetPageTableEntry(
     82                                    page_table_entry* entry,
     83                                    page_table_entry newEntry,
     84                                    page_table_entry oldEntry);
     85    static  page_table_entry ClearPageTableEntry(
     86                                    page_table_entry* entry);
     87    static  page_table_entry ClearPageTableEntryFlags(
     88                                    page_table_entry* entry, uint64 flags);
     89
     90    static  page_directory_entry* PageDirEntryForAddress(
     91                                    page_directory_entry* const* pdpt,
     92                                    addr_t address);
     93
     94    static  uint64              MemoryTypeToPageTableEntryFlags(
     95                                    uint32 memoryType);
     96
     97private:
     98    struct PhysicalPageSlotPool;
     99    friend struct PhysicalPageSlotPool;
     100
     101private:
     102    bool                _EarlyQuery(addr_t virtualAddress,
     103                            phys_addr_t* _physicalAddress);
     104    page_table_entry* _EarlyGetPageTable(phys_addr_t address);
     105
     106private:
     107    X86_64PhysicalPageMapper*   fPhysicalPageMapper;
     108
     109    TranslationMapPhysicalPageMapper*
     110                            fKernelPhysicalPageMapper;
     111    void*               fEarlyPageStructures;
     112    size_t              fEarlyPageStructuresSize;
     113
     114    page_directory_pointer_table_entry*
     115                    fKernelVirtualPageDirPointerTable;
     116    phys_addr_t         fKernelPhysicalPageDirPointerTable;
     117    page_directory_entry*       fKernelVirtualPageDirs[4];
     118    phys_addr_t         fKernelPhysicalPageDirs[4];
     119    addr_t              fFreeVirtualSlot;
     120    page_table_entry*       fFreeVirtualSlotPTE;
     121
     122    mutex               fFreePagesLock;
     123    vm_page*            fFreePages;
     124    page_num_t          fFreePagesCount;
     125};
     126
     127
     128extern X86_64PagingMethod* gX86_64PagingMethod;
     129
     130
     131page_directory_pointer_table_entry*
     132X86_64PagingMethod::KernelVirtualPageDirPointerTable() const
     133{
     134    return fKernelVirtualPageDirPointerTable;
     135}
     136
     137
     138phys_addr_t
     139X86_64PagingMethod::KernelPhysicalPageDirPointerTable() const
     140{
     141    return fKernelPhysicalPageDirPointerTable;
     142}
     143
     144
     145/*static*/ inline X86_64PagingMethod*
     146X86_64PagingMethod::Method()
     147{
      148    return gX86_64PagingMethod;
     149}
     150
     151
     152/*static*/ inline page_directory_entry*
     153X86_64PagingMethod::PageDirEntryForAddress(
     154    page_directory_entry* const* pdpt, addr_t address)
     155{
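    // The two top bits of the (32 bit) address part select one of the four
    // page directories; the page table range sized chunks below that select
    // the directory entry. E.g. with 2 MB page table ranges and 512 entries
    // per directory this is pdpt[address >> 30][(address >> 21) % 512].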
     156    return pdpt[address >> 30]
     157        + (address / kPageTableRange) % kPageDirEntryCount;
     158}
     159
     160
     161/*static*/ inline page_table_entry
     162X86_64PagingMethod::SetPageTableEntry(page_table_entry* entry,
     163    page_table_entry newEntry)
     164{
     165    return atomic_set64((int64*)entry, newEntry);
     166}
     167
     168
     169/*static*/ inline page_table_entry
     170X86_64PagingMethod::SetPageTableEntryFlags(page_table_entry* entry,
     171    uint64 flags)
     172{
     173    return atomic_or64((int64*)entry, flags);
     174}
     175
     176
     177/*static*/ inline page_table_entry
     178X86_64PagingMethod::TestAndSetPageTableEntry(page_table_entry* entry,
     179    page_table_entry newEntry, page_table_entry oldEntry)
     180{
     181    return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
     182}
     183
     184
     185/*static*/ inline page_table_entry
     186X86_64PagingMethod::ClearPageTableEntry(page_table_entry* entry)
     187{
     188    return SetPageTableEntry(entry, 0);
     189}
     190
     191
     192/*static*/ inline page_table_entry
     193X86_64PagingMethod::ClearPageTableEntryFlags(page_table_entry* entry,
     194    uint64 flags)
     195{
     196    return atomic_and64((int64*)entry, ~flags);
     197}
     198
     199
     200/*static*/ inline uint64
     201X86_64PagingMethod::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
     202{
     203    // ATM we only handle the uncacheable and write-through type explicitly. For
     204    // all other types we rely on the MTRRs to be set up correctly. Since we set
     205    // the default memory type to write-back and since the uncacheable type in
     206    // the PTE overrides any MTRR attribute (though, as per the specs, that is
     207    // not recommended for performance reasons), this reduces the work we
     208    // actually *have* to do with the MTRRs to setting the remaining types
     209    // (usually only write-combining for the frame buffer).
     210    switch (memoryType) {
     211        case B_MTR_UC:
     212            return X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH;
     213
     214        case B_MTR_WC:
      215            // X86_64_PTE_WRITE_THROUGH would be closer, but the combination with
     216            // MTRR WC is "implementation defined" for Pentium Pro/II.
     217            return 0;
     218
     219        case B_MTR_WT:
     220            return X86_64_PTE_WRITE_THROUGH;
     221
     222        case B_MTR_WP:
     223        case B_MTR_WB:
     224        default:
     225            return 0;
     226    }
     227}
     228
     229
     230#endif  // KERNEL_ARCH_X86_64_PAGING_X86_64_PAGING_METHOD_H
  • src/system/kernel/arch/x86_64/arch_smp.cpp

     
     1/*
     2 * Copyright 2010, Haiku Inc. All rights reserved.
     3 * Distributed under the terms of the MIT License.
     4 */
     5
     6
     7#include <KernelExport.h>
     8
     9#include <arch/smp.h>
     10
     11
     12//#define TRACE_ARCH_SMP
     13#ifdef TRACE_ARCH_SMP
     14#   define TRACE(x) dprintf x
     15#else
     16#   define TRACE(x) ;
     17#endif
     18
     19
      20/* TODO: once x86_64 is more stable, raise the max CPU count above 1
      21 *  and update the following functions accordingly
      22 */
     23status_t
     24arch_smp_init(kernel_args *args)
     25{
      26    // For now the max CPU count on x86_64 is set to 1,
      27    // which means SMP isn't needed yet.
     28    return B_OK;
     29}
     30
     31
     32status_t
     33arch_smp_per_cpu_init(kernel_args *args, int32 cpu)
     34{
     35    panic("%s not implemented", __func__);
     36    return B_OK;
     37}
     38
     39
     40void
     41arch_smp_send_ici(int32 target_cpu)
     42{
     43    panic("%s not implemented", __func__);
     44}
     45
     46
     47void
     48arch_smp_send_broadcast_ici()
     49{
     50    panic("%s not implemented", __func__);
     51}
  • src/system/kernel/arch/x86_64/arch_interrupts.S

     
     1/*
     2 * Copyright 2002-2010, The Haiku Team. All rights reserved.
     3 * Distributed under the terms of the MIT License.
     4 *
     5 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
     6 * Copyright 2002, Michael Noisternig. All rights reserved.
     7 * Distributed under the terms of the NewOS License.
     8 */
     9
     10#include <arch/user_debugger.h>
     11//#include <arch/x86/arch_cpu.h>
     12//#include <arch/x86/arch_kernel.h>
     13#include <descriptors.h>
     14#include <asm_defs.h>
     15#include <commpage_defs.h>
     16#include <thread_types.h>
     17
     18#include "tracing_config.h"
     19
     20#include "asm_offsets.h"
     21#include "syscall_numbers.h"
     22#include "syscall_table.h"
     23
     24
      25/*! void x86_64_restore_frame_from_syscall(struct iframe iframe);
      26    Pops the registers of the given iframe off the stack to make it the
      27    current frame and then returns to userland.
      28    Interrupts are disabled.
      29*/
     30
     31
     32.text
     33
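// PUSHA/POPA save and restore all general purpose registers; the push order
// defines the register layout of the iframe.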
     34#define PUSHA  \
     35    push    %rax; \
     36    push    %rbx; \
     37    push    %rcx; \
     38    push    %rdx; \
     39    push    %rdi; \
     40    push    %rsi; \
     41    push    %rbp; \
     42    push    %r8;  \
     43    push    %r9;  \
     44    push    %r10; \
     45    push    %r11; \
     46    push    %r12; \
     47    push    %r13; \
     48    push    %r14; \
     49    push    %r15
     50   
     51#define POPA \
     52    pop %r15; \
     53    pop %r14; \
     54    pop %r13; \
     55    pop %r12; \
     56    pop %r11; \
     57    pop %r10; \
     58    pop %r9; \
     59    pop %r8; \
     60    pop %rbp; \
     61    pop %rsi; \
     62    pop %rdi; \
     63    pop %rdx; \
     64    pop %rcx; \
     65    pop %rbx; \
     66    pop %rax
     67
      68#define UPDATE_THREAD_USER_TIME_COMMON()                    \
      69    mov     %rax, %rbx;     /* save the full 64 bit time for later */ \
      70                                                            \
      71    /* thread->user_time += now - thread->last_time; */     \
      72    sub     THREAD_last_time(%rdi), %rax;                   \
      73    add     %rax, THREAD_user_time(%rdi);                   \
      74                                                            \
      75    /* thread->last_time = now; */                          \
      76    mov     %rbx, THREAD_last_time(%rdi);                   \
      77                                                            \
      78    /* thread->in_kernel = true; */                         \
      79    movb    $1, THREAD_in_kernel(%rdi)
     84
     85#define UPDATE_THREAD_USER_TIME()                           \
     86    call    system_time;                                    \
     87    UPDATE_THREAD_USER_TIME_COMMON()
     88
      89#define UPDATE_THREAD_USER_TIME_PUSH_TIME()                 \
      90    call    system_time;                                    \
      91    push    %rax;           /* the (full 64 bit) time */    \
      92    UPDATE_THREAD_USER_TIME_COMMON()
     94
      95#define UPDATE_THREAD_KERNEL_TIME()                         \
      96    call    system_time;                                    \
      97                                                            \
      98    mov     %rax, %rbx;     /* save the full 64 bit time for later */ \
      99                                                            \
      100    /* thread->kernel_time += now - thread->last_time; */   \
      101    sub     THREAD_last_time(%rdi), %rax;                   \
      102    add     %rax, THREAD_kernel_time(%rdi);                 \
      103                                                            \
      104    /* thread->last_time = now; */                          \
      105    mov     %rbx, THREAD_last_time(%rdi);                   \
      106                                                            \
      107    /* thread->in_kernel = false; */                        \
      108    movb    $0, THREAD_in_kernel(%rdi)
     113
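// TRAP_ERRC is used for the vectors for which the CPU itself pushes an error
// code; TRAP pushes a dummy error code to get a uniform iframe layout.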
      114#define TRAP_ERRC(name, vector) \
      115.align 8; \
      116FUNCTION(name): \
      117    push    $vector; \
      118    push    %rdx; \
      119    push    %rax; \
      120    jmp int_bottom; \
      121FUNCTION_END(name)
     122
     123#define TRAP(name, vector) \
     124.align 8; \
     125FUNCTION(name): \
     126    push    $0; \
     127    push    $vector; \
     128    push    %rdx; \
     129    push    %rax; \
     130    jmp int_bottom; \
     131FUNCTION_END(name)
     132
     133#define PUSH_IFRAME_BOTTOM(iframeType)  \
     134    PUSHA;                          \
     135/*  push    %ds;                        \
     136    push    %es;                    */  \
     137    push    %fs;                        \
     138    push    %gs;                        \
     139    push    $iframeType
     140
      141#define POP_IFRAME_AND_RETURN()                         \
      142    /* skip iframe type */                          \
      143    lea 8(%rbp), %rsp;                          \
      144                                        \
      145    pop %gs;                                \
      146    add $8, %rsp;   /* we skip %fs, as this contains the CPU    \
      147                           dependent TLS segment */ \
      148/*  pop %es;                                \
      149    pop %ds;                            */  \
      150                                        \
      151    POPA;                                   \
      152    add $32, %rsp;  /* ignore the vector, error code, and       \
      153                           original rax/rdx values */  \
      154    iretq
     155
     156#define DISABLE_BREAKPOINTS()                           \
     157    testl   $THREAD_FLAGS_BREAKPOINTS_INSTALLED, THREAD_flags(%rdi);    \
     158    jz      1f;                         \
     159/*TODO: call    x86_64_exit_user_debug_at_kernel_entry; */          \
     160  1:
     161
     162
     163TRAP(trap0, 0)
     164TRAP(trap1, 1)
     165TRAP(trap2, 2)
     166TRAP(trap3, 3)
     167TRAP(trap4, 4)
     168TRAP(trap5, 5)
     169TRAP(trap6, 6)
     170TRAP(trap7, 7)
     171
     172
     173.align 8;
     174FUNCTION(double_fault):
      175    iretq
     176FUNCTION_END(double_fault)
     177
     178
     179TRAP(trap9, 9)
     180TRAP_ERRC(trap10, 10)
     181TRAP_ERRC(trap11, 11)
     182TRAP_ERRC(trap12, 12)
     183TRAP_ERRC(trap13, 13)
     184TRAP_ERRC(trap14, 14)
     185/*TRAP(trap15, 15)*/
     186TRAP(trap16, 16)
     187TRAP_ERRC(trap17, 17)
     188TRAP(trap18, 18)
     189TRAP(trap19, 19)
     190
     191// legacy or ioapic interrupts
     192TRAP(trap32, 32)
     193TRAP(trap33, 33)
     194TRAP(trap34, 34)
     195TRAP(trap35, 35)
     196TRAP(trap36, 36)
     197TRAP(trap37, 37)
     198TRAP(trap38, 38)
     199TRAP(trap39, 39)
     200TRAP(trap40, 40)
     201TRAP(trap41, 41)
     202TRAP(trap42, 42)
     203TRAP(trap43, 43)
     204TRAP(trap44, 44)
     205TRAP(trap45, 45)
     206TRAP(trap46, 46)
     207TRAP(trap47, 47)
     208
     209// additional ioapic interrupts
     210TRAP(trap48, 48)
     211TRAP(trap49, 49)
     212TRAP(trap50, 50)
     213TRAP(trap51, 51)
     214TRAP(trap52, 52)
     215TRAP(trap53, 53)
     216TRAP(trap54, 54)
     217TRAP(trap55, 55)
     218
     219// configurable msi or msi-x interrupts
     220TRAP(trap56, 56)
     221TRAP(trap57, 57)
     222TRAP(trap58, 58)
     223TRAP(trap59, 59)
     224TRAP(trap60, 60)
     225TRAP(trap61, 61)
     226TRAP(trap62, 62)
     227TRAP(trap63, 63)
     228TRAP(trap64, 64)
     229TRAP(trap65, 65)
     230TRAP(trap66, 66)
     231TRAP(trap67, 67)
     232TRAP(trap68, 68)
     233TRAP(trap69, 69)
     234TRAP(trap70, 70)
     235TRAP(trap71, 71)
     236TRAP(trap72, 72)
     237TRAP(trap73, 73)
     238TRAP(trap74, 74)
     239TRAP(trap75, 75)
     240TRAP(trap76, 76)
     241TRAP(trap77, 77)
     242TRAP(trap78, 78)
     243TRAP(trap79, 79)
     244TRAP(trap80, 80)
     245TRAP(trap81, 81)
     246TRAP(trap82, 82)
     247TRAP(trap83, 83)
     248TRAP(trap84, 84)
     249TRAP(trap85, 85)
     250TRAP(trap86, 86)
     251TRAP(trap87, 87)
     252TRAP(trap88, 88)
     253TRAP(trap89, 89)
     254TRAP(trap90, 90)
     255TRAP(trap91, 91)
     256TRAP(trap92, 92)
     257TRAP(trap93, 93)
     258TRAP(trap94, 94)
     259TRAP(trap95, 95)
     260TRAP(trap96, 96)
     261TRAP(trap97, 97)
     262//TRAP(trap98, 98) // performance testing interrupt
     263//TRAP(trap99, 99) // syscall interrupt
     264TRAP(trap100, 100)
     265TRAP(trap101, 101)
     266TRAP(trap102, 102)
     267TRAP(trap103, 103)
     268TRAP(trap104, 104)
     269TRAP(trap105, 105)
     270TRAP(trap106, 106)
     271TRAP(trap107, 107)
     272TRAP(trap108, 108)
     273TRAP(trap109, 109)
     274TRAP(trap110, 110)
     275TRAP(trap111, 111)
     276TRAP(trap112, 112)
     277TRAP(trap113, 113)
     278TRAP(trap114, 114)
     279TRAP(trap115, 115)
     280TRAP(trap116, 116)
     281TRAP(trap117, 117)
     282TRAP(trap118, 118)
     283TRAP(trap119, 119)
     284TRAP(trap120, 120)
     285TRAP(trap121, 121)
     286TRAP(trap122, 122)
     287TRAP(trap123, 123)
     288TRAP(trap124, 124)
     289TRAP(trap125, 125)
     290TRAP(trap126, 126)
     291TRAP(trap127, 127)
     292TRAP(trap128, 128)
     293TRAP(trap129, 129)
     294TRAP(trap130, 130)
     295TRAP(trap131, 131)
     296TRAP(trap132, 132)
     297TRAP(trap133, 133)
     298TRAP(trap134, 134)
     299TRAP(trap135, 135)
     300TRAP(trap136, 136)
     301TRAP(trap137, 137)
     302TRAP(trap138, 138)
     303TRAP(trap139, 139)
     304TRAP(trap140, 140)
     305TRAP(trap141, 141)
     306TRAP(trap142, 142)
     307TRAP(trap143, 143)
     308TRAP(trap144, 144)
     309TRAP(trap145, 145)
     310TRAP(trap146, 146)
     311TRAP(trap147, 147)
     312TRAP(trap148, 148)
     313TRAP(trap149, 149)
     314TRAP(trap150, 150)
     315TRAP(trap151, 151)
     316TRAP(trap152, 152)
     317TRAP(trap153, 153)
     318TRAP(trap154, 154)
     319TRAP(trap155, 155)
     320TRAP(trap156, 156)
     321TRAP(trap157, 157)
     322TRAP(trap158, 158)
     323TRAP(trap159, 159)
     324TRAP(trap160, 160)
     325TRAP(trap161, 161)
     326TRAP(trap162, 162)
     327TRAP(trap163, 163)
     328TRAP(trap164, 164)
     329TRAP(trap165, 165)
     330TRAP(trap166, 166)
     331TRAP(trap167, 167)
     332TRAP(trap168, 168)
     333TRAP(trap169, 169)
     334TRAP(trap170, 170)
     335TRAP(trap171, 171)
     336TRAP(trap172, 172)
     337TRAP(trap173, 173)
     338TRAP(trap174, 174)
     339TRAP(trap175, 175)
     340TRAP(trap176, 176)
     341TRAP(trap177, 177)
     342TRAP(trap178, 178)
     343TRAP(trap179, 179)
     344TRAP(trap180, 180)
     345TRAP(trap181, 181)
     346TRAP(trap182, 182)
     347TRAP(trap183, 183)
     348TRAP(trap184, 184)
     349TRAP(trap185, 185)
     350TRAP(trap186, 186)
     351TRAP(trap187, 187)
     352TRAP(trap188, 188)
     353TRAP(trap189, 189)
     354TRAP(trap190, 190)
     355TRAP(trap191, 191)
     356TRAP(trap192, 192)
     357TRAP(trap193, 193)
     358TRAP(trap194, 194)
     359TRAP(trap195, 195)
     360TRAP(trap196, 196)
     361TRAP(trap197, 197)
     362TRAP(trap198, 198)
     363TRAP(trap199, 199)
     364TRAP(trap200, 200)
     365TRAP(trap201, 201)
     366TRAP(trap202, 202)
     367TRAP(trap203, 203)
     368TRAP(trap204, 204)
     369TRAP(trap205, 205)
     370TRAP(trap206, 206)
     371TRAP(trap207, 207)
     372TRAP(trap208, 208)
     373TRAP(trap209, 209)
     374TRAP(trap210, 210)
     375TRAP(trap211, 211)
     376TRAP(trap212, 212)
     377TRAP(trap213, 213)
     378TRAP(trap214, 214)
     379TRAP(trap215, 215)
     380TRAP(trap216, 216)
     381TRAP(trap217, 217)
     382TRAP(trap218, 218)
     383TRAP(trap219, 219)
     384TRAP(trap220, 220)
     385TRAP(trap221, 221)
     386TRAP(trap222, 222)
     387TRAP(trap223, 223)
     388TRAP(trap224, 224)
     389TRAP(trap225, 225)
     390TRAP(trap226, 226)
     391TRAP(trap227, 227)
     392TRAP(trap228, 228)
     393TRAP(trap229, 229)
     394TRAP(trap230, 230)
     395TRAP(trap231, 231)
     396TRAP(trap232, 232)
     397TRAP(trap233, 233)
     398TRAP(trap234, 234)
     399TRAP(trap235, 235)
     400TRAP(trap236, 236)
     401TRAP(trap237, 237)
     402TRAP(trap238, 238)
     403TRAP(trap239, 239)
     404TRAP(trap240, 240)
     405TRAP(trap241, 241)
     406TRAP(trap242, 242)
     407TRAP(trap243, 243)
     408TRAP(trap244, 244)
     409TRAP(trap245, 245)
     410TRAP(trap246, 246)
     411TRAP(trap247, 247)
     412TRAP(trap248, 248)
     413TRAP(trap249, 249)
     414TRAP(trap250, 250)
     415
     416// smp / apic local interrupts
     417TRAP(trap251, 251)
     418TRAP(trap252, 252)
     419TRAP(trap253, 253)
     420TRAP(trap254, 254)
     421TRAP(trap255, 255)
     422
     423
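// Common bottom half of all interrupt stubs: it builds the iframe, is
// supposed to dispatch to the handler from gInterruptHandlerTable (still
// TODO), and performs the kernel exit work when returning to userland.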
     424.align 16
     425STATIC_FUNCTION(int_bottom):
     426    PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
     427
     428    mov %rsp, %rbp      // frame pointer is the iframe
     429
      430    // Set the RF (resume flag) in EFLAGS. This prevents an instruction
      431    // breakpoint on the instruction we're returning to from triggering a
      432    // debug exception.
      433    orl     $0x10000, IFRAME_flags(%rbp);
      434
      435    testl   $0x3, IFRAME_cs(%rbp)       // did we come from userland?
      436    jnz     int_bottom_user
      437
      438    // We need to recheck user mode using the thread's in_kernel flag, since
      439    // sysexit introduces a race condition: It doesn't reenable interrupts,
      440    // so we have to do that in the instruction before, which opens a window
      441    // for an interrupt while still being in the kernel, but having already
      442    // set up everything for userland.
      443    mov     %dr3, %rdi                  // thread pointer
      444    cmpb    $0, THREAD_in_kernel(%rdi)
      445    je      int_bottom_user
      446
      447    // disable interrupts -- the handler will enable them, if necessary
      448    cli
      449    mov     %rbp, %rdi                  // the iframe is the first argument
      450    mov     IFRAME_vector(%rbp), %rax
      451//TODO: call    *gInterruptHandlerTable(, %rax, 8)
      452
      453    POP_IFRAME_AND_RETURN()
     454FUNCTION_END(int_bottom)
     455
     456
     457STATIC_FUNCTION(int_bottom_user):
     458    mov $KERNEL_DATA_SEG,%rax
     459    cld
     460    mov %rax,%ds
     461    mov %rax,%es
     462
     463    // disable breakpoints, if installed
     464    mov %dr3, %rdi              // thread pointer
     465    cli                             // disable interrupts
     466    DISABLE_BREAKPOINTS()
     467
     468    // update the thread's user time
     469    UPDATE_THREAD_USER_TIME()
     470
     471    // leave interrupts disabled -- the handler will enable them, if
     472    // necessary
     473
      474    mov     %rbp, %rdi                  // the iframe is the first argument
      475    mov     IFRAME_vector(%rbp), %rax
      476//TODO: call    *gInterruptHandlerTable(, %rax, 8)
      477    mov     %dr3, %rdi                  // reload the thread pointer
      478    // Don't do any kernel exit work if we actually came from the kernel (but
      479    // were already/still prepared for userland), since the iframe in this
      480    // case will be a kernel iframe, and e.g. trying to set up a signal stack
      481    // would not be a very healthy endeavor.
      482    cmp     $USER_CODE_SEG, IFRAME_cs(%rbp)
      483    jne     1f
     484
     485    testl   $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
     486            | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED) \
     487            , THREAD_flags(%rdi)
     488    jnz     kernel_exit_work
     4891:
     490
     491    cli                             // disable interrupts
     492
     493    // update the thread's kernel time and return
     494    UPDATE_THREAD_KERNEL_TIME()
     495    POP_IFRAME_AND_RETURN()
     496FUNCTION_END(int_bottom_user)
     497
     498
     499.align 8;
     500FUNCTION(trap14_double_fault):
      501/*  push    $14
      502    push    $-1
      503    push    $-1
      504
      505    PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
      506
      507    mov %rsp, %rbp      // frame pointer is the iframe
      508
      509    mov %rbp, %rdi      // the iframe is the first argument
      510    call    x86_page_fault_exception_double_fault
      511
      512    POP_IFRAME_AND_RETURN()*/
     513FUNCTION_END(trap14_double_fault)
     514
     515
     516.align 16
     517
     518FUNCTION(x86_64_restore_frame_from_syscall):
     519FUNCTION_END(x86_64_restore_frame_from_syscall)
     520
     521
     522// test interrupt handler for performance measurements
     523.align 16
     524FUNCTION(trap98):
      525    iretq
     526FUNCTION_END(trap98)
     527
     528
     529.align 16
     530FUNCTION(trap99):
     531    // push error, vector, orig_edx, orig_eax, and other registers
     532//  PUSH_IFRAME_BOTTOM_SYSCALL()
     533
     534//  call    handle_syscall
     535
     536//  POP_IFRAME_AND_RETURN()
     537FUNCTION_END(trap99)
     538
     539
      540/*! Handler called by the sysenter instruction
      541    %rcx - user rsp
      542*/
     543FUNCTION(x86_64_sysenter):
     544FUNCTION_END(x86_64_sysenter)
     545
     546/* user space half of the syscall mechanism, to be copied into the commpage */
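// (the *_end symbols let the commpage setup code compute how many bytes to
// copy)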
     547
     548// int 99 fallback
     549FUNCTION(_user_syscall_int):
     550    int $99
     551    ret
     552FUNCTION_END(_user_syscall_int)
     553SYMBOL(_user_syscall_int_end):
     554
     555
     556// Intel sysenter/sysexit
     557FUNCTION(_user_syscall_sysenter):
      558    // sysexit forces us to trash %rdx (-> rip) and %rcx (-> rsp), but they
      559    // are scratch registers anyway. We use %rcx right away to store rsp.
     560    mov %rsp, %rcx
     561    sysenter
     562    ret
     563FUNCTION_END(_user_syscall_sysenter)
     564SYMBOL(_user_syscall_sysenter_end):
     565
     566
      567/*! Is copied onto the signal stack to restore the original frame when
      568    the signal handler exits.
      569    The copying code (in arch_thread.c::arch_setup_signal_frame()) copies
      570    everything between the x86_64_return_from_signal and
      571    x86_64_end_return_from_signal symbols.
      572*/
     573FUNCTION(x86_64_return_from_signal):
      574    add $24, %rsp   // pop the 3 arguments passed to sa_handler
     575    mov $SYSCALL_RESTORE_SIGNAL_FRAME, %rax
     576        // This syscall will restore the cpu context to the
     577        // one existing before calling the signal handler
     578    mov $0, %rcx
     579    lea 8(%rsp), %rdx
     580    int $99
     581    ret
     582FUNCTION_END(x86_64_return_from_signal)
     583SYMBOL(x86_64_end_return_from_signal):
     584
     585
      586STATIC_FUNCTION(kernel_exit_work):
      587    // if no signals are pending and the thread shall not be debugged, we
      588    // can use the quick kernel exit function
      589    testl   $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD) \
      590            , THREAD_flags(%rdi)
      591    jnz     kernel_exit_handle_signals
      592    cli                             // disable interrupts
      593    call    thread_at_kernel_exit_no_signals
      594kernel_exit_work_done:
      595
      596    // syscall restart
      597    // TODO: this only needs to be done for syscalls!
      598    testl   $THREAD_FLAGS_RESTART_SYSCALL, THREAD_flags(%rdi)
      599    jz      1f
      600    push    %rbp
      601//TODO: call    x86_64_restart_syscall
      602    addq    $8, %rsp
      6031:
      604
      605    // install breakpoints, if defined
      606    testl   $THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%rdi)
      607    jz      1f
      608    mov     %rbp, %rdi          // the iframe is the first argument
      609    call    x86_64_init_user_debug_at_kernel_exit
      6101:
      611    POP_IFRAME_AND_RETURN()
      612FUNCTION_END(kernel_exit_work)
      613
      614STATIC_FUNCTION(kernel_exit_handle_signals):
      615    // make sure interrupts are enabled (they are when coming from a
      616    // syscall, but otherwise they might be disabled)
      617    sti
      618    call    thread_at_kernel_exit   // also disables interrupts
      619    jmp     kernel_exit_work_done
      620FUNCTION_END(kernel_exit_handle_signals)