Ticket #6310: x86_64_support_source.patch
File x86_64_support_source.patch, 41.2 KB (added 13 years ago)
src/tools/gensyscalls/arch/x86_64/arch_gensyscalls.h
/* Alignment types used by the syscall generator for x86-64:
 * return values and parameters are padded to int-sized slots. */
#define SYSCALL_RETURN_TYPE_ALIGNMENT_TYPE int
#define SYSCALL_PARAMETER_ALIGNMENT_TYPE int
src/system/boot/platform/bios_ia32/mmu.h
12 12 // For use with mmu_map_physical_memory() 13 13 static const uint32 kDefaultPageFlags = 0x3; // present, R/W 14 14 15 15 16 #ifdef __cplusplus 16 17 extern "C" { 17 18 #endif … … 23 24 extern bool mmu_allocate_physical(addr_t base, size_t size); 24 25 extern void mmu_free(void *virtualAddress, size_t size); 25 26 27 extern void map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags); 28 29 extern addr_t get_next_virtual_page(); 30 extern addr_t get_next_physical_page(); 31 extern uint32* get_next_page_table(); 32 26 33 #ifdef __cplusplus 27 34 } 28 35 #endif -
src/system/boot/platform/bios_ia32/long.h
1 /* 2 * Copyright 2010, Nathan Mentley, nathanmentley@gmail.com. 3 * Distributed under the terms of the MIT License. 4 */ 5 #ifndef LONG_H 6 #define LONG_H 7 8 9 #include <SupportDefs.h> 10 11 12 #ifdef __cplusplus 13 extern "C" { 14 #endif 15 16 extern void long_init_for_kernel(void); 17 18 #ifdef __cplusplus 19 } 20 #endif 21 22 #endif /* LONG_H */ -
src/system/boot/platform/bios_ia32/start.cpp
21 21 #include "debug.h" 22 22 #include "hpet.h" 23 23 #include "keyboard.h" 24 #include "long.h" 24 25 #include "mmu.h" 25 26 #include "multiboot.h" 26 27 #include "serial.h" … … 86 87 mmu_init_for_kernel(); 87 88 smp_boot_other_cpus(); 88 89 90 if(gKernelArgs.kernel_image.elf_size == 64) 91 long_init_for_kernel(); 92 89 93 dprintf("kernel entry at %lx\n", 90 94 gKernelArgs.kernel_image.elf_header.e_entry); 91 95 -
src/system/boot/platform/bios_ia32/mmu.cpp
127 127 } 128 128 129 129 130 staticaddr_t130 extern addr_t 131 131 get_next_virtual_page() 132 132 { 133 133 return get_next_virtual_address(B_PAGE_SIZE); 134 134 } 135 135 136 136 137 staticaddr_t137 extern addr_t 138 138 get_next_physical_page() 139 139 { 140 140 return get_next_physical_address(B_PAGE_SIZE); 141 141 } 142 142 143 143 144 staticuint32 *144 extern uint32 * 145 145 get_next_page_table() 146 146 { 147 147 TRACE("get_next_page_table, sNextPageTableAddress %#" B_PRIxADDR … … 216 216 If the mapping goes beyond the current page table, it will allocate 217 217 a new one. If it cannot map the requested page, it panics. 218 218 */ 219 staticvoid219 extern void 220 220 map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags) 221 221 { 222 222 TRACE("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, … … 542 542 543 543 // seg 0x08 - kernel 4GB code 544 544 set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE, 545 DPL_KERNEL );545 DPL_KERNEL, false); 546 546 547 547 // seg 0x10 - kernel 4GB data 548 548 set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE, 549 DPL_KERNEL );549 DPL_KERNEL, false); 550 550 551 551 // seg 0x1b - ring 3 user 4GB code 552 552 set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE, 553 DPL_USER );553 DPL_USER, false); 554 554 555 555 // seg 0x23 - ring 3 user 4GB data 556 556 set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE, 557 DPL_USER );557 DPL_USER, false); 558 558 559 559 // virtualGDT[5] and above will be filled later by the kernel 560 560 // to contain the TSS descriptors, and for TLS (one for every CPU) -
src/system/boot/platform/bios_ia32/long.cpp
1 /* 2 * Copyright 2010, Nathan Mentley, nathanmentley@gmail.com. 3 * Distributed under the terms of the MIT License. 4 */ 5 6 7 #include "long.h" 8 #include "mmu.h" 9 10 #include <kernel.h> 11 #include <boot/kernel_args.h> 12 #include <boot/stage2.h> 13 #include <boot/platform.h> 14 15 16 //#define TRACE_LONG 17 #ifdef TRACE_LONG 18 # define TRACE(x...) dprintf(x) 19 #else 20 # define TRACE(x...) ; 21 #endif 22 23 24 struct gdt_idt_descr { 25 uint16 limit; 26 uint32 *base; 27 } _PACKED; 28 29 30 static void long_disable_paging() 31 { 32 asm volatile("mov %%cr0, %%eax ;" 33 "and $0x7FFFFFFF, %%eax;" 34 "mov %%eax,%%cr0;" 35 ::); 36 } 37 38 static void long_init_basic_pae() 39 { 40 asm volatile ("mov %%cr4,%%eax;" //enable pae 41 "or 1 << 6,%%eax;" 42 "mov %%eax,%%cr4;" 43 ::); 44 } 45 46 static void long_init_64bit_gdt() 47 { 48 struct gdt_idt_descr gdtDescriptor; 49 segment_descriptor* virtualGDT = (segment_descriptor*)gKernelArgs.arch_args.vir_gdt; 50 51 clear_segment_descriptor(&virtualGDT[0]); 52 set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE, DPL_KERNEL, true); 53 set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE, DPL_KERNEL, true); 54 set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE, DPL_USER, true); 55 set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE, DPL_USER, true); 56 57 TRACE("gdt updated at virtual address %p\n", 58 (void*)gKernelArgs.arch_args.vir_gdt); 59 60 gdtDescriptor.limit = GDT_LIMIT - 1; 61 gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt; 62 63 asm volatile("lgdt %0;" //load gdt 64 : : "m" (gdtDescriptor)); 65 } 66 67 static void long_init_pml4() 68 { 69 static uint64 *sPageDirectory = 0; 70 uint64* pageTable = 0; 71 72 // allocate a new pgdir 73 sPageDirectory = (uint64 *)get_next_physical_page(); 74 75 // clear out the pgdir 76 for (int32 i = 0; i < 512; i++) { 77 sPageDirectory[i] = 0; 78 } 79 //load old tables into pd 80 81 // make the 
first page table at the first free spot 82 pageTable = (uint64*)get_next_page_table(); 83 84 for (int32 i = 0; i < 512; i++) { 85 pageTable[i] = (i * 0x1000) | kDefaultPageFlags; 86 } 87 88 sPageDirectory[0] = (uint64)pageTable | kDefaultPageFlags; 89 90 // make the second page table 91 pageTable = (uint64*)get_next_page_table(); 92 93 for (int32 i = 0; i < 512; i++) { 94 pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags; 95 } 96 97 sPageDirectory[1] = (uint64)pageTable | kDefaultPageFlags; 98 99 gKernelArgs.arch_args.num_pgtables = 0; 100 gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory; 101 102 //initalize pgdirpointer and point to pgdir 103 sPageDirectory = (uint64 *)get_next_physical_page(); 104 gKernelArgs.arch_args.phys_pgdirpointer = (uint32)sPageDirectory; 105 106 // clear out the pgdirpointer 107 for (int64 i = 0; i < 512; i++) { 108 sPageDirectory[i] = 0; 109 } 110 111 sPageDirectory[0] = (uint64)gKernelArgs.arch_args.phys_pgdir | kDefaultPageFlags; 112 113 sPageDirectory[511] = (uint64)sPageDirectory | kDefaultPageFlags; 114 115 gKernelArgs.arch_args.vir_pgdirpointer = get_next_virtual_page(); 116 map_page(gKernelArgs.arch_args.vir_pgdirpointer, (uint32)sPageDirectory, 117 kDefaultPageFlags); 118 119 //initalize pml4 and point to pgdirpointer 120 sPageDirectory = (uint64 *)get_next_physical_page(); 121 gKernelArgs.arch_args.phys_pml4 = (uint32)sPageDirectory; 122 123 // clear out the pml4 124 for (int64 i = 0; i < 512; i++) { 125 sPageDirectory[i] = 0; 126 } 127 128 sPageDirectory[0] = (uint64)gKernelArgs.arch_args.phys_pgdirpointer | kDefaultPageFlags; 129 130 sPageDirectory[511] = (uint64)sPageDirectory | kDefaultPageFlags; 131 132 gKernelArgs.arch_args.vir_pml4 = get_next_virtual_page(); 133 map_page(gKernelArgs.arch_args.vir_pml4, (uint32)sPageDirectory, 134 kDefaultPageFlags); 135 136 asm volatile("movl %0, %%eax;" 137 "movl %%eax, %%cr3;" : : "m" (gKernelArgs.arch_args.phys_pml4) : "eax"); 138 } 139 140 static void 
long_set_efer_flag() 141 { 142 asm volatile("mov $0xC0000080,%%ecx;" 143 "rdmsr;" 144 "or 1 << 9,%%eax;" 145 "wrmsr;" 146 ::); 147 } 148 149 static void long_enable_paging() 150 { 151 asm volatile("mov %%cr0,%%eax;" 152 "bts $31,%%eax;" 153 "mov %%eax,%%cr0;" 154 ::); 155 } 156 157 extern "C" void 158 long_init_for_kernel(void) 159 { 160 long_disable_paging(); 161 162 long_init_64bit_gdt(); 163 164 long_init_pml4(); 165 166 long_init_basic_pae(); 167 168 long_set_efer_flag(); 169 170 long_enable_paging(); 171 } 172 -
src/system/boot/platform/bios_ia32/smp.cpp
418 418 gKernelArgs.arch_args.ioapic_phys, B_PAGE_SIZE, kDefaultPageFlags); 419 419 } 420 420 421 TRACE(("smp: apic = %p\n", gKernelArgs.arch_args.apic));422 TRACE(("smp: ioapic = %p\n", gKernelArgs.arch_args.ioapic));421 TRACE(("smp: apic = %p\n", (void *)gKernelArgs.arch_args.apic)); 422 TRACE(("smp: ioapic = %p\n", (void *)gKernelArgs.arch_args.ioapic)); 423 423 424 424 // calculate how fast the apic timer is 425 425 calculate_apic_timer_conversion_factor(); … … 472 472 uint32 j; 473 473 474 474 // set this stack up 475 finalStack = (uint32 *) gKernelArgs.cpu_kstack[i].start;475 finalStack = (uint32 *)(addr_t)gKernelArgs.cpu_kstack[i].start; 476 476 memset((uint8*)finalStack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 0, 477 477 KERNEL_STACK_SIZE); 478 478 tempStack = (finalStack -
src/system/boot/loader/elf.cpp
11 11 #include <boot/stage2.h> 12 12 #include <driver_settings.h> 13 13 #include <elf32.h> 14 #include <elf64.h> 14 15 #include <kernel.h> 15 16 16 17 #include <unistd.h> … … 44 45 #endif 45 46 } 46 47 47 48 template <class Ehdr, class Phdr> 48 49 static status_t 49 verify_elf_header( struct Elf32_Ehdr &header)50 verify_elf_header(Ehdr &header, uint8 ident) 50 51 { 51 52 if (memcmp(header.e_ident, ELF_MAGIC, 4) != 0 52 || header.e_ident[4] != ELFCLASS3253 || (header.e_ident[4] != ident) 53 54 || header.e_phoff == 0 54 55 || !header.IsHostEndian() 55 || header.e_phentsize != sizeof( struct Elf32_Phdr))56 || header.e_phentsize != sizeof(Phdr)) 56 57 return B_BAD_TYPE; 57 58 58 59 return B_OK; 59 60 } 60 61 61 62 template <class Dyn, class Sym, class Rel, class Rela> 62 63 static status_t 63 64 elf_parse_dynamic_section(struct preloaded_image *image) 64 65 { … … 71 72 image->pltrel_len = 0; 72 73 image->pltrel_type = 0; 73 74 74 struct Elf32_Dyn *d = (struct Elf32_Dyn *)image->dynamic_section.start;75 Dyn *d = (Dyn *)(addr_t)image->dynamic_section.start; 75 76 if (!d) 76 77 return B_ERROR; 77 78 … … 81 82 case DT_STRTAB: 82 83 break; 83 84 case DT_SYMTAB: 84 image->syms = ( struct Elf32_Sym*)(d[i].d_un.d_ptr85 image->syms = (void *)(d[i].d_un.d_ptr 85 86 + image->text_region.delta); 86 87 break; 87 88 case DT_REL: 88 image->rel = ( struct Elf32_Rel*)(d[i].d_un.d_ptr89 image->rel = (void *)(d[i].d_un.d_ptr 89 90 + image->text_region.delta); 90 91 break; 91 92 case DT_RELSZ: 92 93 image->rel_len = d[i].d_un.d_val; 93 94 break; 94 95 case DT_RELA: 95 image->rela = ( struct Elf32_Rela*)(d[i].d_un.d_ptr96 image->rela = (void *)(d[i].d_un.d_ptr 96 97 + image->text_region.delta); 97 98 break; 98 99 case DT_RELASZ: 99 100 image->rela_len = d[i].d_un.d_val; 100 101 break; 101 102 case DT_JMPREL: 102 image->pltrel = ( struct Elf32_Rel*)(d[i].d_un.d_ptr103 image->pltrel = (void *)(d[i].d_un.d_ptr 103 104 + image->text_region.delta); 104 105 break; 105 106 case DT_PLTRELSZ: … … 
121 122 return B_OK; 122 123 } 123 124 124 125 template <class Ehdr, class Sym, class Shdr> 125 126 static status_t 126 load_elf_symbol_table(int fd, preloaded_image *image )127 load_elf_symbol_table(int fd, preloaded_image *image, Ehdr &elfHeader) 127 128 { 128 struct Elf32_Ehdr &elfHeader = image->elf_header; 129 Elf32_Sym *symbolTable = NULL; 130 Elf32_Shdr *stringHeader = NULL; 129 Sym *symbolTable = NULL; 130 Shdr *stringHeader = NULL; 131 131 uint32 numSymbols = 0; 132 132 char *stringTable; 133 133 status_t status; … … 135 135 // get section headers 136 136 137 137 ssize_t size = elfHeader.e_shnum * elfHeader.e_shentsize; 138 Elf32_Shdr *sectionHeaders = (struct Elf32_Shdr *)malloc(size);138 Shdr *sectionHeaders = (Shdr *)malloc(size); 139 139 if (sectionHeaders == NULL) { 140 140 dprintf("error allocating space for section headers\n"); 141 141 return B_NO_MEMORY; … … 161 161 } 162 162 163 163 // read in symbol table 164 symbolTable = ( Elf32_Sym *)kernel_args_malloc(164 symbolTable = (Sym *)kernel_args_malloc( 165 165 size = sectionHeaders[i].sh_size); 166 166 if (symbolTable == NULL) { 167 167 status = B_NO_MEMORY; … … 176 176 goto error1; 177 177 } 178 178 179 numSymbols = size / sizeof( Elf32_Sym);179 numSymbols = size / sizeof(Sym); 180 180 break; 181 181 } 182 182 } … … 205 205 TRACE(("loaded %ld debug symbols\n", numSymbols)); 206 206 207 207 // insert tables into image 208 image->debug_symbols = symbolTable;208 image->debug_symbols = (Elf32_Sym*)symbolTable; 209 209 image->num_debug_symbols = numSymbols; 210 210 image->debug_string_table = stringTable; 211 211 image->debug_string_table_size = size; … … 223 223 return status; 224 224 } 225 225 226 226 template <class Ehdr, class Phdr, class Sym, class Shdr> 227 227 status_t 228 elf_load_image(int fd, preloaded_image *image )228 elf_load_image(int fd, preloaded_image *image, Ehdr &elfHeader) 229 229 { 230 230 size_t totalSize; 231 231 status_t status; 232 232 233 TRACE(("elf_load_image(fd = %d, image = 
%p)\n", fd, image));233 ssize_t length = read_pos(fd, 0, &elfHeader, sizeof(Ehdr)); 234 234 235 struct Elf32_Ehdr &elfHeader = image->elf_header;236 237 ssize_t length = read_pos(fd, 0, &elfHeader, sizeof(Elf32_Ehdr));238 if (length < (ssize_t)sizeof(Elf32_Ehdr))239 return B_BAD_TYPE;240 241 status = verify_elf_header(elfHeader);242 if (status < B_OK)243 return status;244 245 235 ssize_t size = elfHeader.e_phnum * elfHeader.e_phentsize; 246 Elf32_Phdr *programHeaders = (struct Elf32_Phdr *)malloc(size);236 Phdr *programHeaders = (Phdr *)malloc(size); 247 237 if (programHeaders == NULL) { 248 238 dprintf("error allocating space for program headers\n"); 249 239 return B_NO_MEMORY; … … 262 252 image->text_region.size = 0; 263 253 264 254 for (int32 i = 0; i < elfHeader.e_phnum; i++) { 265 Elf32_Phdr &header = programHeaders[i];255 Phdr &header = programHeaders[i]; 266 256 267 257 switch (header.p_type) { 268 258 case PT_LOAD: … … 350 340 // load program data 351 341 352 342 for (int i = 0; i < elfHeader.e_phnum; i++) { 353 Elf32_Phdr &header = programHeaders[i];343 Phdr &header = programHeaders[i]; 354 344 355 345 if (header.p_type != PT_LOAD) 356 346 continue; … … 392 382 image->debug_string_table = NULL; 393 383 394 384 if (sLoadElfSymbols) 395 load_elf_symbol_table (fd, image);385 load_elf_symbol_table<Ehdr, Sym, Shdr>(fd, image, elfHeader); 396 386 397 387 free(programHeaders); 398 388 … … 407 397 return status; 408 398 } 409 399 400 status_t 401 elf_load_image(int fd, preloaded_image *image) 402 { 403 status_t status; 410 404 405 TRACE(("elf_load_image(fd = %d, image = %p)\n", fd, image)); 406 407 struct Elf32_Ehdr &elfHeader = image->elf_header; 408 struct Elf64_Ehdr &elfHeader64 = image->elf_header64; 409 410 ssize_t length = read_pos(fd, 0, &elfHeader, sizeof(Elf32_Ehdr)); 411 if (length < (ssize_t)sizeof(Elf32_Ehdr)) 412 return B_BAD_TYPE; 413 414 status = verify_elf_header<Elf32_Ehdr, Elf32_Phdr>(elfHeader, ELFCLASS32); 415 if (status < B_OK) { //not elf32. 
Check if elf64. 416 ssize_t length = read_pos(fd, 0, &elfHeader64, sizeof(Elf64_Ehdr)); 417 if (length < (ssize_t)sizeof(Elf64_Ehdr)) 418 return B_BAD_TYPE; 419 420 status = verify_elf_header<Elf64_Ehdr, Elf64_Phdr>(elfHeader64, ELFCLASS64); 421 if (status < B_OK) 422 return status; 423 424 image->elf_size = 64; 425 return elf_load_image<Elf64_Ehdr, Elf64_Phdr, Elf64_Sym, Elf64_Shdr>(fd, image, elfHeader64); 426 } 427 428 image->elf_size = 32; 429 return elf_load_image<Elf32_Ehdr, Elf32_Phdr, Elf32_Sym, Elf32_Shdr>(fd, image, elfHeader); 430 } 431 432 411 433 status_t 412 434 elf_load_image(Directory *directory, const char *path) 413 435 { … … 456 478 return status; 457 479 } 458 480 459 481 template <class Dyn, class Sym, class Rel, class Rela> 460 482 status_t 461 483 elf_relocate_image(struct preloaded_image *image) 462 484 { 463 status_t status = elf_parse_dynamic_section (image);485 status_t status = elf_parse_dynamic_section<Dyn, Sym, Rel, Rela>(image); 464 486 if (status != B_OK) 465 487 return status; 466 488 467 489 // deal with the rels first 468 490 if (image->rel) { 469 491 TRACE(("total %i relocs\n", 470 image->rel_len / (int)sizeof( struct Elf32_Rel)));492 image->rel_len / (int)sizeof(Rel))); 471 493 472 status = boot_arch_elf_relocate_rel(image, image->rel, image->rel_len);494 status = boot_arch_elf_relocate_rel(image, (Rel*)((uint64)image->rel), image->rel_len); 473 495 if (status < B_OK) 474 496 return status; 475 497 } 476 498 477 499 if (image->pltrel) { 478 500 TRACE(("total %i plt-relocs\n", 479 image->pltrel_len / (int)sizeof( struct Elf32_Rel)));501 image->pltrel_len / (int)sizeof(Rel))); 480 502 481 503 if (image->pltrel_type == DT_REL) { 482 status = boot_arch_elf_relocate_rel(image, image->pltrel,504 status = boot_arch_elf_relocate_rel(image, (Rel *)(addr_t)image->pltrel, 483 505 image->pltrel_len); 484 506 } else { 485 507 status = boot_arch_elf_relocate_rela(image, 486 ( struct Elf32_Rela *)image->pltrel, image->pltrel_len);508 (Rela 
*)(addr_t)image->pltrel, image->pltrel_len); 487 509 } 488 510 if (status < B_OK) 489 511 return status; … … 491 513 492 514 if (image->rela) { 493 515 TRACE(("total %i rela relocs\n", 494 image->rela_len / (int)sizeof( struct Elf32_Rela)));495 status = boot_arch_elf_relocate_rela(image, image->rela,516 image->rela_len / (int)sizeof(Rela))); 517 status = boot_arch_elf_relocate_rela(image, (Rela *)(addr_t)image->rela, 496 518 image->rela_len); 497 519 if (status < B_OK) 498 520 return status; … … 502 524 } 503 525 504 526 527 template <class Dyn, class Sym, class Rel, class Rela> 505 528 status_t 506 boot_elf_resolve_symbol(struct preloaded_image *image, 529 elf64_relocate_image(struct preloaded_image *image) 530 { 531 status_t status = elf_parse_dynamic_section<Dyn, Sym, Rel, Rela>(image); 532 if (status != B_OK) 533 return status; 534 535 // deal with the rels first 536 if (image->rel) { 537 TRACE(("total %i relocs\n", 538 image->rel_len / (int)sizeof(Rel))); 539 540 status = boot_arch_elf64_relocate_rel(image, (Rel*)((uint64)image->rel), image->rel_len); 541 if (status < B_OK) 542 return status; 543 } 544 545 if (image->pltrel) { 546 TRACE(("total %i plt-relocs\n", 547 image->pltrel_len / (int)sizeof(Rel))); 548 549 if (image->pltrel_type == DT_REL) { 550 status = boot_arch_elf64_relocate_rel(image, (Rel *)(addr_t)image->pltrel, 551 image->pltrel_len); 552 } else { 553 status = boot_arch_elf64_relocate_rela(image, 554 (Rela *)(addr_t)image->pltrel, image->pltrel_len); 555 } 556 if (status < B_OK) 557 return status; 558 } 559 560 if (image->rela) { 561 TRACE(("total %i rela relocs\n", 562 image->rela_len / (int)sizeof(Rela))); 563 status = boot_arch_elf64_relocate_rela(image, (Rela *)(addr_t)image->rela, 564 image->rela_len); 565 if (status < B_OK) 566 return status; 567 } 568 569 return B_OK; 570 } 571 572 573 status_t 574 elf_relocate_image(struct preloaded_image *image) 575 { 576 if(image->elf_size == 32) 577 return elf_relocate_image<Elf32_Dyn, Elf32_Sym, 
Elf32_Rel, Elf32_Rela>(image); 578 return elf64_relocate_image<Elf64_Dyn, Elf64_Sym, Elf64_Rel, Elf64_Rela>(image); 579 } 580 581 582 status_t boot_elf_resolve_symbol(struct preloaded_image *image, 507 583 struct Elf32_Sym *symbol, addr_t *symbolAddress) 508 584 { 509 585 switch (symbol->st_shndx) { … … 524 600 return B_NO_ERROR; 525 601 } 526 602 } 603 604 status_t boot_elf64_resolve_symbol(struct preloaded_image *image, 605 struct Elf64_Sym *symbol, addr_t *symbolAddress) 606 { 607 switch (symbol->st_shndx) { 608 case SHN_UNDEF: 609 // Since we do that only for the kernel, there shouldn't be 610 // undefined symbols. 611 return B_MISSING_SYMBOL; 612 case SHN_ABS: 613 *symbolAddress = symbol->st_value; 614 return B_NO_ERROR; 615 case SHN_COMMON: 616 // ToDo: finish this 617 TRACE(("elf_resolve_symbol: COMMON symbol, finish me!\n")); 618 return B_ERROR; 619 default: 620 // standard symbol 621 *symbolAddress = symbol->st_value + image->text_region.delta; 622 return B_NO_ERROR; 623 } 624 } -
src/system/libroot/os/arch/x86_64/byteorder.S
/*
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

#include <asm_defs.h>

/* Bug fix: the original read its arguments from the stack (ia32 cdecl
 * style). On x86-64 the System V AMD64 ABI passes the first integer
 * argument in %rdi/%edi and passes/returns floating point values in
 * %xmm0, so the stack loads fetched garbage. */

/* uint16 __swap_int16(uint16 value) */
FUNCTION(__swap_int16):
	movl	%edi, %eax
	rolw	$8, %ax
	ret
FUNCTION_END(__swap_int16)

/* this one is much faster on a P4, courtesy of Marcus Overhagen,
 * a good candidate for per processor optimizations: */

FUNCTION(__swap_int16_p4):
	movl	%edi, %eax
	rolw	$8, %ax
	ret
FUNCTION_END(__swap_int16_p4)

/* uint32 __swap_int32(uint32 value) */
FUNCTION(__swap_int32):
	movl	%edi, %eax
	bswap	%eax
	ret
FUNCTION_END(__swap_int32)

/* uint64 __swap_int64(uint64 value) */
FUNCTION(__swap_int64):
	movq	%rdi, %rax
	bswap	%rax
	ret
FUNCTION_END(__swap_int64)

/* float __swap_float(float value) */
FUNCTION(__swap_float):
	movd	%xmm0, %eax
	bswap	%eax
	movd	%eax, %xmm0
	ret
FUNCTION_END(__swap_float)

/* double __swap_double(double value) */
FUNCTION(__swap_double):
	movq	%xmm0, %rax
	bswap	%rax
	movq	%rax, %xmm0
	ret
FUNCTION_END(__swap_double)
src/system/libroot/os/arch/x86_64/system_time.c
1 /* 2 Just a dummy to avoid a special case in the build system. system_time() 3 is implemented in system_time_asm.S. 4 */ -
src/system/libroot/os/arch/x86_64/system_time_asm.S
/*
 * Copyright 2010, Nathan Mentley, nathanmentley@gmail.com.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <asm_defs.h>


/* saves the conversion factor needed for system_time */
/* Bug fix: .lcomm takes "symbol, length" -- the separating commas were
 * missing in the original. */
.lcomm cv_factor, 4
.lcomm cv_factor_nsecs, 4
.lcomm cv_factor_nsecs_shift, 1


.text


FUNCTION(__x86_64_setup_system_time):
	/* TODO: store the conversion factors. Note that on x86-64 the three
	 * arguments arrive in %rdi/%rsi/%rdx per the System V AMD64 ABI, not
	 * on the stack as the original ia32-style (commented-out) code
	 * assumed:
	 *
	 *	movl	%edi, cv_factor
	 *	movl	%esi, cv_factor_nsecs
	 *	movb	%dl, cv_factor_nsecs_shift
	 */
	ret
FUNCTION_END(__x86_64_setup_system_time)


/* TODO: Finish system_time -- these stubs currently return whatever
 * happens to be in %rax. */
/* int64 system_time(); */
FUNCTION(system_time):
	ret
FUNCTION_END(system_time)


/* int64 system_time_nsecs(); */
FUNCTION(system_time_nsecs):
	ret
FUNCTION_END(system_time_nsecs)
src/system/libroot/os/arch/x86_64/atomic.S
/*
** Copyright 2010, Nathan Mentley. nathanmentley@gmail.com. All rights reserved.
** Distributed under the terms of the MIT License.
**
** Copyright 2003, Marcus Overhagen. All rights reserved.
** Distributed under the terms of the OpenBeOS license.
**
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/

#include <asm_defs.h>


.text

/* Bug fix: the original read all arguments from the stack (ia32 cdecl
 * style) and the 32-bit variants dereferenced pointers through 32-bit
 * registers ("(%edx)"). Under the System V AMD64 ABI the first three
 * integer arguments arrive in %rdi, %rsi and %rdx, and pointers are
 * 64 bits wide. */

/* int64 atomic_set64(vint64 *value, int64 newValue) */
FUNCTION(atomic_set64):
	movq	%rsi, %rax
	xchgq	%rax, (%rdi)		// xchg with memory locks implicitly
	ret
FUNCTION_END(atomic_set64)

/* int64 atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst) */
FUNCTION(atomic_test_and_set64):
	movq	%rdx, %rax		// testAgainst is the expected value
	lock
	cmpxchgq	%rsi, (%rdi)
	ret
FUNCTION_END(atomic_test_and_set64)

/* int64 atomic_add64(vint64 *value, int64 addValue) */
FUNCTION(atomic_add64):
	movq	%rsi, %rax
	lock
	xaddq	%rax, (%rdi)
	ret
FUNCTION_END(atomic_add64)

/* int64 atomic_and64(vint64 *value, int64 andValue) */
FUNCTION(atomic_and64):
1:	movq	(%rdi), %rax
	movq	%rax, %rcx
	andq	%rsi, %rcx
	lock
	cmpxchgq	%rcx, (%rdi)
	jnz	1b			// raced with another writer, retry
	ret
FUNCTION_END(atomic_and64)

/* int64 atomic_or64(vint64 *value, int64 orValue) */
FUNCTION(atomic_or64):
1:	movq	(%rdi), %rax
	movq	%rax, %rcx
	orq	%rsi, %rcx
	lock
	cmpxchgq	%rcx, (%rdi)
	jnz	1b
	ret
FUNCTION_END(atomic_or64)

/* int64 atomic_get64(vint64 *value) */
FUNCTION(atomic_get64):
	movq	(%rdi), %rax		// aligned 64-bit loads are atomic on x86-64
	ret
FUNCTION_END(atomic_get64)


/* int32 atomic_set(vint32 *value, int32 newValue) */
FUNCTION(atomic_set):
	movl	%esi, %eax
	xchgl	%eax, (%rdi)
	ret
FUNCTION_END(atomic_set)

/* int32 atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst) */
FUNCTION(atomic_test_and_set):
	movl	%edx, %eax
	lock
	cmpxchgl	%esi, (%rdi)
	ret
FUNCTION_END(atomic_test_and_set)

/* int32 atomic_add(vint32 *value, int32 addValue) */
FUNCTION(atomic_add):
	movl	%esi, %eax
	lock
	xaddl	%eax, (%rdi)
	ret
FUNCTION_END(atomic_add)

/* int32 atomic_and(vint32 *value, int32 andValue) */
FUNCTION(atomic_and):
1:	movl	(%rdi), %eax
	movl	%eax, %ecx
	andl	%esi, %ecx
	lock
	cmpxchgl	%ecx, (%rdi)
	jnz	1b
	ret
FUNCTION_END(atomic_and)

/* int32 atomic_or(vint32 *value, int32 orValue) */
FUNCTION(atomic_or):
1:	movl	(%rdi), %eax
	movl	%eax, %ecx
	orl	%esi, %ecx
	lock
	cmpxchgl	%ecx, (%rdi)
	jnz	1b
	ret
FUNCTION_END(atomic_or)

/* int32 atomic_get(vint32 *value) */
FUNCTION(atomic_get):
	movl	(%rdi), %eax		// aligned 32-bit loads are atomic
	ret
FUNCTION_END(atomic_get)
src/system/libroot/posix/stdlib/strtod.c
126 126 #if defined(__i386__) || defined(__ia64__) || defined(__alpha__) || \ 127 127 defined(__sparc64__) || defined(__powerpc__) || defined(__POWERPC__) || \ 128 128 defined(__m68k__) || defined(__M68K__) || defined(__arm__) || \ 129 defined(__ARM__) || defined(__mipsel__) || defined(__MIPSEL__) 129 defined(__ARM__) || defined(__mipsel__) || defined(__MIPSEL__) || \ 130 defined(__X86_64__) || defined(__x86_64__) 130 131 # include <sys/types.h> 131 132 # if BYTE_ORDER == BIG_ENDIAN 132 133 # define IEEE_BIG_ENDIAN -
src/system/libroot/posix/locale/LocaleData.cpp
7 7 #include <ctype.h> 8 8 #include <limits.h> 9 9 10 #include < PosixCtype.h>11 #include < PosixLocaleConv.h>10 #include <libroot/locale/PosixCtype.h> 11 #include <libroot/locale/PosixLocaleConv.h> 12 12 13 13 #ifndef _KERNEL_MODE 14 #ifndef _BOOT_MODE 14 15 #include <langinfo.h> 15 16 #include <PosixLanginfo.h> 16 17 #include <PosixLCTimeInfo.h> 17 18 #endif 19 #endif 18 20 19 21 20 22 /* … … 171 173 172 174 173 175 #ifndef _KERNEL_MODE 176 #ifndef _BOOT_MODE 174 177 175 178 const struct lc_time_t gPosixLCTimeInfo = { 176 179 { … … 269 272 gPosixLocaleConv.currency_symbol 270 273 }; 271 274 275 #endif // !_BOOT_MODE 272 276 #endif // !_KERNEL_MODE 273 277 274 278 -
src/system/libroot/posix/arch/x86_64/setjmp_internal.h
1 /* 2 ** Copyright 2004, Axel Dörfler, axeld@pinc-software.de. All rights reserved. 3 ** Distributed under the terms of the Haiku License. 4 */ 5 #ifndef SETJMP_INTERNAL_H 6 #define SETJMP_INTERNAL_H 7 8 /* These are the fields of the __jmp_regs structure */ 9 10 #define JMP_REGS_RBX 0 11 #define JMP_REGS_RSI 8 12 #define JMP_REGS_RDI 16 13 #define JMP_REGS_RBP 24 14 #define JMP_REGS_RSP 32 15 #define JMP_REGS_PC 40 16 17 #include <asm_defs.h> 18 19 #endif /* SETJMP_INTERNAL_H */ -
src/system/libroot/posix/arch/x86_64/sigsetjmp.S
/*
 * Copyright 2010, Nathan Mentley, nathanmentley@gmail.com.
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

#include "setjmp_internal.h"


/* int sigsetjmp(jmp_buf buffer, int saveMask) */
FUNCTION(__sigsetjmp):
FUNCTION(sigsetjmp):
	// NOTE(review): arguments are fetched from the stack here, but the
	// System V AMD64 ABI delivers them in %rdi/%rsi -- confirm against
	// the callers and __setjmp_save_sigs before relying on this code.
	// return address to %rdx, stack pointer for return to %rcx (both are
	// scratch registers)
	mov	0(%rsp), %rdx
	lea	8(%rsp), %rcx

	// buffer to %rax
	mov	8(%rsp), %rax

sigsetjmp_setjmp_entry:
	// fill the __jmp_buf structure with the current register contents
	// NOTE(review): %rsi/%rdi are caller-saved on x86-64, while the
	// callee-saved %r12-%r15 are not stored -- this register set looks
	// inherited from the ia32 version; verify against struct __jmp_regs.
	mov	%rbx, JMP_REGS_RBX(%rax)
	mov	%rsi, JMP_REGS_RSI(%rax)
	mov	%rdi, JMP_REGS_RDI(%rax)
	mov	%rbp, JMP_REGS_RBP(%rax)

	// save stack and return address (because that's where we intend to jump to)
	mov	%rcx, JMP_REGS_RSP(%rax)
	mov	%rdx, JMP_REGS_PC(%rax)

	jmp	__setjmp_save_sigs
FUNCTION_END(sigsetjmp)


/* int setjmp(jmp_buf buffer) */
FUNCTION(setjmp):
	// prepare %rdx, %rcx, and %rax for sigsetjmp_setjmp_entry
	mov	0(%rsp), %rdx
	lea	8(%rsp), %rcx
	mov	(%rcx), %rax

	// let sigsetjmp do the real work
	push	$0		// saveMask
	push	%rax		// buffer
	call	sigsetjmp_setjmp_entry
	add	$16, %rsp

	ret
FUNCTION_END(setjmp)


#pragma weak _setjmp=setjmp
src/system/libroot/posix/arch/x86_64/siglongjmp.S
/*
** Copyright 2010, Nathan Mentley, nathanmentley@gmail.com. All rights reserved.
** Distributed under the terms of the MIT License.
**
** Copyright 2004, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
** Distributed under the terms of the Haiku License.
*/

#include "setjmp_internal.h"


/* int __siglongjmp(jmp_buf buffer, int value) */
FUNCTION(siglongjmp):
FUNCTION(longjmp):
FUNCTION(_longjmp):
	// NOTE(review): arguments are fetched from the stack here, but the
	// System V AMD64 ABI delivers them in %rdi/%rsi -- confirm against
	// the callers and __longjmp_return before relying on this code.
	mov	8(%rsp), %rcx		// buffer
	mov	16(%rsp), %rax		// value

	// restore the registers saved by sigsetjmp()
	mov	JMP_REGS_RBX(%rcx), %rbx
	mov	JMP_REGS_RSI(%rcx), %rsi
	mov	JMP_REGS_RDI(%rcx), %rdi
	mov	JMP_REGS_RBP(%rcx), %rbp
	mov	JMP_REGS_RSP(%rcx), %rsp

	// prepare the stack so that we will return to the setjmp() program location
	mov	JMP_REGS_PC(%rcx), %rdx
	push	%rdx			// return address

	// let __longjmp_return deal with the signal mask and the return value
	push	%rax			// value
	push	%rcx			// buffer
	call	__longjmp_return
	add	$16, %rsp

	ret
FUNCTION_END(siglongjmp)


#pragma weak longjmp=siglongjmp
src/system/glue/arch/x86_64/crti.S
/*
 * Copyright 2005-2006, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

/** This file contains the first part of the ".init" and ".fini" sections in
 * the ELF executable.
 * The functions defined here will be called during initialization/termination
 * of the loaded executable/library. The ".init" and ".fini" sections are
 * stacked together like this:
 *
 * crti.S		entry point
 *			call to _init_before/_term_before
 * crtbegin.S		GCC specific: constructors/destructors are called, ...
 * crtend.S
 * crtn.S		call to _init_after/_term_after
 *			exit
 */

#define FUNCTION(x) .global x; .type x,@function; x

.section .init
FUNCTION(_init):
	// Bug fix: "pushl"/"movl" on 64-bit registers is invalid in 64-bit
	// mode, and the image ID argument arrives in %rdi per the System V
	// AMD64 ABI -- it is not on the stack as in the ia32 version.
	push	%rbp
	mov	%rsp, %rbp
	push	%rdi		// preserve the image ID for crtn.S
	sub	$8, %rsp	// keep the stack 16-byte aligned at the call
	call	_init_before	// image ID still in %rdi
	// crtbegin.o stuff comes here

.section .fini
FUNCTION(_fini):
	push	%rbp
	mov	%rsp, %rbp
	push	%rdi		// preserve the image ID for crtn.S
	sub	$8, %rsp
	call	_term_before
	// crtend.o stuff comes here
src/system/glue/arch/x86_64/crtn.S
/*
 * Copyright 2005-2006, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

/** This file contains the final part of the ".init" and ".fini" sections in
 * the ELF executable. It is tightly connected to crti.S.
 * Have a look at crti.S to find a description of what happens here.
 */

.section .init
	// Bug fix: "movl"/"popl" on 64-bit registers is invalid in 64-bit
	// mode; also, _init_after takes the image ID in %rdi (System V AMD64
	// ABI), so pop the value crti.S preserved instead of leaving it on
	// the stack.
	add	$8, %rsp	// drop the alignment padding from crti.S
	pop	%rdi		// image ID -> first argument
	call	_init_after
	mov	%rbp, %rsp
	pop	%rbp
	ret

.section .fini
	add	$8, %rsp
	pop	%rdi		// image ID -> first argument
	call	_term_after
	mov	%rbp, %rsp
	pop	%rbp
	ret
src/system/kernel/arch/x86/arch_elf.cpp
179 179 return B_ERROR; 180 180 } 181 181 182 183 /* elf64 support. Only needed in boot mode */ 184 #ifdef _BOOT_MODE 185 status_t 186 boot_arch_elf64_relocate_rel(struct preloaded_image *image, 187 struct Elf64_Rel *rel, int relLength) 188 { 189 addr_t S; 190 addr_t A; 191 addr_t P; 192 addr_t finalAddress; 193 addr_t *resolveAddress; 194 int i; 195 196 S = A = P = 0; 197 198 for (i = 0; i * (int)sizeof(struct Elf64_Rel) < relLength; i++) { 199 TRACE(("looking at rel type %s, offset 0x%lx\n", 200 kRelocations[ELF64_R_TYPE(rel[i].r_info)], rel[i].r_offset)); 201 202 // calc S 203 switch (ELF64_R_TYPE(rel[i].r_info)) { 204 case R_X86_64_32: 205 case R_X86_64_PC32: 206 case R_X86_64_GLOB_DAT: 207 case R_X86_64_JMP_SLOT: 208 { 209 struct Elf64_Sym *symbol; 210 status_t status; 211 212 symbol = SYMBOL64(image, ELF64_R_SYM(rel[i].r_info)); 213 214 status = boot_elf64_resolve_symbol(image, symbol, &S); 215 216 if (status < B_OK) 217 return status; 218 TRACE(("S %p (%s)\n", (void *)S, SYMNAME(image, symbol))); 219 } 220 } 221 // calc A 222 switch (ELF64_R_TYPE(rel[i].r_info)) { 223 case R_X86_64_32: 224 case R_X86_64_PC32: 225 case R_X86_64_GOT32: 226 case R_X86_64_PLT32: 227 case R_X86_64_RELATIVE: 228 case R_X86_64_GOTOFF: 229 A = *(addr_t *)(image->text_region.delta + rel[i].r_offset); 230 TRACE(("A %p\n", (void *)A)); 231 break; 232 } 233 // calc P 234 switch (ELF64_R_TYPE(rel[i].r_info)) { 235 case R_X86_64_PC32: 236 case R_X86_64_GOT32: 237 case R_X86_64_PLT32: 238 P = image->text_region.delta + rel[i].r_offset; 239 TRACE(("P %p\n", (void *)P)); 240 break; 241 } 242 243 switch (ELF64_R_TYPE(rel[i].r_info)) { 244 case R_X86_64_NONE: 245 continue; 246 case R_X86_64_32: 247 finalAddress = S + A; 248 break; 249 case R_X86_64_PC32: 250 finalAddress = S + A - P; 251 break; 252 case R_X86_64_RELATIVE: 253 // B + A; 254 finalAddress = image->text_region.delta + A; 255 break; 256 case R_X86_64_JMP_SLOT: 257 case R_X86_64_GLOB_DAT: 258 finalAddress = S; 259 break; 260 261 
default: 262 dprintf("arch_elf_relocate_rel: unhandled relocation type %d\n", 263 ELF64_R_TYPE(rel[i].r_info)); 264 return B_BAD_DATA; 265 } 266 267 resolveAddress = (addr_t *)(image->text_region.delta + rel[i].r_offset); 268 269 *resolveAddress = finalAddress; 270 TRACE(("-> offset %#lx = %#lx\n", 271 (image->text_region.delta + rel[i].r_offset), finalAddress)); 272 } 273 274 return B_NO_ERROR; 275 } 276 277 278 status_t 279 boot_arch_elf64_relocate_rela(struct preloaded_image *image, 280 struct Elf64_Rela *rela, int relLength) 281 { 282 addr_t S; 283 addr_t A; 284 addr_t P; 285 addr_t finalAddress; 286 addr_t *resolveAddress; 287 int i; 288 289 S = A = P = 0; 290 291 for (i = 0; i * (int)sizeof(struct Elf64_Rela) < relLength; i++) { 292 TRACE(("looking at rel type %s, offset 0x%lx\n", 293 kRelocations[ELF64_R_TYPE(rela[i].r_info)], rela[i].r_offset)); 294 295 // calc S 296 switch (ELF64_R_TYPE(rela[i].r_info)) { 297 case R_X86_64_32: 298 case R_X86_64_PC32: 299 case R_X86_64_GLOB_DAT: 300 case R_X86_64_JMP_SLOT: 301 { 302 struct Elf64_Sym *symbol; 303 status_t status; 304 305 symbol = SYMBOL64(image, ELF64_R_SYM(rela[i].r_info)); 306 307 status = boot_elf64_resolve_symbol(image, symbol, &S); 308 309 if (status < B_OK) 310 return status; 311 TRACE(("S %p (%s)\n", (void *)S, SYMNAME(image, symbol))); 312 } 313 } 314 // calc A 315 switch (ELF64_R_TYPE(rela[i].r_info)) { 316 case R_X86_64_32: 317 case R_X86_64_PC32: 318 case R_X86_64_GOT32: 319 case R_X86_64_PLT32: 320 case R_X86_64_RELATIVE: 321 case R_X86_64_GOTOFF: 322 A = *(addr_t *)(image->text_region.delta + rela[i].r_offset); 323 TRACE(("A %p\n", (void *)A)); 324 break; 325 } 326 // calc P 327 switch (ELF64_R_TYPE(rela[i].r_info)) { 328 case R_X86_64_PC32: 329 case R_X86_64_GOT32: 330 case R_X86_64_PLT32: 331 P = image->text_region.delta + rela[i].r_offset; 332 TRACE(("P %p\n", (void *)P)); 333 break; 334 } 335 336 switch (ELF64_R_TYPE(rela[i].r_info)) { 337 case R_X86_64_NONE: 338 continue; 339 case 
R_X86_64_32: 340 finalAddress = S + A; 341 break; 342 case R_X86_64_PC32: 343 finalAddress = S + A - P; 344 break; 345 case R_X86_64_RELATIVE: 346 // B + A; 347 finalAddress = image->text_region.delta + A; 348 break; 349 case R_X86_64_JMP_SLOT: 350 case R_X86_64_GLOB_DAT: 351 finalAddress = S; 352 break; 353 354 default: 355 dprintf("arch_elf_relocate_rel: unhandled relocation type %d\n", 356 ELF64_R_TYPE(rela[i].r_info)); 357 return B_BAD_DATA; 358 } 359 360 resolveAddress = (addr_t *)(image->text_region.delta + rela[i].r_offset); 361 362 *resolveAddress = finalAddress; 363 TRACE(("-> offset %#lx = %#lx\n", 364 (image->text_region.delta + rela[i].r_offset), finalAddress)); 365 } 366 367 return B_NO_ERROR; 368 } 369 #endif -
src/system/kernel/arch/x86/arch_cpu.cpp
731 731 732 732 for (i = 0; i < args->num_cpus; i++) { 733 733 set_segment_descriptor(&gGDT[TLS_BASE_SEGMENT + i], 0, TLS_SIZE, 734 DT_DATA_WRITEABLE, DPL_USER );734 DT_DATA_WRITEABLE, DPL_USER, false); 735 735 736 736 } 737 737 // setup SSE2/3 support \
src/system/kernel/arch/x86/apm.cpp
316 316 317 317 set_segment_descriptor(&gGDT[BIOS_DATA_SEGMENT >> 3], 318 318 biosData, B_PAGE_SIZE - biosData, 319 DT_DATA_WRITEABLE, DPL_KERNEL );319 DT_DATA_WRITEABLE, DPL_KERNEL, false); 320 320 321 321 // TODO: test if APM segments really are in the BIOS ROM area (especially the 322 322 // data segment) … … 328 328 329 329 set_segment_descriptor(&gGDT[APM_CODE32_SEGMENT >> 3], 330 330 gBiosBase + (info.code32_segment_base << 4) - 0xe0000, 0xffff, 331 DT_CODE_READABLE, DPL_KERNEL );331 DT_CODE_READABLE, DPL_KERNEL, false); 332 332 set_segment_descriptor(&gGDT[APM_CODE16_SEGMENT >> 3], 333 333 gBiosBase + (info.code16_segment_base << 4) - 0xe0000, 0xffff, 334 DT_CODE_READABLE, DPL_KERNEL );334 DT_CODE_READABLE, DPL_KERNEL, false); 335 335 gGDT[APM_CODE16_SEGMENT >> 3].d_b = 0; 336 336 // 16-bit segment 337 337 … … 343 343 344 344 set_segment_descriptor(&gGDT[APM_DATA_SEGMENT >> 3], 345 345 (addr_t)gDmaAddress + (info.data_segment_base << 4), info.data_segment_length, 346 DT_DATA_WRITEABLE, DPL_KERNEL );346 DT_DATA_WRITEABLE, DPL_KERNEL, false); 347 347 } else { 348 348 // use the BIOS area as data segment 349 349 set_segment_descriptor(&gGDT[APM_DATA_SEGMENT >> 3], 350 350 gBiosBase + (info.data_segment_base << 4) - 0xe0000, 0xffff, 351 DT_DATA_WRITEABLE, DPL_KERNEL );351 DT_DATA_WRITEABLE, DPL_KERNEL, false); 352 352 } 353 353 354 354 // setup APM entry point