From 3fed0d9d0bd5ca3a2e123ed512a06b2c96cd83a4 Mon Sep 17 00:00:00 2001
From: Yongcong Du <ycdu.vmcore@gmail.com>
Date: Sat, 7 Apr 2012 16:07:29 +0800
Subject: [PATCH] x86: AMD C1E with no ARAT (Always Running APIC Timer) idle
 support

AMD C1E is a BIOS-controlled C3 state. Certain processor families may
cut off the TSC and the lapic timer when they are in a deep C-state,
including C1E, so the CPUs can't be woken up and the system hangs.

This patch first adds support for selecting the idle routine during
boot. It then implements the amdc1e_noarat_idle() routine, which checks
the MSR containing the C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27)
flags before executing the halt instruction and clears them if they are
set.
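
For reference, the new idle routine boils down to the following sketch
(it simply mirrors the code added below; K8_MSR_IPM is MSR 0xc0010055):

	#define K8_SMIONCMPHALT		(1ULL << 27)
	#define K8_C1EONCMPHALT		(1ULL << 28)
	#define K8_CMPHALT	(K8_SMIONCMPHALT | K8_C1EONCMPHALT)

	static void
	amdc1e_noarat_idle(void)
	{
		// clear SmiOnCmpHalt/C1eOnCmpHalt so the hlt below stays in C1
		// instead of being promoted to C1E, where the lapic timer may stop
		uint64 msr = x86_read_msr(K8_MSR_IPM);
		if (msr & K8_CMPHALT)
			x86_write_msr(K8_MSR_IPM, msr & ~K8_CMPHALT);
		asm("hlt");
	}

The routine is only installed on the AMD families that actually need it
(see detect_amdc1e_noarat() in the diff); all other CPUs keep the plain
hlt idle loop.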

Intel C1E doesn't have this problem: there, C1E just means "reduce CPU
voltage before entering the corresponding Cx-state". AMD C1E, on the
other hand, is a BIOS-controlled C3 state; the difference from C3 is
that the transition into C1E is not initiated by the operating system.
The system enters C1E automatically once both cores are in C1.

This patch may fix #8111, #3999, #7562, #7940 and #8060.

Copied from the description of #3999:

>but for some reason I hit the power button instead of the reset one. And
>the boot continued!!

The reason is that the CPUs are woken up once the power button is
pressed.
---
headers/private/kernel/arch/x86/arch_cpu.h | 3 ++
src/system/kernel/arch/x86/arch_cpu.cpp | 53 +++++++++++++++++++++++++++-
2 files changed, 55 insertions(+), 1 deletion(-)
diff --git a/headers/private/kernel/arch/x86/arch_cpu.h b/headers/private/kernel/arch/x86/arch_cpu.h
index e761292..8b71250 100644
--- a/headers/private/kernel/arch/x86/arch_cpu.h
+++ b/headers/private/kernel/arch/x86/arch_cpu.h
@@ -33,6 +33,9 @@
 #define IA32_MSR_MTRR_PHYSICAL_BASE_0	0x200
 #define IA32_MSR_MTRR_PHYSICAL_MASK_0	0x201
 
+// K8 MSR registers
+#define K8_MSR_IPM				0xc0010055
+
 // x86 features from cpuid eax 1, edx register
 // reference http://www.intel.com/assets/pdf/appnote/241618.pdf (Table 5-5)
 #define IA32_FEATURE_FPU	0x00000001 // x87 fpu
diff --git a/src/system/kernel/arch/x86/arch_cpu.cpp b/src/system/kernel/arch/x86/arch_cpu.cpp
index c199afc..993a204 100644
--- a/src/system/kernel/arch/x86/arch_cpu.cpp
+++ b/src/system/kernel/arch/x86/arch_cpu.cpp
@@ -82,6 +82,7 @@ extern "C" void reboot(void);
 	// from arch_x86.S
 
 void (*gX86SwapFPUFunc)(void *oldState, const void *newState);
+void (*gCpuIdleFunc)(void);
 bool gHasSSE = false;
 
 static uint32 sCpuRendezvous;
@@ -699,6 +700,48 @@ arch_cpu_preboot_init_percpu(kernel_args *args, int cpu)
 }
 
 
+static void
+halt_idle(void)
+{
+	asm("hlt");
+}
+
+
+#define K8_SMIONCMPHALT		(1ULL << 27)
+#define K8_C1EONCMPHALT		(1ULL << 28)
+#define K8_CMPHALT	(K8_SMIONCMPHALT | K8_C1EONCMPHALT)
+static void
+amdc1e_noarat_idle(void)
+{
+	uint64 msr = x86_read_msr(K8_MSR_IPM);
+	if (msr & K8_CMPHALT)
+		x86_write_msr(K8_MSR_IPM, msr & ~K8_CMPHALT);
+	halt_idle();
+}
+
+
+static bool
+detect_amdc1e_noarat()
+{
+	cpu_ent *cpu = get_cpu_struct();
+
+	if (cpu->arch.vendor != VENDOR_AMD)
+		return false;
+
+	// Family 0x12 and higher processors support ARAT.
+	// Families lower than 0xf don't support C1E.
+	// Family 0xf models <= 0x40 don't support C1E either.
+	uint32 family = cpu->arch.family + cpu->arch.extended_family;
+	uint32 model = (cpu->arch.extended_model << 4) | cpu->arch.model;
+	if (family < 0x12 && family > 0xf)
+		return true;
+	else if (family == 0xf && model > 0x40)
+		return true;
+	else
+		return false;
+}
+
+
 status_t
 arch_cpu_init_percpu(kernel_args *args, int cpu)
 {
@@ -721,9 +764,16 @@ arch_cpu_init_percpu(kernel_args *args, int cpu)
 		asm volatile("lidt %0" : : "m"(descriptor));
 	}
 
+	if (!gCpuIdleFunc) {
+		if (detect_amdc1e_noarat())
+			gCpuIdleFunc = amdc1e_noarat_idle;
+		else
+			gCpuIdleFunc = halt_idle;
+	}
 	return 0;
 }
 
+
 status_t
 arch_cpu_init(kernel_args *args)
 {
@@ -882,6 +932,7 @@ i386_set_tss_and_kstack(addr_t kstack)
 	get_cpu_struct()->arch.tss.sp0 = kstack;
 }
 
+
 void
 arch_cpu_global_TLB_invalidate(void)
 {
@@ -1008,7 +1059,7 @@ arch_cpu_shutdown(bool rebootSystem)
 void
 arch_cpu_idle(void)
 {
-	asm("hlt");
+	gCpuIdleFunc();
 }
 
 