From 2dead15fb8f6522b96c913603b5ad0b5c7d01f49 Mon Sep 17 00:00:00 2001
From: Lans Zhang
Date: Fri, 1 Mar 2013 09:20:39 +0800
Subject: x86_64: Use __BOOT_DS instead of __KERNEL_DS for safety

In startup_32, the running code still uses the initial GDT located in
setup. Thus, __BOOT_DS is preferred. Currently __KERNEL_DS happens to
equal __BOOT_DS, but relying on that is not safe.

Signed-off-by: Lans Zhang
Link: http://lkml.kernel.org/r/51300267.6000008@gmail.com
Signed-off-by: H. Peter Anvin
---
 arch/x86/boot/compressed/head_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c1d383d1fb7e..16f24e6dad79 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -52,7 +52,7 @@ ENTRY(startup_32)
 	jnz	1f

 	cli
-	movl	$(__KERNEL_DS), %eax
+	movl	$(__BOOT_DS), %eax
 	movl	%eax, %ds
 	movl	%eax, %es
 	movl	%eax, %ss
--
cgit v1.2.3

From 8e3c2a8cf647e2f3a1e9d38009b0d1c02145822f Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Mon, 4 Mar 2013 21:16:17 +0100
Subject: x86: Drop KERNEL_IMAGE_START

We have KERNEL_IMAGE_START and __START_KERNEL_map, both of which contain
the start virtual address of the kernel text mapping. Remove the former,
which is replicated far less often around the tree.

No functionality change.

Signed-off-by: Borislav Petkov
Link: http://lkml.kernel.org/r/1362428180-8865-3-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/page_64_types.h | 1 -
 arch/x86/kernel/head64.c             | 6 +++---
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8b491e66eaa8..6c896fbe21db 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -48,6 +48,5 @@
  * arch/x86/kernel/head_64.S), and it is mapped here:
  */
 #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
-#define KERNEL_IMAGE_START	_AC(0xffffffff80000000, UL)

 #endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index c5e403f6d869..101ac1a9263e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -144,10 +144,10 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	 * Build-time sanity checks on the kernel image and module
 	 * area mappings. (these are purely build-time and produce no code)
 	 */
-	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
-	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
+	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
+	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
 	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
-	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
+	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
 	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
 	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
 	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
--
cgit v1.2.3
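The BUILD_BUG_ON() lines touched in the head64.c hunk above are purely
compile-time assertions: if a layout invariant is violated the build breaks,
and no runtime code is emitted either way. For readers who have not met the
idiom, here is a small stand-alone sketch of the idea in plain C. The macro
and the constants (MY_BUILD_BUG_ON, START_MAP, MODULES_VA, IMAGE_SIZE) are
made up for this example and are not the kernel's definitions; the kernel's
own BUILD_BUG_ON is more elaborate.

#include <stdio.h>

/*
 * One common way to build a compile-time assert in plain C: a true (failing)
 * condition yields a negative array size, which is a compile error.  A false
 * condition yields char[1], and the cast to void means nothing is generated.
 * This only works when the condition is an integer constant expression;
 * otherwise the array silently becomes a VLA and nothing is checked.
 */
#define MY_BUILD_BUG_ON(cond)  ((void)sizeof(char[1 - 2 * !!(cond)]))

/* Stand-in constants for the example only (not the kernel's symbols). */
#define START_MAP   0xffffffff80000000UL   /* cf. __START_KERNEL_map */
#define MODULES_VA  0xffffffffa0000000UL   /* cf. MODULES_VADDR */
#define IMAGE_SIZE  (512UL * 1024 * 1024)  /* cf. KERNEL_IMAGE_SIZE */

int main(void)
{
        /* Compiles only if the layout invariants hold; emits no code. */
        MY_BUILD_BUG_ON(MODULES_VA < START_MAP);
        MY_BUILD_BUG_ON(MODULES_VA - START_MAP < IMAGE_SIZE);

        puts("layout invariants hold at compile time");
        return 0;
}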
From aca91bfc67fe356544eb1cfc483c09b27ce49fa9 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Mon, 4 Mar 2013 21:16:18 +0100
Subject: x86-64, docs, mm: Add vsyscall range to virtual address space layout

Add the end of the virtual address space to its layout documentation.

Signed-off-by: Borislav Petkov
Link: http://lkml.kernel.org/r/1362428180-8865-4-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin
---
 Documentation/x86/x86_64/mm.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index d6498e3cd713..881582f75c9c 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -13,7 +13,9 @@ ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
 ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - fffffffffff00000 (=1536 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffff600000 - ffffffffffdfffff (=8 MB)    vsyscalls
+ffffffffffe00000 - ffffffffffffffff (=2 MB)    unused hole

 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
--
cgit v1.2.3

From 1423bed239415edd1562c25be8a7408858fdbb19 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Mon, 4 Mar 2013 21:16:19 +0100
Subject: x86, msr: Unify variable names

Make sure all MSR-accessing primitives which split MSR values into two
32-bit parts have their variables called 'low' and 'high' for
consistency with the rest of the code and for ease of staring at it.

Signed-off-by: Borislav Petkov
Link: http://lkml.kernel.org/r/1362428180-8865-5-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/msr.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9264802e2824..cb7502852acb 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -137,11 +137,11 @@ static inline unsigned long long native_read_pmc(int counter)
  * pointer indirection), this allows gcc to optimize better
  */

-#define rdmsr(msr, val1, val2)					\
+#define rdmsr(msr, low, high)					\
 do {								\
 	u64 __val = native_read_msr((msr));			\
-	(void)((val1) = (u32)__val);				\
-	(void)((val2) = (u32)(__val >> 32));			\
+	(void)((low) = (u32)__val);				\
+	(void)((high) = (u32)(__val >> 32));			\
 } while (0)

 static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
@@ -162,12 +162,12 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 }

 /* rdmsr with exception handling */
-#define rdmsr_safe(msr, p1, p2)					\
+#define rdmsr_safe(msr, low, high)				\
 ({								\
 	int __err;						\
 	u64 __val = native_read_msr_safe((msr), &__err);	\
-	(*p1) = (u32)__val;					\
-	(*p2) = (u32)(__val >> 32);				\
+	(*low) = (u32)__val;					\
+	(*high) = (u32)(__val >> 32);				\
 	__err;							\
 })

@@ -208,7 +208,7 @@ do {							\
 #define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),		\
 					     (u32)((val) >> 32))

-#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
+#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

 #define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
--
cgit v1.2.3
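The rename above is purely cosmetic, but the low/high convention it settles on
is worth spelling out: RDMSR/WRMSR move a 64-bit MSR value through the EDX:EAX
register pair, so the C-level primitives hand the caller a 'high' and a 'low'
32-bit half. Below is a stand-alone user-space sketch of that split and
rejoin; split_msr() and join_msr() are invented names for illustration, not
kernel API, and no real MSR is accessed.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit MSR-style value into its low/high 32-bit halves,
 * mirroring what rdmsr(msr, low, high) hands back to its caller. */
static void split_msr(uint64_t val, uint32_t *low, uint32_t *high)
{
        *low  = (uint32_t)val;
        *high = (uint32_t)(val >> 32);
}

/* Rebuild the 64-bit value from the two halves, as wrmsr(msr, low, high)
 * conceptually does before WRMSR consumes EDX:EAX. */
static uint64_t join_msr(uint32_t low, uint32_t high)
{
        return ((uint64_t)high << 32) | low;
}

int main(void)
{
        uint64_t tsc_like = 0x12345678deadbeefULL;  /* arbitrary test value */
        uint32_t low, high;

        split_msr(tsc_like, &low, &high);
        printf("low=0x%08" PRIx32 " high=0x%08" PRIx32 "\n", low, high);

        assert(join_msr(low, high) == tsc_like);
        return 0;
}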
From 73f460408ca9b6e917f32c89c9a85c586f17f732 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Mon, 4 Mar 2013 21:16:20 +0100
Subject: x86, quirks: Shut up a long-standing gcc warning
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

gcc has been nagging about these forever in randconfig builds:

arch/x86/kernel/quirks.c: In function ‘ati_ixp4x0_rev’:
arch/x86/kernel/quirks.c:361:4: warning: ‘b’ is used uninitialized in this function [-Wuninitialized]
arch/x86/kernel/quirks.c: In function ‘ati_force_enable_hpet’:
arch/x86/kernel/quirks.c:367:4: warning: ‘d’ may be used uninitialized in this function [-Wuninitialized]
arch/x86/kernel/quirks.c:357:6: note: ‘d’ was declared here
arch/x86/kernel/quirks.c:407:21: warning: ‘val’ may be used uninitialized in this function [-Wuninitialized]

This quirk is called only on an SB400 chipset anyway, so the chance of a
PCI access failing there is remote. Even if one did fail, something more
serious would be the problem.

So zero out the variables so that gcc quiets down, but do a coarse check
on the PCI accesses at the end and signal whether any of them returned an
error. They shouldn't, but in case they do, we'll at least know about it
and can address it.

Signed-off-by: Borislav Petkov
Link: http://lkml.kernel.org/r/1362428180-8865-6-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin
---
 arch/x86/kernel/quirks.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 26ee48a33dc4..04ee1e2e4c02 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -354,18 +354,22 @@ static void ati_force_hpet_resume(void)

 static u32 ati_ixp4x0_rev(struct pci_dev *dev)
 {
-	u32 d;
-	u8 b;
+	int err = 0;
+	u32 d = 0;
+	u8 b = 0;

-	pci_read_config_byte(dev, 0xac, &b);
+	err = pci_read_config_byte(dev, 0xac, &b);
 	b &= ~(1<<5);
-	pci_write_config_byte(dev, 0xac, b);
-	pci_read_config_dword(dev, 0x70, &d);
+	err |= pci_write_config_byte(dev, 0xac, b);
+	err |= pci_read_config_dword(dev, 0x70, &d);
 	d |= 1<<8;
-	pci_write_config_dword(dev, 0x70, d);
-	pci_read_config_dword(dev, 0x8, &d);
+	err |= pci_write_config_dword(dev, 0x70, d);
+	err |= pci_read_config_dword(dev, 0x8, &d);
 	d &= 0xff;
 	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
+
+	WARN_ON_ONCE(err);
+
 	return d;
 }

--
cgit v1.2.3

From bb916ff7cda2ad52be433b7b1f355b79b5d7d5ee Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Mon, 15 Apr 2013 12:06:09 +0300
Subject: x86/lib: Fix spelling in the comments

Apparently 'byts' should be 'bytes'.

Signed-off-by: Andy Shevchenko
Cc: H. Peter Anvin
Signed-off-by: Ingo Molnar
---
 arch/x86/lib/memcpy_32.c  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index b908a59eccf5..20b0eaf14c60 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -51,7 +51,7 @@ void *memmove(void *dest, const void *src, size_t n)
 		   "sub $0x10, %0\n\t"

 		   /*
-		    * We gobble 16byts forward in each loop.
+		    * We gobble 16 bytes forward in each loop.
 		    */
 		   "3:\n\t"
 		   "sub $0x10, %0\n\t"
@@ -117,7 +117,7 @@ void *memmove(void *dest, const void *src, size_t n)
 		   "sub $0x10, %0\n\t"

 		   /*
-		    * We gobble 16byts backward in each loop.
+		    * We gobble 16 bytes backward in each loop.
 		    */
 		   "7:\n\t"
 		   "sub $0x10, %0\n\t"
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ee164610ec46..f3d83cbfc831 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -56,7 +56,7 @@ ENTRY(memmove)
 3:
 	sub $0x20, %rdx
 	/*
-	 * We gobble 32byts forward in each loop.
+	 * We gobble 32 bytes forward in each loop.
 	 */
 5:
 	sub $0x20, %rdx
@@ -122,7 +122,7 @@ ENTRY(memmove)
 	addq %rdx, %rdi
 	subq $0x20, %rdx
 	/*
-	 * We gobble 32byts backward in each loop.
+	 * We gobble 32 bytes backward in each loop.
 	 */
 8:
 	subq $0x20, %rdx
--
cgit v1.2.3

From d50ba3687b99213501463a1947e3dd5b98bc2d99 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Mon, 15 Apr 2013 12:06:10 +0300
Subject: x86/lib: Fix spelling, put space between a numeral and its units

As suggested by Peter Anvin.

Signed-off-by: Andy Shevchenko
Cc: H. Peter Anvin
Signed-off-by: Ingo Molnar
---
 arch/x86/lib/checksum_32.S | 2 +-
 arch/x86/lib/memcpy_32.c   | 2 +-
 arch/x86/lib/memcpy_64.S   | 2 +-
 arch/x86/lib/memmove_64.S  | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 2af5df3ade7c..e78b8eee6615 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -61,7 +61,7 @@ ENTRY(csum_partial)
 	testl $3, %esi		# Check alignment.
 	jz 2f			# Jump if alignment is ok.
 	testl $1, %esi		# Check alignment.
-	jz 10f			# Jump if alignment is boundary of 2bytes.
+	jz 10f			# Jump if alignment is boundary of 2 bytes.

 	# buf is odd
 	dec %ecx
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index 20b0eaf14c60..e78761d6b7f8 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n)
 	char *ret = dest;

 	__asm__ __volatile__(
-		/* Handle more 16bytes in loop */
+		/* Handle more 16 bytes in loop */
 		"cmp $0x10, %0\n\t"
 		"jb 1f\n\t"

diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1c273be7c97e..56313a326188 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -98,7 +98,7 @@ ENTRY(memcpy)
 	subq $0x20,	%rdx
 	/*
 	 * At most 3 ALU operations in one cycle,
-	 * so append NOPS in the same 16bytes trunk.
+	 * so append NOPS in the same 16 bytes trunk.
 	 */
 	.p2align 4
 .Lcopy_backward_loop:
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index f3d83cbfc831..65268a6104f4 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -27,7 +27,7 @@ ENTRY(memmove)

 	CFI_STARTPROC

-	/* Handle more 32bytes in loop */
+	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
 	cmp $0x20, %rdx
 	jb	1f
--
cgit v1.2.3
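Returning to the quirks change earlier in this series: the pattern it uses
(zero-initialize the outputs, OR together the return codes of a run of
accesses, warn once at the end) is easy to reuse outside PCI code. Below is a
stand-alone sketch of that pattern; fake_read_cfg() and fake_write_cfg() are
invented stand-ins for the pci_read_config_*()/pci_write_config_*() helpers,
and a plain fprintf() stands in for WARN_ON_ONCE().

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for config-space accessors: return 0 on success and a
 * non-zero error code on failure, like the real helpers do. */
static int fake_read_cfg(int where, uint32_t *val)
{
        *val = 0x100 | (uint32_t)where;  /* pretend the device answered */
        return 0;
}

static int fake_write_cfg(int where, uint32_t val)
{
        (void)where;
        (void)val;
        return 0;
}

static uint32_t read_revision(void)
{
        int err = 0;       /* accumulates the result of every access */
        uint32_t d = 0;    /* zero-initialized, so never read uninitialized */

        err  = fake_read_cfg(0x70, &d);
        d |= 1 << 8;
        err |= fake_write_cfg(0x70, d);
        err |= fake_read_cfg(0x8, &d);
        d &= 0xff;

        /* One coarse check at the end instead of checking every access. */
        if (err)
                fprintf(stderr, "config space access failed (err=%d)\n", err);

        return d;
}

int main(void)
{
        printf("revision 0x%x\n", (unsigned int)read_revision());
        return 0;
}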