Diffstat (limited to 'include/asm-x86_64')
39 files changed, 379 insertions, 389 deletions
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild index 1ee9b07f3fe6..ebd7117782a6 100644 --- a/include/asm-x86_64/Kbuild +++ b/include/asm-x86_64/Kbuild @@ -6,13 +6,11 @@ ALTARCHDEF := defined __i386__ header-y += boot.h header-y += bootsetup.h -header-y += cpufeature.h header-y += debugreg.h header-y += ldt.h header-y += msr.h header-y += prctl.h header-y += ptrace-abi.h -header-y += setup.h header-y += sigcontext32.h header-y += ucontext.h header-y += vsyscall32.h diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h index 9d1916e59c04..6b6fc6f8be7e 100644 --- a/include/asm-x86_64/acpi.h +++ b/include/asm-x86_64/acpi.h @@ -54,30 +54,8 @@ #define ACPI_ENABLE_IRQS() local_irq_enable() #define ACPI_FLUSH_CPU_CACHE() wbinvd() - -static inline int -__acpi_acquire_global_lock (unsigned int *lock) -{ - unsigned int old, new, val; - do { - old = *lock; - new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); - val = cmpxchg(lock, old, new); - } while (unlikely (val != old)); - return (new < 3) ? -1 : 0; -} - -static inline int -__acpi_release_global_lock (unsigned int *lock) -{ - unsigned int old, new, val; - do { - old = *lock; - new = old & ~0x3; - val = cmpxchg(lock, old, new); - } while (unlikely (val != old)); - return old & 0x1; -} +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); #define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h index a584826cc570..a6657b4f3e0e 100644 --- a/include/asm-x86_64/alternative.h +++ b/include/asm-x86_64/alternative.h @@ -4,6 +4,7 @@ #ifdef __KERNEL__ #include <linux/types.h> +#include <linux/stddef.h> #include <asm/cpufeature.h> struct alt_instr { @@ -133,4 +134,15 @@ static inline void alternatives_smp_switch(int smp) {} #define LOCK_PREFIX "" #endif +struct paravirt_patch; +#ifdef CONFIG_PARAVIRT +void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end); +#else +static inline void +apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) +{} +#define __start_parainstructions NULL +#define __stop_parainstructions NULL +#endif + #endif /* _X86_64_ALTERNATIVE_H */ diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index 007e88d6d43f..706ca4b60000 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h @@ -21,7 +21,7 @@ * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ -typedef struct { volatile int counter; } atomic_t; +typedef struct { int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } @@ -189,9 +189,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) { int __i = i; __asm__ __volatile__( - LOCK_PREFIX "xaddl %0, %1;" - :"=r"(i) - :"m"(v->counter), "0"(i)); + LOCK_PREFIX "xaddl %0, %1" + :"+r" (i), "+m" (v->counter) + : : "memory"); return i + __i; } diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h index 5b535eaf5309..8da9609070f4 100644 --- a/include/asm-x86_64/bitops.h +++ b/include/asm-x86_64/bitops.h @@ -7,7 +7,13 @@ #include <asm/alternative.h> -#define ADDR (*(volatile long *) addr) +#if __GNUC__ < 4 || __GNUC_MINOR__ < 1 +/* Technically wrong, but this avoids compilation errors on some gcc + versions. 
*/ +#define ADDR "=m" (*(volatile long *) addr) +#else +#define ADDR "+m" (*(volatile long *) addr) +#endif /** * set_bit - Atomically set a bit in memory @@ -23,7 +29,7 @@ static __inline__ void set_bit(int nr, volatile void * addr) { __asm__ __volatile__( LOCK_PREFIX "btsl %1,%0" - :"+m" (ADDR) + :ADDR :"dIr" (nr) : "memory"); } @@ -40,7 +46,7 @@ static __inline__ void __set_bit(int nr, volatile void * addr) { __asm__ volatile( "btsl %1,%0" - :"+m" (ADDR) + :ADDR :"dIr" (nr) : "memory"); } @@ -58,7 +64,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr) { __asm__ __volatile__( LOCK_PREFIX "btrl %1,%0" - :"+m" (ADDR) + :ADDR :"dIr" (nr)); } @@ -66,7 +72,7 @@ static __inline__ void __clear_bit(int nr, volatile void * addr) { __asm__ __volatile__( "btrl %1,%0" - :"+m" (ADDR) + :ADDR :"dIr" (nr)); } @@ -86,7 +92,7 @@ static __inline__ void __change_bit(int nr, volatile void * addr) { __asm__ __volatile__( "btcl %1,%0" - :"+m" (ADDR) + :ADDR :"dIr" (nr)); } @@ -103,7 +109,7 @@ static __inline__ void change_bit(int nr, volatile void * addr) { __asm__ __volatile__( LOCK_PREFIX "btcl %1,%0" - :"+m" (ADDR) + :ADDR :"dIr" (nr)); } @@ -121,7 +127,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) __asm__ __volatile__( LOCK_PREFIX "btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) + :"=r" (oldbit),ADDR :"dIr" (nr) : "memory"); return oldbit; } @@ -141,7 +147,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr) __asm__( "btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) + :"=r" (oldbit),ADDR :"dIr" (nr)); return oldbit; } @@ -160,7 +166,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) __asm__ __volatile__( LOCK_PREFIX "btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) + :"=r" (oldbit),ADDR :"dIr" (nr) : "memory"); return oldbit; } @@ -180,7 +186,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) __asm__( "btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) + :"=r" (oldbit),ADDR :"dIr" (nr)); return oldbit; } @@ -192,7 +198,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr) __asm__ __volatile__( "btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) + :"=r" (oldbit),ADDR :"dIr" (nr) : "memory"); return oldbit; } @@ -211,7 +217,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) __asm__ __volatile__( LOCK_PREFIX "btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) + :"=r" (oldbit),ADDR :"dIr" (nr) : "memory"); return oldbit; } @@ -237,7 +243,7 @@ static __inline__ int variable_test_bit(int nr, volatile const void * addr) __asm__ __volatile__( "btl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit) - :"m" (ADDR),"dIr" (nr)); + :"m" (*(volatile long *)addr),"dIr" (nr)); return oldbit; } diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h index 80ac1fe966ac..682606414913 100644 --- a/include/asm-x86_64/bug.h +++ b/include/asm-x86_64/bug.h @@ -1,30 +1,30 @@ #ifndef __ASM_X8664_BUG_H #define __ASM_X8664_BUG_H 1 -#include <linux/stringify.h> - -/* - * Tell the user there is some problem. The exception handler decodes - * this frame. - */ -struct bug_frame { - unsigned char ud2[2]; - unsigned char push; - signed int filename; - unsigned char ret; - unsigned short line; -} __attribute__((packed)); - #ifdef CONFIG_BUG #define HAVE_ARCH_BUG -/* We turn the bug frame into valid instructions to not confuse - the disassembler. Thanks to Jan Beulich & Suresh Siddha - for nice instruction selection. 
- The magic numbers generate mov $64bitimm,%eax ; ret $offset. */ -#define BUG() \ - asm volatile( \ - "ud2 ; pushq $%c1 ; ret $%c0" :: \ - "i"(__LINE__), "i" (__FILE__)) + +#ifdef CONFIG_DEBUG_BUGVERBOSE +#define BUG() \ + do { \ + asm volatile("1:\tud2\n" \ + ".pushsection __bug_table,\"a\"\n" \ + "2:\t.quad 1b, %c0\n" \ + "\t.word %c1, 0\n" \ + "\t.org 2b+%c2\n" \ + ".popsection" \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (sizeof(struct bug_entry))); \ + for(;;) ; \ + } while(0) +#else +#define BUG() \ + do { \ + asm volatile("ud2"); \ + for(;;) ; \ + } while(0) +#endif + void out_of_line_bug(void); #else static inline void out_of_line_bug(void) { } diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h index d32f7f58752a..ab1cb5c7dc92 100644 --- a/include/asm-x86_64/cacheflush.h +++ b/include/asm-x86_64/cacheflush.h @@ -7,6 +7,7 @@ /* Caches aren't brain-dead on the intel. */ #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) #define flush_dcache_page(page) do { } while (0) diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h index 6b93f5a3a5c8..7ee900645719 100644 --- a/include/asm-x86_64/calgary.h +++ b/include/asm-x86_64/calgary.h @@ -51,6 +51,8 @@ struct iommu_table { #define TCE_TABLE_SIZE_4M 6 #define TCE_TABLE_SIZE_8M 7 +extern int use_calgary; + #ifdef CONFIG_CALGARY_IOMMU extern int calgary_iommu_init(void); extern void detect_calgary(void); diff --git a/include/asm-x86_64/checksum.h b/include/asm-x86_64/checksum.h index 989469e8e0b7..419fe88a0342 100644 --- a/include/asm-x86_64/checksum.h +++ b/include/asm-x86_64/checksum.h @@ -19,15 +19,16 @@ * the last step before putting a checksum into a packet. * Make sure not to mix with 64bit checksums. */ -static inline unsigned int csum_fold(unsigned int sum) +static inline __sum16 csum_fold(__wsum sum) { __asm__( " addl %1,%0\n" " adcl $0xffff,%0" : "=r" (sum) - : "r" (sum << 16), "0" (sum & 0xffff0000) + : "r" ((__force u32)sum << 16), + "0" ((__force u32)sum & 0xffff0000) ); - return (~sum) >> 16; + return (__force __sum16)(~(__force u32)sum >> 16); } /* @@ -43,7 +44,7 @@ static inline unsigned int csum_fold(unsigned int sum) * iph: ipv4 header * ihl: length of header / 4 */ -static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { unsigned int sum; @@ -70,7 +71,7 @@ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) : "=r" (sum), "=r" (iph), "=r" (ihl) : "1" (iph), "2" (ihl) : "memory"); - return(sum); + return (__force __sum16)sum; } /** @@ -84,16 +85,17 @@ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) * Returns the pseudo header checksum the input data. Result is * 32bit unfolded. 
*/ -static inline unsigned long -csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len, - unsigned short proto, unsigned int sum) +static inline __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) { asm(" addl %1, %0\n" " adcl %2, %0\n" " adcl %3, %0\n" " adcl $0, %0\n" : "=r" (sum) - : "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum)); + : "g" (daddr), "g" (saddr), + "g" ((len + proto)<<8), "0" (sum)); return sum; } @@ -109,9 +111,9 @@ csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len, * Returns the 16bit pseudo header checksum the input data already * complemented and ready to be filled in. */ -static inline unsigned short int -csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, - unsigned short len, unsigned short proto, unsigned int sum) +static inline __sum16 +csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, unsigned short proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } @@ -126,25 +128,25 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, * Before filling it in it needs to be csum_fold()'ed. * buff should be aligned to a 64bit boundary if possible. */ -extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum); +extern __wsum csum_partial(const void *buff, int len, __wsum sum); #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 #define HAVE_CSUM_COPY_USER 1 /* Do not call this directly. Use the wrappers below */ -extern unsigned long csum_partial_copy_generic(const unsigned char *src, const unsigned char *dst, - unsigned len, - unsigned sum, +extern __wsum csum_partial_copy_generic(const void *src, const void *dst, + int len, + __wsum sum, int *src_err_ptr, int *dst_err_ptr); -extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, - int len, unsigned int isum, int *errp); -extern unsigned int csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst, - int len, unsigned int isum, int *errp); -extern unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, - unsigned int sum); +extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, + int len, __wsum isum, int *errp); +extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst, + int len, __wsum isum, int *errp); +extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, + __wsum sum); /* Old names. To be removed. */ #define csum_and_copy_to_user csum_partial_copy_to_user @@ -158,7 +160,7 @@ extern unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned * Returns the 16bit folded/inverted checksum of the passed buffer. * Ready to fill in. */ -extern unsigned short ip_compute_csum(unsigned char * buff, int len); +extern __sum16 ip_compute_csum(const void *buff, int len); /** * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header. 
@@ -176,9 +178,9 @@ extern unsigned short ip_compute_csum(unsigned char * buff, int len); struct in6_addr; #define _HAVE_ARCH_IPV6_CSUM 1 -extern unsigned short -csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, - __u32 len, unsigned short proto, unsigned int sum); +extern __sum16 +csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, + __u32 len, unsigned short proto, __wsum sum); static inline unsigned add32_with_carry(unsigned a, unsigned b) { diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index ee792faaca01..0b3c686139f1 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h @@ -29,7 +29,7 @@ #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ -#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ +#define X86_FEATURE_DS (0*32+21) /* Debug Store */ #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ @@ -68,6 +68,8 @@ #define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ #define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */ #define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */ +#define X86_FEATURE_PEBS (3*32+10) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS (3*32+11) /* Branch Trace Store */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ @@ -112,5 +114,8 @@ #define cpu_has_cyrix_arr 0 #define cpu_has_centaur_mcr 0 #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) +#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) +#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) +#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) #endif /* __ASM_X8664_CPUFEATURE_H */ diff --git a/include/asm-x86_64/delay.h b/include/asm-x86_64/delay.h index 65f64acc5319..c2669f1f5529 100644 --- a/include/asm-x86_64/delay.h +++ b/include/asm-x86_64/delay.h @@ -7,18 +7,21 @@ * Delay routines calling functions in arch/x86_64/lib/delay.c */ +/* Undefined functions to get compile-time errors */ extern void __bad_udelay(void); extern void __bad_ndelay(void); extern void __udelay(unsigned long usecs); -extern void __ndelay(unsigned long usecs); +extern void __ndelay(unsigned long nsecs); extern void __const_udelay(unsigned long usecs); extern void __delay(unsigned long loops); +/* 0x10c7 is 2**32 / 1000000 (rounded up) */ #define udelay(n) (__builtin_constant_p(n) ? \ - ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \ + ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ __udelay(n)) +/* 0x5 is 2**32 / 1000000000 (rounded up) */ #define ndelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? 
__bad_ndelay() : __const_udelay((n) * 5ul)) : \ __ndelay(n)) diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h index eb7723a46790..913d6ac00033 100644 --- a/include/asm-x86_64/desc.h +++ b/include/asm-x86_64/desc.h @@ -9,64 +9,13 @@ #include <linux/string.h> #include <linux/smp.h> +#include <asm/desc_defs.h> #include <asm/segment.h> #include <asm/mmu.h> -// 8 byte segment descriptor -struct desc_struct { - u16 limit0; - u16 base0; - unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; - unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8; -} __attribute__((packed)); - -struct n_desc_struct { - unsigned int a,b; -}; - extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; -enum { - GATE_INTERRUPT = 0xE, - GATE_TRAP = 0xF, - GATE_CALL = 0xC, -}; - -// 16byte gate -struct gate_struct { - u16 offset_low; - u16 segment; - unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; - u16 offset_middle; - u32 offset_high; - u32 zero1; -} __attribute__((packed)); - -#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) -#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF) -#define PTR_HIGH(x) ((unsigned long)(x) >> 32) - -enum { - DESC_TSS = 0x9, - DESC_LDT = 0x2, -}; - -// LDT or TSS descriptor in the GDT. 16 bytes. -struct ldttss_desc { - u16 limit0; - u16 base0; - unsigned base1 : 8, type : 5, dpl : 2, p : 1; - unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; - u32 base3; - u32 zero1; -} __attribute__((packed)); - -struct desc_ptr { - unsigned short size; - unsigned long address; -} __attribute__((packed)) ; - #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) #define clear_LDT() asm volatile("lldt %w0"::"r" (0)) diff --git a/include/asm-x86_64/desc_defs.h b/include/asm-x86_64/desc_defs.h new file mode 100644 index 000000000000..089004070099 --- /dev/null +++ b/include/asm-x86_64/desc_defs.h @@ -0,0 +1,69 @@ +/* Written 2000 by Andi Kleen */ +#ifndef __ARCH_DESC_DEFS_H +#define __ARCH_DESC_DEFS_H + +/* + * Segment descriptor structure definitions, usable from both x86_64 and i386 + * archs. + */ + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> + +// 8 byte segment descriptor +struct desc_struct { + u16 limit0; + u16 base0; + unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; + unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8; +} __attribute__((packed)); + +struct n_desc_struct { + unsigned int a,b; +}; + +enum { + GATE_INTERRUPT = 0xE, + GATE_TRAP = 0xF, + GATE_CALL = 0xC, +}; + +// 16byte gate +struct gate_struct { + u16 offset_low; + u16 segment; + unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; + u16 offset_middle; + u32 offset_high; + u32 zero1; +} __attribute__((packed)); + +#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) +#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF) +#define PTR_HIGH(x) ((unsigned long)(x) >> 32) + +enum { + DESC_TSS = 0x9, + DESC_LDT = 0x2, +}; + +// LDT or TSS descriptor in the GDT. 16 bytes. 
+struct ldttss_desc { + u16 limit0; + u16 base0; + unsigned base1 : 8, type : 5, dpl : 2, p : 1; + unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; + u32 base3; + u32 zero1; +} __attribute__((packed)); + +struct desc_ptr { + unsigned short size; + unsigned long address; +} __attribute__((packed)) ; + + +#endif /* !__ASSEMBLY__ */ + +#endif diff --git a/include/asm-x86_64/device.h b/include/asm-x86_64/device.h new file mode 100644 index 000000000000..3afa03f33a36 --- /dev/null +++ b/include/asm-x86_64/device.h @@ -0,0 +1,15 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#ifndef _ASM_X86_64_DEVICE_H +#define _ASM_X86_64_DEVICE_H + +struct dev_archdata { +#ifdef CONFIG_ACPI + void *acpi_handle; +#endif +}; + +#endif /* _ASM_X86_64_DEVICE_H */ diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h index 10174b110a5c..be9ec6890723 100644 --- a/include/asm-x86_64/dma-mapping.h +++ b/include/asm-x86_64/dma-mapping.h @@ -180,12 +180,13 @@ static inline int dma_get_cache_alignment(void) return boot_cpu_data.x86_clflush_size; } -#define dma_is_consistent(h) 1 +#define dma_is_consistent(d, h) 1 extern int dma_set_mask(struct device *dev, u64 mask); static inline void -dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) +dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) { flush_write_buffers(); } diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h index a406fcb1e924..6d24ea7c4d9d 100644 --- a/include/asm-x86_64/elf.h +++ b/include/asm-x86_64/elf.h @@ -45,7 +45,6 @@ typedef struct user_i387_struct elf_fpregset_t; #ifdef __KERNEL__ #include <asm/processor.h> -#include <asm/compat.h> /* * This is used to ensure we don't load something for the wrong architecture. diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h index 9804bf07b092..5cdfb08013c3 100644 --- a/include/asm-x86_64/futex.h +++ b/include/asm-x86_64/futex.h @@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - inc_preempt_count(); + pagefault_disable(); switch (op) { case FUTEX_OP_SET: @@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ret = -ENOSYS; } - dec_preempt_count(); + pagefault_enable(); if (!ret) { switch (cmp) { diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h index a0e9a4b93484..b80f4bb5f273 100644 --- a/include/asm-x86_64/genapic.h +++ b/include/asm-x86_64/genapic.h @@ -30,6 +30,6 @@ struct genapic { }; -extern struct genapic *genapic; +extern struct genapic *genapic, *genapic_force, apic_flat; #endif diff --git a/include/asm-x86_64/ioctls.h b/include/asm-x86_64/ioctls.h index 62caf8b6e4e1..3fc0b15a0d7e 100644 --- a/include/asm-x86_64/ioctls.h +++ b/include/asm-x86_64/ioctls.h @@ -46,6 +46,10 @@ #define TIOCSBRK 0x5427 /* BSD compatibility */ #define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TCGETS2 _IOR('T',0x2A, struct termios2) +#define TCSETS2 _IOW('T',0x2B, struct termios2) +#define TCSETSW2 _IOW('T',0x2C, struct termios2) +#define TCSETSF2 _IOW('T',0x2D, struct termios2) #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h index 37e194169fac..3227bc93d69b 100644 --- a/include/asm-x86_64/msr.h +++ b/include/asm-x86_64/msr.h @@ -169,8 +169,8 @@ static inline unsigned int cpuid_edx(unsigned int op) #define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ #define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ #define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ -#define MSR_FS_BASE 0xc0000100 /* 64bit GS base */ -#define MSR_GS_BASE 0xc0000101 /* 64bit FS base */ +#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ +#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */ /* EFER bits: */ #define _EFER_SCE 0 /* SYSCALL/SYSRET */ @@ -189,6 +189,7 @@ static inline unsigned int cpuid_edx(unsigned int op) #define MSR_IA32_PERFCTR0 0xc1 #define MSR_IA32_PERFCTR1 0xc2 +#define MSR_FSB_FREQ 0xcd #define MSR_MTRRcap 0x0fe #define MSR_IA32_BBL_CR_CTL 0x119 @@ -210,6 +211,10 @@ static inline unsigned int cpuid_edx(unsigned int op) #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de +#define MSR_IA32_PEBS_ENABLE 0x3f1 +#define MSR_IA32_DS_AREA 0x600 +#define MSR_IA32_PERF_CAPABILITIES 0x345 + #define MSR_MTRRfix64K_00000 0x250 #define MSR_MTRRfix16K_80000 0x258 #define MSR_MTRRfix16K_A0000 0x259 @@ -307,6 +312,9 @@ static inline unsigned int cpuid_edx(unsigned int op) #define MSR_IA32_PERF_STATUS 0x198 #define MSR_IA32_PERF_CTL 0x199 +#define MSR_IA32_MPERF 0xE7 +#define MSR_IA32_APERF 0xE8 + #define MSR_IA32_THERM_CONTROL 0x19a #define MSR_IA32_THERM_INTERRUPT 0x19b #define MSR_IA32_THERM_STATUS 0x19c @@ -407,4 +415,13 @@ static inline unsigned int cpuid_edx(unsigned int op) #define MSR_P4_U2L_ESCR0 0x3b0 #define MSR_P4_U2L_ESCR1 0x3b1 +/* Intel Core-based CPU performance counters */ +#define MSR_CORE_PERF_FIXED_CTR0 0x309 +#define MSR_CORE_PERF_FIXED_CTR1 0x30a +#define MSR_CORE_PERF_FIXED_CTR2 0x30b +#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d +#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e +#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f +#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 + #endif diff --git a/include/asm-x86_64/nmi.h 
b/include/asm-x86_64/nmi.h index f367d4014b42..72375e7d32a8 100644 --- a/include/asm-x86_64/nmi.h +++ b/include/asm-x86_64/nmi.h @@ -77,4 +77,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, extern int unknown_nmi_panic; +void __trigger_all_cpu_backtrace(void); +#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() + #endif /* ASM_NMI_H */ diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h index eba9cb471df3..6823fa4f1afa 100644 --- a/include/asm-x86_64/pci-direct.h +++ b/include/asm-x86_64/pci-direct.h @@ -10,6 +10,7 @@ extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); +extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val); extern int early_pci_allowed(void); diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 0555c1c4d8fa..59901c690a0d 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -221,20 +221,19 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC -static inline unsigned long pgd_bad(pgd_t pgd) -{ - unsigned long val = pgd_val(pgd); - val &= ~PTE_MASK; - val &= ~(_PAGE_USER | _PAGE_DIRTY); - return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); -} +static inline unsigned long pgd_bad(pgd_t pgd) +{ + return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); +} static inline unsigned long pud_bad(pud_t pud) { - unsigned long val = pud_val(pud); - val &= ~PTE_MASK; - val &= ~(_PAGE_USER | _PAGE_DIRTY); - return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); + return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); +} + +static inline unsigned long pmd_bad(pmd_t pmd) +{ + return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); } #define pte_none(x) (!pte_val(x)) @@ -347,7 +346,6 @@ static inline int pmd_large(pmd_t pte) { #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) -#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE ) #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index cef17e0f828c..76552d72804c 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) : :"a" (eax), "c" (ecx)); } +static inline void __sti_mwait(unsigned long eax, unsigned long ecx) +{ + /* "mwait %eax,%ecx;" */ + asm volatile( + "sti; .byte 0x0f,0x01,0xc9;" + : :"a" (eax), "c" (ecx)); +} + extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); #define stack_current() \ diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index e72cfcdf5344..6d324b838972 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -61,7 +61,6 @@ extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); extern unsigned long numa_free_all_bootmem(void); extern void reserve_bootmem_generic(unsigned long phys, unsigned len); -extern void free_bootmem_generic(unsigned long phys, 
unsigned len); extern void load_gs_index(unsigned gs); @@ -88,6 +87,7 @@ extern void syscall32_cpu_init(void); extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); extern void early_quirks(void); +extern void quirk_intel_irqbalance(void); extern void check_efer(void); extern int unhandled_signal(struct task_struct *tsk, int sig); diff --git a/include/asm-x86_64/rio.h b/include/asm-x86_64/rio.h new file mode 100644 index 000000000000..c7350f6d2015 --- /dev/null +++ b/include/asm-x86_64/rio.h @@ -0,0 +1,74 @@ +/* + * Derived from include/asm-i386/mach-summit/mach_mpparse.h + * and include/asm-i386/mach-default/bios_ebda.h + * + * Author: Laurent Vivier <Laurent.Vivier@bull.net> + */ + +#ifndef __ASM_RIO_H +#define __ASM_RIO_H + +#define RIO_TABLE_VERSION 3 + +struct rio_table_hdr { + u8 version; /* Version number of this data structure */ + u8 num_scal_dev; /* # of Scalability devices */ + u8 num_rio_dev; /* # of RIO I/O devices */ +} __attribute__((packed)); + +struct scal_detail { + u8 node_id; /* Scalability Node ID */ + u32 CBAR; /* Address of 1MB register space */ + u8 port0node; /* Node ID port connected to: 0xFF=None */ + u8 port0port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 port1node; /* Node ID port connected to: 0xFF = None */ + u8 port1port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 port2node; /* Node ID port connected to: 0xFF = None */ + u8 port2port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ +} __attribute__((packed)); + +struct rio_detail { + u8 node_id; /* RIO Node ID */ + u32 BBAR; /* Address of 1MB register space */ + u8 type; /* Type of device */ + u8 owner_id; /* Node ID of Hurricane that owns this */ + /* node */ + u8 port0node; /* Node ID port connected to: 0xFF=None */ + u8 port0port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 port1node; /* Node ID port connected to: 0xFF=None */ + u8 port1port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 first_slot; /* Lowest slot number below this Calgary */ + u8 status; /* Bit 0 = 1 : the XAPIC is used */ + /* = 0 : the XAPIC is not used, ie: */ + /* ints fwded to another XAPIC */ + /* Bits1:7 Reserved */ + u8 WP_index; /* instance index - lower ones have */ + /* lower slot numbers/PCI bus numbers */ + u8 chassis_num; /* 1 based Chassis number */ +} __attribute__((packed)); + +enum { + HURR_SCALABILTY = 0, /* Hurricane Scalability info */ + HURR_RIOIB = 2, /* Hurricane RIOIB info */ + COMPAT_CALGARY = 4, /* Compatibility Calgary */ + ALT_CALGARY = 5, /* Second Planar Calgary */ +}; + +/* + * there is a real-mode segmented pointer pointing to the + * 4K EBDA area at 0x40E. 
+ */ +static inline unsigned long get_bios_ebda(void) +{ + unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL); + address <<= 4; + return address; +} + +#endif /* __ASM_RIO_H */ diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index d6b7c057edba..e17b9ec42e98 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -82,11 +82,6 @@ extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */ extern u8 x86_cpu_to_log_apicid[NR_CPUS]; extern u8 bios_cpu_apicid[]; -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - return cpus_addr(cpumask)[0]; -} - static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) @@ -118,13 +113,6 @@ static __inline int logical_smp_processor_id(void) #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #else #define cpu_physical_id(cpu) boot_cpu_id -static inline int smp_call_function_single(int cpuid, void (*func) (void *info), - void *info, int retry, int wait) -{ - /* Disable interrupts here? */ - func(info); - return 0; -} #endif /* !CONFIG_SMP */ #endif diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 05ef097ba55b..88bf981e73cf 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h @@ -36,7 +36,34 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) "2:\t" : "=m" (lock->slock) : : "memory"); } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +/* + * Same as __raw_spin_lock, but reenable interrupts during spinning. + */ +#ifndef CONFIG_PROVE_LOCKING +static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +{ + asm volatile( + "\n1:\t" + LOCK_PREFIX " ; decl %0\n\t" + "jns 5f\n" + "testl $0x200, %1\n\t" /* interrupts were disabled? */ + "jz 4f\n\t" + "sti\n" + "3:\t" + "rep;nop\n\t" + "cmpl $0, %0\n\t" + "jle 3b\n\t" + "cli\n\t" + "jmp 1b\n" + "4:\t" + "rep;nop\n\t" + "cmpl $0, %0\n\t" + "jg 1b\n\t" + "jmp 4b\n" + "5:\n\t" + : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory"); +} +#endif static inline int __raw_spin_trylock(raw_spinlock_t *lock) { diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h index 59efe849f351..4da9345c1500 100644 --- a/include/asm-x86_64/spinlock_types.h +++ b/include/asm-x86_64/spinlock_types.h @@ -6,13 +6,13 @@ #endif typedef struct { - volatile unsigned int slock; + unsigned int slock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } typedef struct { - volatile unsigned int lock; + unsigned int lock; } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h index 5eb9799bef76..6f0b54594307 100644 --- a/include/asm-x86_64/stacktrace.h +++ b/include/asm-x86_64/stacktrace.h @@ -1,6 +1,8 @@ #ifndef _ASM_STACKTRACE_H #define _ASM_STACKTRACE_H 1 +extern int kstack_depth_to_print; + /* Generic stack tracer with callbacks */ struct stacktrace_ops { diff --git a/include/asm-x86_64/termbits.h b/include/asm-x86_64/termbits.h index bd950946e52c..6cfc3bb10c1a 100644 --- a/include/asm-x86_64/termbits.h +++ b/include/asm-x86_64/termbits.h @@ -17,6 +17,28 @@ struct termios { cc_t c_cc[NCCS]; /* control characters */ }; +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ + speed_t c_ispeed; /* input speed */ + speed_t 
c_ospeed; /* output speed */ +}; + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + /* c_cc characters */ #define VINTR 0 #define VQUIT 1 @@ -118,6 +140,7 @@ struct termios { #define HUPCL 0002000 #define CLOCAL 0004000 #define CBAUDEX 0010000 +#define BOTHER 0010000 /* non standard rate */ #define B57600 0010001 #define B115200 0010002 #define B230400 0010003 @@ -133,10 +156,12 @@ struct termios { #define B3000000 0010015 #define B3500000 0010016 #define B4000000 0010017 -#define CIBAUD 002003600000 /* input baud rate (not used) */ +#define CIBAUD 002003600000 /* input baud rate */ #define CMSPAR 010000000000 /* mark or space (stick) parity */ #define CRTSCTS 020000000000 /* flow control */ +#define IBSHIFT 8 /* Shift from CBAUD to CIBAUD */ + /* c_lflag bits */ #define ISIG 0000001 #define ICANON 0000002 diff --git a/include/asm-x86_64/termios.h b/include/asm-x86_64/termios.h index 041a91f7ddfb..443b225537f0 100644 --- a/include/asm-x86_64/termios.h +++ b/include/asm-x86_64/termios.h @@ -98,8 +98,10 @@ struct termio { copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ }) -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) +#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) #endif /* __KERNEL__ */ diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h index 787a08114b48..74a6c74397f7 100644 --- a/include/asm-x86_64/thread_info.h +++ b/include/asm-x86_64/thread_info.h @@ -122,6 +122,7 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_MEMDIE 20 #define TIF_DEBUG 21 /* uses debug registers */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ +#define TIF_FREEZE 23 /* is freezing for suspend */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) @@ -137,6 +138,7 @@ static inline struct thread_info *stack_thread_info(void) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) #define _TIF_DEBUG (1<<TIF_DEBUG) #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) +#define _TIF_FREEZE (1<<TIF_FREEZE) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK \ diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index 5c8f49280dbc..2facec5914d2 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -47,6 +47,7 @@ extern int __node_distance(int, int); .flags = SD_LOAD_BALANCE \ | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ + | SD_SERIALIZE \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ .balance_interval = 1, \ diff --git a/include/asm-x86_64/types.h b/include/asm-x86_64/types.h index c86c2e6793e2..2d4491aae281 100644 --- a/include/asm-x86_64/types.h +++ b/include/asm-x86_64/types.h @@ -48,9 +48,6 @@ typedef unsigned long long u64; typedef u64 dma64_addr_t; typedef u64 dma_addr_t; -typedef u64 sector_t; -#define HAVE_SECTOR_T - 
#endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h index 19f99178fe83..d5dbc87274f8 100644 --- a/include/asm-x86_64/uaccess.h +++ b/include/asm-x86_64/uaccess.h @@ -6,7 +6,6 @@ */ #include <linux/compiler.h> #include <linux/errno.h> -#include <linux/sched.h> #include <linux/prefetch.h> #include <asm/page.h> diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 777288eb7e75..c5f596e71faa 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -622,25 +622,7 @@ __SYSCALL(__NR_move_pages, sys_move_pages) #define __NR_syscall_max __NR_move_pages -#ifdef __KERNEL__ -#include <linux/err.h> -#endif - #ifndef __NO_STUBS - -/* user-visible error numbers are in the range -1 - -MAX_ERRNO */ - -#define __syscall_clobber "r11","rcx","memory" - -#define __syscall_return(type, res) \ -do { \ - if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ - errno = -(res); \ - res = -1; \ - } \ - return (type) (res); \ -} while (0) - #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_SYS_ALARM @@ -664,87 +646,6 @@ do { \ #define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_COMPAT_SYS_TIME -#define __syscall "syscall" - -#define _syscall0(type,name) \ -type name(void) \ -{ \ -long __res; \ -__asm__ volatile (__syscall \ - : "=a" (__res) \ - : "0" (__NR_##name) : __syscall_clobber ); \ -__syscall_return(type,__res); \ -} - -#define _syscall1(type,name,type1,arg1) \ -type name(type1 arg1) \ -{ \ -long __res; \ -__asm__ volatile (__syscall \ - : "=a" (__res) \ - : "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \ -__syscall_return(type,__res); \ -} - -#define _syscall2(type,name,type1,arg1,type2,arg2) \ -type name(type1 arg1,type2 arg2) \ -{ \ -long __res; \ -__asm__ volatile (__syscall \ - : "=a" (__res) \ - : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \ -__syscall_return(type,__res); \ -} - -#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ -type name(type1 arg1,type2 arg2,type3 arg3) \ -{ \ -long __res; \ -__asm__ volatile (__syscall \ - : "=a" (__res) \ - : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ - "d" ((long)(arg3)) : __syscall_clobber); \ -__syscall_return(type,__res); \ -} - -#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ -type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ -{ \ -long __res; \ -__asm__ volatile ("movq %5,%%r10 ;" __syscall \ - : "=a" (__res) \ - : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ - "d" ((long)(arg3)),"g" ((long)(arg4)) : __syscall_clobber,"r10" ); \ -__syscall_return(type,__res); \ -} - -#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ - type5,arg5) \ -type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ -{ \ -long __res; \ -__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \ - : "=a" (__res) \ - : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ - "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \ - __syscall_clobber,"r8","r10" ); \ -__syscall_return(type,__res); \ -} - -#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ - type5,arg5,type6,arg6) \ -type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ -{ \ -long __res; \ -__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \ - : "=a" (__res) \ - : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ - 
"d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \ - "g" ((long)(arg6)) : \ - __syscall_clobber,"r8","r10","r9" ); \ -__syscall_return(type,__res); \ -} - #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h index 2e7ff10fd775..02710f6a4560 100644 --- a/include/asm-x86_64/unwind.h +++ b/include/asm-x86_64/unwind.h @@ -1,104 +1,6 @@ #ifndef _ASM_X86_64_UNWIND_H #define _ASM_X86_64_UNWIND_H -/* - * Copyright (C) 2002-2006 Novell, Inc. - * Jan Beulich <jbeulich@novell.com> - * This code is released under version 2 of the GNU GPL. - */ - -#ifdef CONFIG_STACK_UNWIND - -#include <linux/sched.h> -#include <asm/ptrace.h> -#include <asm/uaccess.h> -#include <asm/vsyscall.h> - -struct unwind_frame_info -{ - struct pt_regs regs; - struct task_struct *task; - unsigned call_frame:1; -}; - -#define UNW_PC(frame) (frame)->regs.rip -#define UNW_SP(frame) (frame)->regs.rsp -#ifdef CONFIG_FRAME_POINTER -#define UNW_FP(frame) (frame)->regs.rbp -#define FRAME_RETADDR_OFFSET 8 -#define FRAME_LINK_OFFSET 0 -#define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1)) -#define STACK_TOP(tsk) ((tsk)->thread.rsp0) -#endif -/* Might need to account for the special exception and interrupt handling - stacks here, since normally - EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER, - but the construct is needed only for getting across the stack switch to - the interrupt stack - thus considering the IRQ stack itself is unnecessary, - and the overhead of comparing against all exception handling stacks seems - not desirable. */ -#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) - -#define UNW_REGISTER_INFO \ - PTREGS_INFO(rax), \ - PTREGS_INFO(rdx), \ - PTREGS_INFO(rcx), \ - PTREGS_INFO(rbx), \ - PTREGS_INFO(rsi), \ - PTREGS_INFO(rdi), \ - PTREGS_INFO(rbp), \ - PTREGS_INFO(rsp), \ - PTREGS_INFO(r8), \ - PTREGS_INFO(r9), \ - PTREGS_INFO(r10), \ - PTREGS_INFO(r11), \ - PTREGS_INFO(r12), \ - PTREGS_INFO(r13), \ - PTREGS_INFO(r14), \ - PTREGS_INFO(r15), \ - PTREGS_INFO(rip) - -#define UNW_DEFAULT_RA(raItem, dataAlign) \ - ((raItem).where == Memory && \ - !((raItem).value * (dataAlign) + 8)) - -static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, - /*const*/ struct pt_regs *regs) -{ - info->regs = *regs; -} - -static inline void arch_unw_init_blocked(struct unwind_frame_info *info) -{ - extern const char thread_return[]; - - memset(&info->regs, 0, sizeof(info->regs)); - info->regs.rip = (unsigned long)thread_return; - info->regs.cs = __KERNEL_CS; - __get_user(info->regs.rbp, (unsigned long *)info->task->thread.rsp); - info->regs.rsp = info->task->thread.rsp; - info->regs.ss = __KERNEL_DS; -} - -extern int arch_unwind_init_running(struct unwind_frame_info *, - int (*callback)(struct unwind_frame_info *, - void *arg), - void *arg); - -static inline int arch_unw_user_mode(const struct unwind_frame_info *info) -{ -#if 0 /* This can only work when selector register saves/restores - are properly annotated (and tracked in UNW_REGISTER_INFO). 
*/ - return user_mode(&info->regs); -#else - return (long)info->regs.rip >= 0 - || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END) - || (long)info->regs.rsp >= 0; -#endif -} - -#else - #define UNW_PC(frame) ((void)(frame), 0UL) #define UNW_SP(frame) ((void)(frame), 0UL) @@ -107,6 +9,4 @@ static inline int arch_unw_user_mode(const void *info) return 0; } -#endif - #endif /* _ASM_X86_64_UNWIND_H */ diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h index 01d1c17e2849..05cb8dd200de 100644 --- a/include/asm-x86_64/vsyscall.h +++ b/include/asm-x86_64/vsyscall.h @@ -10,6 +10,7 @@ enum vsyscall_num { #define VSYSCALL_START (-10UL << 20) #define VSYSCALL_SIZE 1024 #define VSYSCALL_END (-2UL << 20) +#define VSYSCALL_MAPPED_PAGES 1 #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) #ifdef __KERNEL__
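
A note on the acpi.h hunk above: it only moves __acpi_acquire_global_lock() and __acpi_release_global_lock() out of line; the algorithm is unchanged and can still be read in the removed inline versions. Below is a user-space model of that algorithm for reference, with GCC's __sync_val_compare_and_swap() standing in for the kernel's cmpxchg() (bit 0 of the lock word is "pending", bit 1 is "owned"); the *_model names are invented for illustration and this is not the kernel implementation.

int acpi_acquire_global_lock_model(unsigned int *lock)
{
	unsigned int old, new;

	do {
		old = *lock;
		/* keep the upper bits, set "owned", and set "pending"
		   if the lock was already owned */
		new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
	} while (__sync_val_compare_and_swap(lock, old, new) != old);

	/* new == 2: lock taken (-1 = acquired); new == 3: pending, must wait */
	return (new < 3) ? -1 : 0;
}

int acpi_release_global_lock_model(unsigned int *lock)
{
	unsigned int old, new;

	do {
		old = *lock;
		new = old & ~0x3;	/* clear "owned" and "pending" */
	} while (__sync_val_compare_and_swap(lock, old, new) != old);

	return old & 0x1;	/* old "pending" bit: a waiter must be signalled */
}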
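
A note on the bug.h hunk above: instead of encoding __FILE__ and __LINE__ into the instruction stream after the ud2, BUG() now emits a fixed-size record into a dedicated __bug_table section, and the ud2 trap handler looks the faulting address up in that table (the record layout being the generic struct bug_entry). The underlying technique - collect per-site records in a named ELF section and walk them through linker-generated __start_/__stop_ symbols - can be sketched in user space; the record layout and section name below are invented for illustration only.

#include <stdio.h>

struct demo_entry {
	const char *file;
	unsigned short line;
};

/* Drop one record per call site into the "demo_table" section. */
#define RECORD_SITE()							\
	do {								\
		static const struct demo_entry entry			\
		__attribute__((section("demo_table"), used)) =		\
			{ __FILE__, __LINE__ };				\
		(void)entry;						\
	} while (0)

/* GNU ld provides these symbols for sections named like C identifiers. */
extern const struct demo_entry __start_demo_table[], __stop_demo_table[];

int main(void)
{
	RECORD_SITE();
	RECORD_SITE();

	for (const struct demo_entry *e = __start_demo_table;
	     e < __stop_demo_table; e++)
		printf("record at %s:%hu\n", e->file, e->line);
	return 0;
}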
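
A note on the csum_fold() change in the checksum.h hunk above: the __wsum/__sum16 types are sparse annotations and do not change what the function computes; it still folds a 32-bit partial one's-complement sum into 16 bits and returns its complement. A portable C model of the same fold, as a sketch for reference (the kernel version uses inline assembly):

unsigned short csum_fold_model(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half in */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry, if any */
	return (unsigned short)~sum;		/* one's complement result */
}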
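
A note on the delay.h hunk above: the constant changes from 0x10c6 to 0x10c7 so the fixed-point scale factor rounds up rather than down, i.e. the constant udelay() path multiplies by ceil(2^32 / 10^6) and the constant ndelay() path by ceil(2^32 / 10^9), matching the comments added in the patch. A stand-alone user-space check of those two factors (a sketch, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t two32 = UINT64_C(1) << 32;

	/* ceil(2^32 / 10^6) = 4295 = 0x10c7, the udelay() factor */
	printf("udelay factor: %#llx\n",
	       (unsigned long long)((two32 + 1000000 - 1) / 1000000));
	/* ceil(2^32 / 10^9) = 5, the ndelay() factor */
	printf("ndelay factor: %#llx\n",
	       (unsigned long long)((two32 + 1000000000 - 1) / 1000000000));
	return 0;
}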