Diffstat (limited to 'include')
-rw-r--r--include/asm-alpha/topology.h3
-rw-r--r--include/asm-frv/topology.h4
-rw-r--r--include/asm-generic/topology.h14
-rw-r--r--include/asm-ia64/topology.h7
-rw-r--r--include/asm-powerpc/topology.h3
-rw-r--r--include/asm-sh/bugs.h2
-rw-r--r--include/asm-sh/cpu-sh4/freq.h6
-rw-r--r--include/asm-sh/cpu-sh4/rtc.h5
-rw-r--r--include/asm-sh/migor.h58
-rw-r--r--include/asm-sh/processor.h5
-rw-r--r--include/asm-sh/r7780rp.h22
-rw-r--r--include/asm-sh/se7721.h70
-rw-r--r--include/asm-sh/se7722.h2
-rw-r--r--include/asm-sh/sh_keysc.h13
-rw-r--r--include/asm-sh/system.h2
-rw-r--r--include/asm-sh/topology.h2
-rw-r--r--include/asm-sh/uaccess_32.h5
-rw-r--r--include/asm-x86/boot.h8
-rw-r--r--include/asm-x86/dma-mapping.h238
-rw-r--r--include/asm-x86/dma-mapping_32.h187
-rw-r--r--include/asm-x86/dma-mapping_64.h202
-rw-r--r--include/asm-x86/e820_32.h2
-rw-r--r--include/asm-x86/genapic_32.h1
-rw-r--r--include/asm-x86/i387.h37
-rw-r--r--include/asm-x86/numa_64.h3
-rw-r--r--include/asm-x86/pci_64.h1
-rw-r--r--include/asm-x86/processor.h16
-rw-r--r--include/asm-x86/scatterlist.h2
-rw-r--r--include/asm-x86/thread_info.h9
-rw-r--r--include/asm-x86/thread_info_32.h2
-rw-r--r--include/asm-x86/thread_info_64.h6
-rw-r--r--include/asm-x86/topology.h22
-rw-r--r--include/asm-x86/tsc.h1
-rw-r--r--include/linux/bitmap.h1
-rw-r--r--include/linux/cpumask.h25
-rw-r--r--include/linux/cpuset.h13
-rw-r--r--include/linux/efi.h7
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/fs.h52
-rw-r--r--include/linux/init_task.h3
-rw-r--r--include/linux/irqflags.h6
-rw-r--r--include/linux/ktime.h6
-rw-r--r--include/linux/list.h48
-rw-r--r--include/linux/mount.h11
-rw-r--r--include/linux/prctl.h6
-rw-r--r--include/linux/sched.h56
-rw-r--r--include/linux/sysdev.h17
-rw-r--r--include/linux/topology.h46
48 files changed, 700 insertions, 558 deletions
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
index 420ccde6b916..149532e162c4 100644
--- a/include/asm-alpha/topology.h
+++ b/include/asm-alpha/topology.h
@@ -41,8 +41,7 @@ static inline cpumask_t node_to_cpumask(int node)
#define pcibus_to_cpumask(bus) (cpu_online_map)
-#else /* CONFIG_NUMA */
-# include <asm-generic/topology.h>
#endif /* !CONFIG_NUMA */
+# include <asm-generic/topology.h>
#endif /* _ASM_ALPHA_TOPOLOGY_H */
diff --git a/include/asm-frv/topology.h b/include/asm-frv/topology.h
index abe7298742ac..942724352705 100644
--- a/include/asm-frv/topology.h
+++ b/include/asm-frv/topology.h
@@ -5,10 +5,8 @@
#error NUMA not supported yet
-#else /* !CONFIG_NUMA */
+#endif /* CONFIG_NUMA */
#include <asm-generic/topology.h>
-#endif /* CONFIG_NUMA */
-
#endif /* _ASM_TOPOLOGY_H */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 342a2a0105c4..a6aea79bca4f 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -27,6 +27,8 @@
#ifndef _ASM_GENERIC_TOPOLOGY_H
#define _ASM_GENERIC_TOPOLOGY_H
+#ifndef CONFIG_NUMA
+
/* Other architectures wishing to use this simple topology API should fill
in the below functions as appropriate in their own <asm/topology.h> file. */
#ifndef cpu_to_node
@@ -52,4 +54,16 @@
)
#endif
+#endif /* CONFIG_NUMA */
+
+/* returns pointer to cpumask for specified node */
+#ifndef node_to_cpumask_ptr
+
+#define node_to_cpumask_ptr(v, node) \
+ cpumask_t _##v = node_to_cpumask(node), *v = &_##v
+
+#define node_to_cpumask_ptr_next(v, node) \
+ _##v = node_to_cpumask(node)
+#endif
+
#endif /* _ASM_GENERIC_TOPOLOGY_H */
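
A minimal usage sketch of the new node_to_cpumask_ptr() interface added above; the surrounding function and variable names are illustrative, not part of this patch. On the generic fallback the macro declares an on-stack cpumask plus a pointer to it, while the x86 NUMA variant (later in this series) merely points into node_to_cpumask_map[] and avoids the copy:

	static int count_cpus_on_node(int nid)
	{
		int cpu, count = 0;
		node_to_cpumask_ptr(mask, nid);	/* declares a local cpumask and a pointer to it */

		for_each_cpu_mask(cpu, *mask)
			count++;
		return count;
	}
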
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 2d67b72b18d0..f2f72ef2a897 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -93,7 +93,7 @@ void build_cpu_to_node_map(void);
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
- .newidle_idx = 0, /* unused */ \
+ .newidle_idx = 2, \
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
@@ -116,6 +116,11 @@ void build_cpu_to_node_map(void);
#define smt_capable() (smp_num_siblings > 1)
#endif
+#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
+ CPU_MASK_ALL : \
+ node_to_cpumask(pcibus_to_node(bus)) \
+ )
+
#include <asm-generic/topology.h>
#endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index ca23b681ad05..100c6fbfc587 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -96,11 +96,10 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
{
}
+#endif /* CONFIG_NUMA */
#include <asm-generic/topology.h>
-#endif /* CONFIG_NUMA */
-
#ifdef CONFIG_SMP
#include <asm/cputable.h>
#define smt_capable() (cpu_has_feature(CPU_FTR_SMT))
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index cfda7d5bf026..121b2ecddfc3 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -25,7 +25,7 @@ static void __init check_bugs(void)
case CPU_SH7619:
*p++ = '2';
break;
- case CPU_SH7203 ... CPU_SH7263:
+ case CPU_SH7203 ... CPU_MXG:
*p++ = '2';
*p++ = 'a';
break;
diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h
index ec028c649215..da46e67ae26d 100644
--- a/include/asm-sh/cpu-sh4/freq.h
+++ b/include/asm-sh/cpu-sh4/freq.h
@@ -10,14 +10,14 @@
#ifndef __ASM_CPU_SH4_FREQ_H
#define __ASM_CPU_SH4_FREQ_H
-#if defined(CONFIG_CPU_SUBTYPE_SH7722) || defined(CONFIG_CPU_SUBTYPE_SH7366)
+#if defined(CONFIG_CPU_SUBTYPE_SH7722) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7723) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7366)
#define FRQCR 0xa4150000
#define VCLKCR 0xa4150004
#define SCLKACR 0xa4150008
#define SCLKBCR 0xa415000c
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
#define IrDACLKCR 0xa4150010
-#endif
#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_CPU_SUBTYPE_SH7780)
#define FRQCR 0xffc80000
diff --git a/include/asm-sh/cpu-sh4/rtc.h b/include/asm-sh/cpu-sh4/rtc.h
index f3d0f53275e4..25b1e6adfe8c 100644
--- a/include/asm-sh/cpu-sh4/rtc.h
+++ b/include/asm-sh/cpu-sh4/rtc.h
@@ -1,7 +1,12 @@
#ifndef __ASM_SH_CPU_SH4_RTC_H
#define __ASM_SH_CPU_SH4_RTC_H
+#ifdef CONFIG_CPU_SUBTYPE_SH7723
+#define rtc_reg_size sizeof(u16)
+#else
#define rtc_reg_size sizeof(u32)
+#endif
+
#define RTC_BIT_INVERTED 0x40 /* bug on SH7750, SH7750S */
#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
diff --git a/include/asm-sh/migor.h b/include/asm-sh/migor.h
new file mode 100644
index 000000000000..2329363afdc3
--- /dev/null
+++ b/include/asm-sh/migor.h
@@ -0,0 +1,58 @@
+#ifndef __ASM_SH_MIGOR_H
+#define __ASM_SH_MIGOR_H
+
+/*
+ * linux/include/asm-sh/migor.h
+ *
+ * Copyright (C) 2008 Renesas Solutions
+ *
+ * Portions Copyright (C) 2007 Nobuhiro Iwamatsu
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#include <asm/addrspace.h>
+
+/* GPIO */
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+#define PORT_PACR 0xa4050100
+#define PORT_PDCR 0xa4050106
+#define PORT_PECR 0xa4050108
+#define PORT_PHCR 0xa405010e
+#define PORT_PJCR 0xa4050110
+#define PORT_PKCR 0xa4050112
+#define PORT_PLCR 0xa4050114
+#define PORT_PMCR 0xa4050116
+#define PORT_PRCR 0xa405011c
+#define PORT_PWCR 0xa4050146
+#define PORT_PXCR 0xa4050148
+#define PORT_PYCR 0xa405014a
+#define PORT_PZCR 0xa405014c
+#define PORT_PADR 0xa4050120
+#define PORT_PWDR 0xa4050166
+
+#define PORT_HIZCRA 0xa4050158
+#define PORT_HIZCRC 0xa405015c
+
+#define PORT_MSELCRB 0xa4050182
+
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+#define PORT_PSELA 0xa405014e
+#define PORT_PSELB 0xa4050150
+#define PORT_PSELC 0xa4050152
+#define PORT_PSELD 0xa4050154
+
+#define PORT_HIZCRA 0xa4050158
+#define PORT_HIZCRB 0xa405015a
+#define PORT_HIZCRC 0xa405015c
+
+#define BSC_CS6ABCR 0xfec1001c
+
+#endif /* __ASM_SH_MIGOR_H */
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index ec707b98e5b9..b7c7ce80f03e 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -16,7 +16,7 @@ enum cpu_type {
CPU_SH7619,
/* SH-2A types */
- CPU_SH7203, CPU_SH7206, CPU_SH7263,
+ CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG,
/* SH-3 types */
CPU_SH7705, CPU_SH7706, CPU_SH7707,
@@ -29,7 +29,8 @@ enum cpu_type {
CPU_SH7760, CPU_SH4_202, CPU_SH4_501,
/* SH-4A types */
- CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SHX3,
+ CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785,
+ CPU_SH7723, CPU_SHX3,
/* SH4AL-DSP types */
CPU_SH7343, CPU_SH7722, CPU_SH7366,
diff --git a/include/asm-sh/r7780rp.h b/include/asm-sh/r7780rp.h
index 1770460a4616..a33838f23a6d 100644
--- a/include/asm-sh/r7780rp.h
+++ b/include/asm-sh/r7780rp.h
@@ -55,11 +55,11 @@
#define PA_SCSPTR1 (PA_BCR+0x0524) /* SCIF1 Serial Port control */
#define PA_SCLSR1 (PA_BCR+0x0528) /* SCIF1 Line Status control */
#define PA_SCRER1 (PA_BCR+0x052c) /* SCIF1 Serial Error control */
-#define PA_ICCR (PA_BCR+0x0600) /* Serial control */
-#define PA_SAR (PA_BCR+0x0602) /* Serial Slave control */
-#define PA_MDR (PA_BCR+0x0604) /* Serial Mode control */
-#define PA_ADR1 (PA_BCR+0x0606) /* Serial Address1 control */
-#define PA_DAR1 (PA_BCR+0x0646) /* Serial Data1 control */
+#define PA_SMCR (PA_BCR+0x0600) /* 2-wire Serial control */
+#define PA_SMSMADR (PA_BCR+0x0602) /* 2-wire Serial Slave control */
+#define PA_SMMR (PA_BCR+0x0604) /* 2-wire Serial Mode control */
+#define PA_SMSADR1 (PA_BCR+0x0606) /* 2-wire Serial Address1 control */
+#define PA_SMTRDR1 (PA_BCR+0x0646) /* 2-wire Serial Data1 control */
#define PA_VERREG (PA_BCR+0x0700) /* FPGA Version Register */
#define PA_POFF (PA_BCR+0x0800) /* System Power Off control */
#define PA_PMR (PA_BCR+0x0900) /* */
@@ -107,11 +107,11 @@
#define PA_SCFCR (PA_BCR+0x040c) /* SCIF FIFO control */
#define PA_SCFDR (PA_BCR+0x040e) /* SCIF FIFO data control */
#define PA_SCLSR (PA_BCR+0x0412) /* SCIF Line Status control */
-#define PA_ICCR (PA_BCR+0x0500) /* Serial control */
-#define PA_SAR (PA_BCR+0x0502) /* Serial Slave control */
-#define PA_MDR (PA_BCR+0x0504) /* Serial Mode control */
-#define PA_ADR1 (PA_BCR+0x0506) /* Serial Address1 control */
-#define PA_DAR1 (PA_BCR+0x0546) /* Serial Data1 control */
+#define PA_SMCR (PA_BCR+0x0500) /* 2-wire Serial control */
+#define PA_SMSMADR (PA_BCR+0x0502) /* 2-wire Serial Slave control */
+#define PA_SMMR (PA_BCR+0x0504) /* 2-wire Serial Mode control */
+#define PA_SMSADR1 (PA_BCR+0x0506) /* 2-wire Serial Address1 control */
+#define PA_SMTRDR1 (PA_BCR+0x0546) /* 2-wire Serial Data1 control */
#define PA_VERREG (PA_BCR+0x0600) /* FPGA Version Register */
#define PA_AX88796L 0xa5800400 /* AX88796L Area */
@@ -190,6 +190,8 @@
#define IRQ_TP (HL_FPGA_IRQ_BASE + 12)
#define IRQ_RTC (HL_FPGA_IRQ_BASE + 13)
#define IRQ_TH_ALERT (HL_FPGA_IRQ_BASE + 14)
+#define IRQ_SCIF0 (HL_FPGA_IRQ_BASE + 15)
+#define IRQ_SCIF1 (HL_FPGA_IRQ_BASE + 16)
unsigned char *highlander_init_irq_r7780mp(void);
unsigned char *highlander_init_irq_r7780rp(void);
diff --git a/include/asm-sh/se7721.h b/include/asm-sh/se7721.h
new file mode 100644
index 000000000000..b957f6041193
--- /dev/null
+++ b/include/asm-sh/se7721.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2008 Renesas Solutions Corp.
+ *
+ * Hitachi UL SolutionEngine 7721 Support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#ifndef __ASM_SH_SE7721_H
+#define __ASM_SH_SE7721_H
+#include <asm/addrspace.h>
+
+/* Box specific addresses. */
+#define SE_AREA0_WIDTH 2 /* Area0: 32bit */
+#define PA_ROM 0xa0000000 /* EPROM */
+#define PA_ROM_SIZE 0x00200000 /* EPROM size 2M byte */
+#define PA_FROM 0xa1000000 /* Flash-ROM */
+#define PA_FROM_SIZE 0x01000000 /* Flash-ROM size 16M byte */
+#define PA_EXT1 0xa4000000
+#define PA_EXT1_SIZE 0x04000000
+#define PA_SDRAM 0xaC000000 /* SDRAM(Area3) 64MB */
+#define PA_SDRAM_SIZE 0x04000000
+
+#define PA_EXT4 0xb0000000
+#define PA_EXT4_SIZE 0x04000000
+
+#define PA_PERIPHERAL 0xB8000000
+
+#define PA_PCIC PA_PERIPHERAL
+#define PA_MRSHPC (PA_PERIPHERAL + 0x003fffe0)
+#define PA_MRSHPC_MW1 (PA_PERIPHERAL + 0x00400000)
+#define PA_MRSHPC_MW2 (PA_PERIPHERAL + 0x00500000)
+#define PA_MRSHPC_IO (PA_PERIPHERAL + 0x00600000)
+#define MRSHPC_OPTION (PA_MRSHPC + 6)
+#define MRSHPC_CSR (PA_MRSHPC + 8)
+#define MRSHPC_ISR (PA_MRSHPC + 10)
+#define MRSHPC_ICR (PA_MRSHPC + 12)
+#define MRSHPC_CPWCR (PA_MRSHPC + 14)
+#define MRSHPC_MW0CR1 (PA_MRSHPC + 16)
+#define MRSHPC_MW1CR1 (PA_MRSHPC + 18)
+#define MRSHPC_IOWCR1 (PA_MRSHPC + 20)
+#define MRSHPC_MW0CR2 (PA_MRSHPC + 22)
+#define MRSHPC_MW1CR2 (PA_MRSHPC + 24)
+#define MRSHPC_IOWCR2 (PA_MRSHPC + 26)
+#define MRSHPC_CDCR (PA_MRSHPC + 28)
+#define MRSHPC_PCIC_INFO (PA_MRSHPC + 30)
+
+#define PA_LED 0xB6800000 /* 8bit LED */
+#define PA_FPGA 0xB7000000 /* FPGA base address */
+
+#define MRSHPC_IRQ0 10
+
+#define FPGA_ILSR1 (PA_FPGA + 0x02)
+#define FPGA_ILSR2 (PA_FPGA + 0x03)
+#define FPGA_ILSR3 (PA_FPGA + 0x04)
+#define FPGA_ILSR4 (PA_FPGA + 0x05)
+#define FPGA_ILSR5 (PA_FPGA + 0x06)
+#define FPGA_ILSR6 (PA_FPGA + 0x07)
+#define FPGA_ILSR7 (PA_FPGA + 0x08)
+#define FPGA_ILSR8 (PA_FPGA + 0x09)
+
+void init_se7721_IRQ(void);
+
+#define __IO_PREFIX se7721
+#include <asm/io_generic.h>
+
+#endif /* __ASM_SH_SE7721_H */
diff --git a/include/asm-sh/se7722.h b/include/asm-sh/se7722.h
index e0e89fcb8388..3690fe5857a4 100644
--- a/include/asm-sh/se7722.h
+++ b/include/asm-sh/se7722.h
@@ -77,6 +77,8 @@
#define PORT_PSELA 0xA405014EUL
#define PORT_PYCR 0xA405014AUL
#define PORT_PZCR 0xA405014CUL
+#define PORT_HIZCRA 0xA4050158UL
+#define PORT_HIZCRC 0xA405015CUL
/* IRQ */
#define IRQ0_IRQ 32
diff --git a/include/asm-sh/sh_keysc.h b/include/asm-sh/sh_keysc.h
new file mode 100644
index 000000000000..b5a4dd5a9729
--- /dev/null
+++ b/include/asm-sh/sh_keysc.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_KEYSC_H__
+#define __ASM_KEYSC_H__
+
+#define SH_KEYSC_MAXKEYS 30
+
+struct sh_keysc_info {
+ enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3 } mode;
+ int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */
+ int delay;
+ int keycodes[SH_KEYSC_MAXKEYS];
+};
+
+#endif /* __ASM_KEYSC_H__ */
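
A sketch of how board setup code might describe its key matrix with the new sh_keysc_info structure; the key codes and the variable name are illustrative and not taken from this patch:

	static struct sh_keysc_info keysc_platform_data = {
		.mode		= SH_KEYSC_MODE_3,
		.scan_timing	= 3,		/* KYCR1 SCN[2:0] */
		.delay		= 5,
		.keycodes	= {
			KEY_A, KEY_B, KEY_C,	/* ... up to SH_KEYSC_MAXKEYS entries */
		},
	};
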
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 5145aa2a0ce9..e65b6b822cb3 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -146,6 +146,8 @@ extern unsigned int instruction_size(unsigned int insn);
extern unsigned long cached_to_uncached;
+extern struct dentry *sh_debugfs_root;
+
/* XXX
* disable hlt during certain critical i/o operations
*/
diff --git a/include/asm-sh/topology.h b/include/asm-sh/topology.h
index f402a3b1cfa4..34cdb28e8f44 100644
--- a/include/asm-sh/topology.h
+++ b/include/asm-sh/topology.h
@@ -16,7 +16,7 @@
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
- .newidle_idx = 0, \
+ .newidle_idx = 2, \
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h
index c0318b608893..1e41fda74bd3 100644
--- a/include/asm-sh/uaccess_32.h
+++ b/include/asm-sh/uaccess_32.h
@@ -55,13 +55,10 @@ static inline void set_fs(mm_segment_t s)
* If we don't have an MMU (or if its disabled) the only thing we really have
* to look out for is if the address resides somewhere outside of what
* available RAM we have.
- *
- * TODO: This check could probably also stand to be restricted somewhat more..
- * though it still does the Right Thing(tm) for the time being.
*/
static inline int __access_ok(unsigned long addr, unsigned long size)
{
- return ((addr >= memory_start) && ((addr + size) < memory_end));
+ return 1;
}
#else /* CONFIG_MMU */
#define __addr_ok(addr) \
diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h
index ed8affbf96cb..2faed7ecb092 100644
--- a/include/asm-x86/boot.h
+++ b/include/asm-x86/boot.h
@@ -17,4 +17,12 @@
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
& ~(CONFIG_PHYSICAL_ALIGN - 1))
+#ifdef CONFIG_X86_64
+#define BOOT_HEAP_SIZE 0x7000
+#define BOOT_STACK_SIZE 0x4000
+#else
+#define BOOT_HEAP_SIZE 0x4000
+#define BOOT_STACK_SIZE 0x1000
+#endif
+
#endif /* _ASM_BOOT_H */
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 58f790f4df52..a1a4dc7fe6ec 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -1,5 +1,237 @@
+#ifndef _ASM_DMA_MAPPING_H_
+#define _ASM_DMA_MAPPING_H_
+
+/*
+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
+ * documentation.
+ */
+
+#include <linux/scatterlist.h>
+#include <asm/io.h>
+#include <asm/swiotlb.h>
+
+extern dma_addr_t bad_dma_address;
+extern int iommu_merge;
+extern struct device fallback_dev;
+extern int panic_on_overflow;
+extern int forbid_dac;
+extern int force_iommu;
+
+struct dma_mapping_ops {
+ int (*mapping_error)(dma_addr_t dma_addr);
+ void* (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
+ size_t size, int direction);
+ /* like map_single, but doesn't check the device mask */
+ dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
+ size_t size, int direction);
+ void (*unmap_single)(struct device *dev, dma_addr_t addr,
+ size_t size, int direction);
+ void (*sync_single_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+ void (*sync_single_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+ void (*sync_single_range_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size, int direction);
+ void (*sync_single_range_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size, int direction);
+ void (*sync_sg_for_cpu)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction);
+ void (*sync_sg_for_device)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction);
+ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+ void (*unmap_sg)(struct device *hwdev,
+ struct scatterlist *sg, int nents,
+ int direction);
+ int (*dma_supported)(struct device *hwdev, u64 mask);
+ int is_phys;
+};
+
+extern const struct dma_mapping_ops *dma_ops;
+
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dma_addr);
+
+ return (dma_addr == bad_dma_address);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+
+extern int dma_supported(struct device *hwdev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+static inline dma_addr_t
+dma_map_single(struct device *hwdev, void *ptr, size_t size,
+ int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+ int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ if (dma_ops->unmap_single)
+ dma_ops->unmap_single(dev, addr, size, direction);
+}
+
+static inline int
+dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ return dma_ops->map_sg(hwdev, sg, nents, direction);
+}
+
+static inline void
+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ if (dma_ops->unmap_sg)
+ dma_ops->unmap_sg(hwdev, sg, nents, direction);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ if (dma_ops->sync_single_for_cpu)
+ dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
+ direction);
+ flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ if (dma_ops->sync_single_for_device)
+ dma_ops->sync_single_for_device(hwdev, dma_handle, size,
+ direction);
+ flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+ size, direction);
+
+ flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(hwdev, dma_handle,
+ offset, size, direction);
+
+ flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ if (dma_ops->sync_sg_for_cpu)
+ dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+ flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ if (dma_ops->sync_sg_for_device)
+ dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+
+ flush_write_buffers();
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ int direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ return dma_ops->map_single(dev, page_to_phys(page)+offset,
+ size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, int direction)
+{
+ dma_unmap_single(dev, addr, size, direction);
+}
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ flush_write_buffers();
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+ /* no easy way to get cache size on all x86, so return the
+ * maximum possible, to be safe */
+ return boot_cpu_data.x86_clflush_size;
+}
+
+#define dma_is_consistent(d, h) (1)
+
#ifdef CONFIG_X86_32
-# include "dma-mapping_32.h"
-#else
-# include "dma-mapping_64.h"
+# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+struct dma_coherent_mem {
+ void *virt_base;
+ u32 device_base;
+ int size;
+ int flags;
+ unsigned long *bitmap;
+};
+
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size);
+#endif /* CONFIG_X86_32 */
#endif
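
A hypothetical minimal backend for the now-unified dma_mapping_ops table, shown only to illustrate how an implementation plugs in; this identity/nommu-style mapping is not part of this patch:

	static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
					   size_t size, int direction)
	{
		return (dma_addr_t)paddr;		/* identity mapping */
	}

	static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction)
	{
		struct scatterlist *s;
		int i;

		for_each_sg(sg, s, nents, i)
			s->dma_address = sg_phys(s);
		return nents;
	}

	static const struct dma_mapping_ops example_dma_ops = {
		.map_single	= nommu_map_single,
		.map_sg		= nommu_map_sg,
		.is_phys	= 1,
	};

	/* Platform/boot code would then select it: dma_ops = &example_dma_ops; */
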
diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
deleted file mode 100644
index 55f01bd9e556..000000000000
--- a/include/asm-x86/dma-mapping_32.h
+++ /dev/null
@@ -1,187 +0,0 @@
-#ifndef _ASM_I386_DMA_MAPPING_H
-#define _ASM_I386_DMA_MAPPING_H
-
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/bug.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(size == 0);
- flush_write_buffers();
- return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nents == 0 || sglist[0].length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- BUG_ON(!sg_page(sg));
-
- sg->dma_address = sg_phys(sg);
- }
-
- flush_write_buffers();
- return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
- return 0;
-}
-
-extern int forbid_dac;
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if(mask < 0x00ffffff)
- return 0;
-
- /* Work around chipset bugs */
- if (forbid_dac > 0 && mask > 0xffffffffULL)
- return 0;
-
- return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if(!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
-
-static inline int
-dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all x86, so return the
- * maximum possible, to be safe */
- return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_is_consistent(d, h) (1)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
-
-#endif
diff --git a/include/asm-x86/dma-mapping_64.h b/include/asm-x86/dma-mapping_64.h
deleted file mode 100644
index ecd0f6125ba3..000000000000
--- a/include/asm-x86/dma-mapping_64.h
+++ /dev/null
@@ -1,202 +0,0 @@
-#ifndef _X8664_DMA_MAPPING_H
-#define _X8664_DMA_MAPPING_H 1
-
-/*
- * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
- * documentation.
- */
-
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <asm/swiotlb.h>
-
-struct dma_mapping_ops {
- int (*mapping_error)(dma_addr_t dma_addr);
- void* (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
- size_t size, int direction);
- /* like map_single, but doesn't check the device mask */
- dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
- size_t size, int direction);
- void (*unmap_single)(struct device *dev, dma_addr_t addr,
- size_t size, int direction);
- void (*sync_single_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction);
- void (*sync_single_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction);
- void (*sync_single_range_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size, int direction);
- void (*sync_single_range_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size, int direction);
- void (*sync_sg_for_cpu)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int direction);
- void (*sync_sg_for_device)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int direction);
- int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
- void (*unmap_sg)(struct device *hwdev,
- struct scatterlist *sg, int nents,
- int direction);
- int (*dma_supported)(struct device *hwdev, u64 mask);
- int is_phys;
-};
-
-extern dma_addr_t bad_dma_address;
-extern const struct dma_mapping_ops* dma_ops;
-extern int iommu_merge;
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
- if (dma_ops->mapping_error)
- return dma_ops->mapping_error(dma_addr);
-
- return (dma_addr == bad_dma_address);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
- int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_single(hwdev, ptr, size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
- int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_ops->unmap_single(dev, addr, size, direction);
-}
-
-#define dma_map_page(dev,page,offset,size,dir) \
- dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-
-#define dma_unmap_page dma_unmap_single
-
-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_cpu)
- dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
- direction);
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_device)
- dma_ops->sync_single_for_device(hwdev, dma_handle, size,
- direction);
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_cpu) {
- dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
- }
-
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_device)
- dma_ops->sync_single_range_for_device(hwdev, dma_handle,
- offset, size, direction);
-
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_cpu)
- dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_device) {
- dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
- }
-
- flush_write_buffers();
-}
-
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_sg(hwdev, sg, nents, direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
- int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_ops->unmap_sg(hwdev, sg, nents, direction);
-}
-
-extern int dma_supported(struct device *hwdev, u64 mask);
-
-/* same for gart, swiotlb, and nommu */
-static inline int dma_get_cache_alignment(void)
-{
- return boot_cpu_data.x86_clflush_size;
-}
-
-#define dma_is_consistent(d, h) 1
-
-extern int dma_set_mask(struct device *dev, u64 mask);
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- flush_write_buffers();
-}
-
-extern struct device fallback_dev;
-extern int panic_on_overflow;
-
-#endif /* _X8664_DMA_MAPPING_H */
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
index 43b1a8bd4b34..a9f7c6ec32bf 100644
--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h
@@ -24,7 +24,7 @@ extern void update_e820(void);
extern int e820_all_mapped(unsigned long start, unsigned long end,
unsigned type);
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
-extern void find_max_pfn(void);
+extern void propagate_e820_map(void);
extern void register_bootmem_low_pages(unsigned long max_low_pfn);
extern void add_memory_region(unsigned long long start,
unsigned long long size, int type);
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index f1b96932746b..b02ea6e17de8 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -117,6 +117,7 @@ extern struct genapic *genapic;
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
#define get_uv_system_type() UV_NONE
#define is_uv_system() 0
+#define uv_wakeup_secondary(a, b) 1
#endif
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 54522b814f1c..da2adb45f6e3 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -21,8 +21,9 @@
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
-extern void init_fpu(struct task_struct *child);
+extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
+extern void init_thread_xstate(void);
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
@@ -117,24 +118,22 @@ static inline void __save_init_fpu(struct task_struct *tsk)
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
- : "=m" (tsk->thread.i387.fxsave));
+ : "=m" (tsk->thread.xstate->fxsave));
#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
needed for addressing (fix submitted to mainline 2005-11-21). */
__asm__ __volatile__("rex64/fxsave %0"
- : "=m" (tsk->thread.i387.fxsave));
+ : "=m" (tsk->thread.xstate->fxsave));
#else
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
- __asm__ __volatile__("rex64/fxsave %P2(%1)"
- : "=m" (tsk->thread.i387.fxsave)
- : "cdaSDb" (tsk),
- "i" (offsetof(__typeof__(*tsk),
- thread.i387.fxsave)));
+ __asm__ __volatile__("rex64/fxsave (%1)"
+ : "=m" (tsk->thread.xstate->fxsave)
+ : "cdaSDb" (&tsk->thread.xstate->fxsave));
#endif
- clear_fpu_state(&tsk->thread.i387.fxsave);
+ clear_fpu_state(&tsk->thread.xstate->fxsave);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
@@ -148,7 +147,7 @@ static inline int save_i387(struct _fpstate __user *buf)
int err = 0;
BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
- sizeof(tsk->thread.i387.fxsave));
+ sizeof(tsk->thread.xstate->fxsave));
if ((unsigned long)buf % 16)
printk("save_i387: bad fpstate %p\n", buf);
@@ -164,7 +163,7 @@ static inline int save_i387(struct _fpstate __user *buf)
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
} else {
- if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
+ if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
sizeof(struct i387_fxsave_struct)))
return -1;
}
@@ -201,7 +200,7 @@ static inline void restore_fpu(struct task_struct *tsk)
"nop ; frstor %1",
"fxrstor %1",
X86_FEATURE_FXSR,
- "m" ((tsk)->thread.i387.fxsave));
+ "m" (tsk->thread.xstate->fxsave));
}
/* We need a safe address that is cheap to find and that is already
@@ -225,8 +224,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
"fxsave %[fx]\n"
"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
X86_FEATURE_FXSR,
- [fx] "m" (tsk->thread.i387.fxsave),
- [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory");
+ [fx] "m" (tsk->thread.xstate->fxsave),
+ [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. safe_address is a random variable that should be in L1 */
@@ -327,25 +326,25 @@ static inline void clear_fpu(struct task_struct *tsk)
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.i387.fxsave.cwd;
+ return tsk->thread.xstate->fxsave.cwd;
} else {
- return (unsigned short)tsk->thread.i387.fsave.cwd;
+ return (unsigned short)tsk->thread.xstate->fsave.cwd;
}
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.i387.fxsave.swd;
+ return tsk->thread.xstate->fxsave.swd;
} else {
- return (unsigned short)tsk->thread.i387.fsave.swd;
+ return (unsigned short)tsk->thread.xstate->fsave.swd;
}
}
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
if (cpu_has_xmm) {
- return tsk->thread.i387.fxsave.mxcsr;
+ return tsk->thread.xstate->fxsave.mxcsr;
} else {
return MXCSR_DEFAULT;
}
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 32c22ae0709f..22e87c9f6a80 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -9,7 +9,8 @@ struct bootnode {
u64 end;
};
-extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
+extern int compute_hash_shift(struct bootnode *nodes, int numblks,
+ int *nodeids);
#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index df867e5d80b1..f330234ffa5c 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -22,6 +22,7 @@ extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
int reg, int len, u32 value);
+extern void dma32_reserve_bootmem(void);
extern void pci_iommu_alloc(void);
/* The PCI address space does equal the physical memory
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 6e26c7c717a2..e6bf92ddeb21 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -354,7 +354,7 @@ struct i387_soft_struct {
u32 entry_eip;
};
-union i387_union {
+union thread_xstate {
struct i387_fsave_struct fsave;
struct i387_fxsave_struct fxsave;
struct i387_soft_struct soft;
@@ -365,6 +365,9 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
#endif
extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
@@ -397,8 +400,8 @@ struct thread_struct {
unsigned long cr2;
unsigned long trap_no;
unsigned long error_code;
- /* Floating point info: */
- union i387_union i387 __attribute__((aligned(16)));;
+ /* floating point and extended processor state */
+ union thread_xstate *xstate;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
@@ -918,4 +921,11 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
+/* Get/set a process' ability to use the timestamp counter instruction */
+#define GET_TSC_CTL(adr) get_tsc_mode((adr))
+#define SET_TSC_CTL(val) set_tsc_mode((val))
+
+extern int get_tsc_mode(unsigned long adr);
+extern int set_tsc_mode(unsigned int val);
+
#endif
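
A userspace sketch of the per-process TSC control wired up via get_tsc_mode()/set_tsc_mode() above, assuming the PR_GET_TSC/PR_SET_TSC and PR_TSC_* constants added by the (truncated) prctl.h portion of this series:

	int tsc_mode;

	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);	/* make rdtsc fault from now on */
	prctl(PR_GET_TSC, &tsc_mode, 0, 0, 0);		/* read back the current mode */
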
diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h
index d13c197866d6..c0432061f81a 100644
--- a/include/asm-x86/scatterlist.h
+++ b/include/asm-x86/scatterlist.h
@@ -11,9 +11,7 @@ struct scatterlist {
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
-#ifdef CONFIG_X86_64
unsigned int dma_length;
-#endif
};
#define ARCH_HAS_SG_CHAIN
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index d5fd12f2abdb..77244f17993f 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -1,5 +1,14 @@
+#ifndef _ASM_X86_THREAD_INFO_H
#ifdef CONFIG_X86_32
# include "thread_info_32.h"
#else
# include "thread_info_64.h"
#endif
+
+#ifndef __ASSEMBLY__
+extern void arch_task_cache_init(void);
+extern void free_thread_info(struct thread_info *ti);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+#define arch_task_cache_init arch_task_cache_init
+#endif
+#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
index 4e053fa561a9..531859962096 100644
--- a/include/asm-x86/thread_info_32.h
+++ b/include/asm-x86/thread_info_32.h
@@ -102,8 +102,6 @@ static inline struct thread_info *current_thread_info(void)
__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
#endif
-#define free_thread_info(info) free_pages((unsigned long)(info), get_order(THREAD_SIZE))
-
#else /* !__ASSEMBLY__ */
/* how to get the thread information struct from ASM */
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
index 1e5c6f6152cd..ed664e874dec 100644
--- a/include/asm-x86/thread_info_64.h
+++ b/include/asm-x86/thread_info_64.h
@@ -85,8 +85,6 @@ static inline struct thread_info *stack_thread_info(void)
#define alloc_thread_info(tsk) \
((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
-#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-
#else /* !__ASSEMBLY__ */
/* how to get the thread information struct from ASM */
@@ -126,6 +124,7 @@ static inline struct thread_info *stack_thread_info(void)
#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
+#define TIF_NOTSC 28 /* TSC is not accessible in userland */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -147,6 +146,7 @@ static inline struct thread_info *stack_thread_info(void)
#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
+#define _TIF_NOTSC (1 << TIF_NOTSC)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
@@ -160,7 +160,7 @@ static inline struct thread_info *stack_thread_info(void)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS)
+ (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS|_TIF_NOTSC)
#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 81a29eb08ac4..22073268b481 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -88,6 +88,17 @@ static inline int cpu_to_node(int cpu)
#endif
return per_cpu(x86_cpu_to_node_map, cpu);
}
+
+#ifdef CONFIG_NUMA
+
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+#define node_to_cpumask_ptr(v, node) \
+ cpumask_t *v = &(node_to_cpumask_map[node])
+
+#define node_to_cpumask_ptr_next(v, node) \
+ v = &(node_to_cpumask_map[node])
+#endif
+
#endif /* CONFIG_X86_64 */
/*
@@ -136,17 +147,13 @@ extern unsigned long node_remap_size[];
# define SD_CACHE_NICE_TRIES 2
# define SD_IDLE_IDX 2
-# define SD_NEWIDLE_IDX 0
+# define SD_NEWIDLE_IDX 2
# define SD_FORKEXEC_IDX 1
#endif
/* sched_domains SD_NODE_INIT for NUMAQ machines */
#define SD_NODE_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .child = NULL, \
- .groups = NULL, \
.min_interval = 8, \
.max_interval = 32, \
.busy_factor = 32, \
@@ -164,7 +171,6 @@ extern unsigned long node_remap_size[];
| SD_WAKE_BALANCE, \
.last_balance = jiffies, \
.balance_interval = 1, \
- .nr_balance_failed = 0, \
}
#ifdef CONFIG_X86_64_ACPI_NUMA
@@ -174,10 +180,10 @@ extern int __node_distance(int, int);
#else /* CONFIG_NUMA */
-#include <asm-generic/topology.h>
-
#endif
+#include <asm-generic/topology.h>
+
extern cpumask_t cpu_coregroup_map(int cpu);
#ifdef ENABLE_TOPO_DEFINES
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index d2d8eb5b55f5..0434bd8349a7 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -18,6 +18,7 @@ extern unsigned int cpu_khz;
extern unsigned int tsc_khz;
extern void disable_TSC(void);
+extern void enable_TSC(void);
static inline cycles_t get_cycles(void)
{
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index acad1105d942..1dbe074f1c64 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -108,6 +108,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
+extern int bitmap_scnprintf_len(unsigned int len);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 7047f58306a7..259c8051155d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -222,8 +222,13 @@ int __next_cpu(int n, const cpumask_t *srcp);
#define next_cpu(n, src) ({ (void)(src); 1; })
#endif
+#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
+extern cpumask_t *cpumask_of_cpu_map;
+#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
+
+#else
#define cpumask_of_cpu(cpu) \
-({ \
+(*({ \
typeof(_unused_cpumask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
m.bits[0] = 1UL<<(cpu); \
@@ -231,8 +236,9 @@ int __next_cpu(int n, const cpumask_t *srcp);
cpus_clear(m); \
cpu_set((cpu), m); \
} \
- m; \
-})
+ &m; \
+}))
+#endif
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
@@ -243,6 +249,8 @@ int __next_cpu(int n, const cpumask_t *srcp);
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }
+#define CPU_MASK_ALL_PTR (&CPU_MASK_ALL)
+
#else
#define CPU_MASK_ALL \
@@ -251,6 +259,10 @@ int __next_cpu(int n, const cpumask_t *srcp);
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }
+/* cpu_mask_all is in init/main.c */
+extern cpumask_t cpu_mask_all;
+#define CPU_MASK_ALL_PTR (&cpu_mask_all)
+
#endif
#define CPU_MASK_NONE \
@@ -273,6 +285,13 @@ static inline int __cpumask_scnprintf(char *buf, int len,
return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}
+#define cpumask_scnprintf_len(len) \
+ __cpumask_scnprintf_len((len))
+static inline int __cpumask_scnprintf_len(int len)
+{
+ return bitmap_scnprintf_len(len);
+}
+
#define cpumask_parse_user(ubuf, ulen, dst) \
__cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
static inline int __cpumask_parse_user(const char __user *buf, int len,
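
Two brief sketches of the revised interfaces; the calling context is hypothetical. cpumask_of_cpu() now expands to the dereference of a pointer, so with CONFIG_HAVE_CPUMASK_OF_CPU_MAP it can refer to a shared map instead of an on-stack temporary, and cpumask_scnprintf_len() sizes print buffers:

	set_cpus_allowed(p, cpumask_of_cpu(cpu));

	int len = cpumask_scnprintf_len(NR_CPUS);	/* bytes needed to print a mask */
	char *buf = kmalloc(len, GFP_KERNEL);
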
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 0a26be353cb3..726761e24003 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,8 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
-extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
-extern cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p);
+extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -84,13 +84,14 @@ static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
-static inline cpumask_t cpuset_cpus_allowed(struct task_struct *p)
+static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
{
- return cpu_possible_map;
+ *mask = cpu_possible_map;
}
-static inline cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p)
+static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
+ cpumask_t *mask)
{
- return cpu_possible_map;
+ *mask = cpu_possible_map;
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
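
Callers now pass the mask by reference instead of receiving a full cpumask_t by value; a minimal sketch (the caller context is hypothetical):

	cpumask_t allowed;

	cpuset_cpus_allowed(p, &allowed);
	set_cpus_allowed(p, allowed);
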
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 14813b595802..a5f359a7ad0e 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -18,6 +18,7 @@
#include <linux/proc_fs.h>
#include <linux/rtc.h>
#include <linux/ioport.h>
+#include <linux/pfn.h>
#include <asm/page.h>
#include <asm/system.h>
@@ -394,4 +395,10 @@ struct efi_generic_dev_path {
u16 length;
} __attribute ((packed));
+static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
+{
+ *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
+ *addr &= PAGE_MASK;
+}
+
#endif /* _LINUX_EFI_H */
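
A sketch of the intended use of memrange_efi_to_native(): converting an EFI memory descriptor's 4 KiB-page extent into native page units before the range is reserved. The descriptor variable 'md' is hypothetical:

	u64 start  = md->phys_addr;
	u64 npages = md->num_pages;		/* counted in EFI (4 KiB) pages */

	memrange_efi_to_native(&start, &npages);
	/* start is now aligned to the native page size, npages counts native pages */
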
diff --git a/include/linux/file.h b/include/linux/file.h
index 7239baac81a9..653477021e4c 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -61,6 +61,7 @@ extern struct kmem_cache *filp_cachep;
extern void __fput(struct file *);
extern void fput(struct file *);
+extern void drop_file_write_access(struct file *file);
struct file_operations;
struct vfsmount;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b84b848431f2..d1eeea669d2c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -776,6 +776,9 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
+#define FILE_MNT_WRITE_TAKEN 1
+#define FILE_MNT_WRITE_RELEASED 2
+
struct file {
/*
* fu_list becomes invalid after file_free is called and queued via
@@ -810,6 +813,9 @@ struct file {
spinlock_t f_ep_lock;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
+#ifdef CONFIG_DEBUG_WRITECOUNT
+ unsigned long f_mnt_write_state;
+#endif
};
extern spinlock_t files_lock;
#define file_list_lock() spin_lock(&files_lock);
@@ -818,6 +824,49 @@ extern spinlock_t files_lock;
#define get_file(x) atomic_inc(&(x)->f_count)
#define file_count(x) atomic_read(&(x)->f_count)
+#ifdef CONFIG_DEBUG_WRITECOUNT
+static inline void file_take_write(struct file *f)
+{
+ WARN_ON(f->f_mnt_write_state != 0);
+ f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
+}
+static inline void file_release_write(struct file *f)
+{
+ f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED;
+}
+static inline void file_reset_write(struct file *f)
+{
+ f->f_mnt_write_state = 0;
+}
+static inline void file_check_state(struct file *f)
+{
+ /*
+ * At this point, either both or neither of these bits
+ * should be set.
+ */
+ WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN);
+ WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED);
+}
+static inline int file_check_writeable(struct file *f)
+{
+ if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN)
+ return 0;
+ printk(KERN_WARNING "writeable file with no "
+ "mnt_want_write()\n");
+ WARN_ON(1);
+ return -EINVAL;
+}
+#else /* !CONFIG_DEBUG_WRITECOUNT */
+static inline void file_take_write(struct file *filp) {}
+static inline void file_release_write(struct file *filp) {}
+static inline void file_reset_write(struct file *filp) {}
+static inline void file_check_state(struct file *filp) {}
+static inline int file_check_writeable(struct file *filp)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_WRITECOUNT */
+
#define MAX_NON_LFS ((1UL<<31) - 1)
/* Page cache limit. The filesystems should put that into their s_maxbytes
@@ -1735,7 +1784,8 @@ extern struct file *create_read_pipe(struct file *f);
extern struct file *create_write_pipe(void);
extern void free_write_pipe(struct file *);
-extern int open_namei(int dfd, const char *, int, int, struct nameidata *);
+extern struct file *do_filp_open(int dfd, const char *pathname,
+ int open_flag, int mode);
extern int may_open(struct nameidata *, int, int);
extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
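
With CONFIG_DEBUG_WRITECOUNT enabled, the new file_*_write() helpers track whether a writable struct file really holds an mnt_want_write() reference. A sketch of where they sit on the open and release paths; the surrounding code is abbreviated and hypothetical:

	/* on the open-for-write path */
	error = mnt_want_write(mnt);
	if (!error)
		file_take_write(filp);		/* mark: write reference taken */

	/* on __fput() / drop_file_write_access() */
	mnt_drop_write(mnt);
	file_release_write(filp);
	file_check_state(filp);			/* warn if take/release did not pair up */
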
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1f74e1d7415f..37a6f5bc4a92 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -151,6 +151,9 @@ extern struct group_info init_groups;
.cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \
.active_mm = &init_mm, \
+ .se = { \
+ .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
+ }, \
.rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
.time_slice = HZ, \
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 412e025bc5c7..e600c4e9b8c5 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -84,10 +84,10 @@
#define irqs_disabled() \
({ \
- unsigned long flags; \
+ unsigned long _flags; \
\
- raw_local_save_flags(flags); \
- raw_irqs_disabled_flags(flags); \
+ raw_local_save_flags(_flags); \
+ raw_irqs_disabled_flags(_flags); \
})
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 2cd7fa73d1af..ce5983225be4 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -327,4 +327,10 @@ extern void ktime_get_ts(struct timespec *ts);
/* Get the real (wall-) time in timespec format: */
#define ktime_get_real_ts(ts) getnstimeofday(ts)
+static inline ktime_t ns_to_ktime(u64 ns)
+{
+ static const ktime_t ktime_zero = { .tv64 = 0 };
+ return ktime_add_ns(ktime_zero, ns);
+}
+
#endif
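
The new ns_to_ktime() helper converts a plain nanosecond count into a ktime_t; for example, a hypothetical relative hrtimer arm:

	hrtimer_start(&timer, ns_to_ktime(500 * NSEC_PER_USEC), HRTIMER_MODE_REL);
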
diff --git a/include/linux/list.h b/include/linux/list.h
index 75ce2cb4ff6e..dac16f99c701 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -631,31 +631,14 @@ static inline void list_splice_init_rcu(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_rcu(pos, head) \
- for (pos = (head)->next; \
- prefetch(rcu_dereference(pos)->next), pos != (head); \
- pos = pos->next)
+ for (pos = rcu_dereference((head)->next); \
+ prefetch(pos->next), pos != (head); \
+ pos = rcu_dereference(pos->next))
#define __list_for_each_rcu(pos, head) \
- for (pos = (head)->next; \
- rcu_dereference(pos) != (head); \
- pos = pos->next)
-
-/**
- * list_for_each_safe_rcu
- * @pos: the &struct list_head to use as a loop cursor.
- * @n: another &struct list_head to use as temporary storage
- * @head: the head for your list.
- *
- * Iterate over an rcu-protected list, safe against removal of list entry.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_safe_rcu(pos, n, head) \
- for (pos = (head)->next; \
- n = rcu_dereference(pos)->next, pos != (head); \
- pos = n)
+ for (pos = rcu_dereference((head)->next); \
+ pos != (head); \
+ pos = rcu_dereference(pos->next))
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
@@ -668,10 +651,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_entry_rcu(pos, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member); \
- prefetch(rcu_dereference(pos)->member.next), \
- &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
+ for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
+ prefetch(pos->member.next), &pos->member != (head); \
+ pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
/**
@@ -686,9 +668,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_continue_rcu(pos, head) \
- for ((pos) = (pos)->next; \
- prefetch(rcu_dereference((pos))->next), (pos) != (head); \
- (pos) = (pos)->next)
+ for ((pos) = rcu_dereference((pos)->next); \
+ prefetch((pos)->next), (pos) != (head); \
+ (pos) = rcu_dereference((pos)->next))
/*
* Double linked lists with a single pointer list head.
@@ -986,10 +968,10 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = (head)->first; \
- rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \
+ for (pos = rcu_dereference((head)->first); \
+ pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
+ pos = rcu_dereference(pos->next))
#else
#warning "don't include kernel headers in userspace"
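
The list.h rework moves the rcu_dereference() into every step of the RCU traversal macros and removes list_for_each_safe_rcu(); callers are otherwise unchanged. A self-contained sketch of the usual pattern, with a hypothetical item type and list:

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    struct example_item {
            int value;
            struct list_head list;
    };

    static LIST_HEAD(example_list);

    static int example_sum(void)
    {
            struct example_item *item;
            int sum = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(item, &example_list, list)
                    sum += item->value;
            rcu_read_unlock();

            return sum;
    }
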
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5ee2df217cdf..d6600e3f7e45 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
@@ -28,8 +29,10 @@ struct mnt_namespace;
#define MNT_NOATIME 0x08
#define MNT_NODIRATIME 0x10
#define MNT_RELATIME 0x20
+#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
#define MNT_SHRINKABLE 0x100
+#define MNT_IMBALANCED_WRITE_COUNT 0x200 /* just for debugging */
#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
@@ -62,6 +65,11 @@ struct vfsmount {
int mnt_expiry_mark; /* true if marked for expiry */
int mnt_pinned;
int mnt_ghosts;
+ /*
+ * This value is not stable unless all of the mnt_writers[] spinlocks
+ * are held, and all mnt_writer[]s on this mount have 0 as their ->count
+ */
+ atomic_t __mnt_writers;
};
static inline struct vfsmount *mntget(struct vfsmount *mnt)
@@ -71,9 +79,12 @@ static inline struct vfsmount *mntget(struct vfsmount *mnt)
return mnt;
}
+extern int mnt_want_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
extern void mntput_no_expire(struct vfsmount *mnt);
extern void mnt_pin(struct vfsmount *mnt);
extern void mnt_unpin(struct vfsmount *mnt);
+extern int __mnt_is_readonly(struct vfsmount *mnt);
static inline void mntput(struct vfsmount *mnt)
{
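
mount.h grows the write-count machinery for read-only bind mounts: MNT_READONLY, __mnt_writers, and the mnt_want_write()/mnt_drop_write() pair. A sketch of the pattern those helpers are designed for (the function and the modification it performs are hypothetical):

    #include <linux/fs.h>
    #include <linux/mount.h>

    static int example_modify(struct vfsmount *mnt, struct dentry *dentry)
    {
            int err;

            err = mnt_want_write(mnt);      /* expected to fail on a read-only mount */
            if (err)
                    return err;

            /* ... perform the change while holding the write reference ... */

            mnt_drop_write(mnt);
            return 0;
    }
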
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 3800639775ae..5c80b1939636 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -67,4 +67,10 @@
#define PR_CAPBSET_READ 23
#define PR_CAPBSET_DROP 24
+/* Get/set the process' ability to use the timestamp counter instruction */
+#define PR_GET_TSC 25
+#define PR_SET_TSC 26
+# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
+# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
+
#endif /* _LINUX_PRCTL_H */
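
The new PR_GET_TSC/PR_SET_TSC pair is a userspace-visible prctl. A hedged userspace sketch follows (error handling trimmed; the constants are repeated locally in case installed headers predate this patch, and PR_GET_TSC is assumed to return the current mode through the pointer passed as the second argument):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TSC
    # define PR_GET_TSC      25
    # define PR_SET_TSC      26
    # define PR_TSC_ENABLE   1
    # define PR_TSC_SIGSEGV  2
    #endif

    int main(void)
    {
            int tsc_mode = 0;

            /* make direct RDTSC raise SIGSEGV for this task */
            prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);

            /* read the setting back (assumed to be written via the pointer) */
            prctl(PR_GET_TSC, &tsc_mode, 0, 0, 0);
            printf("TSC mode: %d\n", tsc_mode);
            return 0;
    }
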
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6a1e7afb099b..be6914014c70 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -704,6 +704,7 @@ enum cpu_idle_type {
#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
#define BALANCE_FOR_MC_POWER \
(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
@@ -733,12 +734,31 @@ struct sched_group {
u32 reciprocal_cpu_power;
};
+enum sched_domain_level {
+ SD_LV_NONE = 0,
+ SD_LV_SIBLING,
+ SD_LV_MC,
+ SD_LV_CPU,
+ SD_LV_NODE,
+ SD_LV_ALLNODES,
+ SD_LV_MAX
+};
+
+struct sched_domain_attr {
+ int relax_domain_level;
+};
+
+#define SD_ATTR_INIT (struct sched_domain_attr) { \
+ .relax_domain_level = -1, \
+}
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
cpumask_t span; /* span of all CPUs in this domain */
+ int first_cpu; /* cache of the first cpu in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
@@ -750,6 +770,7 @@ struct sched_domain {
unsigned int wake_idx;
unsigned int forkexec_idx;
int flags; /* See SD_* */
+ enum sched_domain_level level;
/* Runtime fields. */
unsigned long last_balance; /* init to jiffies. units in jiffies */
@@ -789,7 +810,8 @@ struct sched_domain {
#endif
};
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+ struct sched_domain_attr *dattr_new);
extern int arch_reinit_sched_domains(void);
#endif /* CONFIG_SMP */
@@ -889,7 +911,8 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
void (*task_new) (struct rq *rq, struct task_struct *p);
- void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+ void (*set_cpus_allowed)(struct task_struct *p,
+ const cpumask_t *newmask);
void (*join_domain)(struct rq *rq);
void (*leave_domain)(struct rq *rq);
@@ -923,6 +946,7 @@ struct load_weight {
struct sched_entity {
struct load_weight load; /* for load-balancing */
struct rb_node run_node;
+ struct list_head group_node;
unsigned int on_rq;
u64 exec_start;
@@ -982,6 +1006,7 @@ struct sched_rt_entity {
unsigned long timeout;
int nr_cpus_allowed;
+ struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity *parent;
/* rq on which this entity is (to be) queued: */
@@ -1502,15 +1527,21 @@ static inline void put_task_struct(struct task_struct *t)
#define used_math() tsk_used_math(current)
#ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+ const cpumask_t *new_mask);
#else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ const cpumask_t *new_mask)
{
- if (!cpu_isset(0, new_mask))
+ if (!cpu_isset(0, *new_mask))
return -EINVAL;
return 0;
}
#endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+ return set_cpus_allowed_ptr(p, &new_mask);
+}
extern unsigned long long sched_clock(void);
@@ -1551,7 +1582,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_batch_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
@@ -1564,6 +1594,10 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
+int sched_rt_handler(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
extern unsigned int sysctl_sched_compat_yield;
#ifdef CONFIG_RT_MUTEXES
@@ -2031,7 +2065,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
}
#endif
-extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
extern int sched_mc_power_savings, sched_smt_power_savings;
@@ -2041,8 +2075,11 @@ extern void normalize_rt_tasks(void);
#ifdef CONFIG_GROUP_SCHED
extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+#endif
-extern struct task_group *sched_create_group(void);
+extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2053,6 +2090,9 @@ extern unsigned long sched_group_shares(struct task_group *tg);
extern int sched_group_set_rt_runtime(struct task_group *tg,
long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
+extern int sched_group_set_rt_period(struct task_group *tg,
+ long rt_period_us);
+extern long sched_group_rt_period(struct task_group *tg);
#endif
#endif
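
The sched.h changes convert the affinity API to pass cpumasks by reference: set_cpus_allowed_ptr() becomes the primary interface and set_cpus_allowed() is now a by-value wrapper around it. A short sketch of a converted caller (the task pointer and CPU number are placeholders):

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    static int example_pin(struct task_struct *p, int cpu)
    {
            cpumask_t mask = cpumask_of_cpu(cpu);

            /* old style: set_cpus_allowed(p, mask) copied the whole mask by value */
            return set_cpus_allowed_ptr(p, &mask);
    }
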
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
index f752e73bf977..f2767bc6b735 100644
--- a/include/linux/sysdev.h
+++ b/include/linux/sysdev.h
@@ -45,12 +45,16 @@ struct sysdev_class_attribute {
ssize_t (*store)(struct sysdev_class *, const char *, size_t);
};
-#define SYSDEV_CLASS_ATTR(_name,_mode,_show,_store) \
-struct sysdev_class_attribute attr_##_name = { \
+#define _SYSDEV_CLASS_ATTR(_name,_mode,_show,_store) \
+{ \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
-};
+}
+
+#define SYSDEV_CLASS_ATTR(_name,_mode,_show,_store) \
+ struct sysdev_class_attribute attr_##_name = \
+ _SYSDEV_CLASS_ATTR(_name,_mode,_show,_store)
extern int sysdev_class_register(struct sysdev_class *);
@@ -100,15 +104,16 @@ struct sysdev_attribute {
};
-#define _SYSDEV_ATTR(_name,_mode,_show,_store) \
+#define _SYSDEV_ATTR(_name, _mode, _show, _store) \
{ \
.attr = { .name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
}
-#define SYSDEV_ATTR(_name,_mode,_show,_store) \
-struct sysdev_attribute attr_##_name = _SYSDEV_ATTR(_name,_mode,_show,_store);
+#define SYSDEV_ATTR(_name, _mode, _show, _store) \
+ struct sysdev_attribute attr_##_name = \
+ _SYSDEV_ATTR(_name, _mode, _show, _store);
extern int sysdev_create_file(struct sys_device *, struct sysdev_attribute *);
extern void sysdev_remove_file(struct sys_device *, struct sysdev_attribute *);
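
The sysdev change splits SYSDEV_CLASS_ATTR into a bare initializer plus the defining macro, mirroring _SYSDEV_ATTR, which makes it possible to build static arrays of class attributes. A hypothetical example (names are made up):

    #include <linux/kernel.h>
    #include <linux/sysdev.h>

    static ssize_t example_show(struct sysdev_class *cls, char *buf)
    {
            return sprintf(buf, "example\n");
    }

    /* an array initialization is only possible with the bare initializer form */
    static struct sysdev_class_attribute example_class_attrs[] = {
            _SYSDEV_CLASS_ATTR(example, 0444, example_show, NULL),
    };
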
diff --git a/include/linux/topology.h b/include/linux/topology.h
index bd14f8b30f09..4bb7074a2c3a 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,16 +38,15 @@
#endif
#ifndef nr_cpus_node
-#define nr_cpus_node(node) \
- ({ \
- cpumask_t __tmp__; \
- __tmp__ = node_to_cpumask(node); \
- cpus_weight(__tmp__); \
+#define nr_cpus_node(node) \
+ ({ \
+ node_to_cpumask_ptr(__tmp__, node); \
+ cpus_weight(*__tmp__); \
})
#endif
-#define for_each_node_with_cpus(node) \
- for_each_online_node(node) \
+#define for_each_node_with_cpus(node) \
+ for_each_online_node(node) \
if (nr_cpus_node(node))
void arch_update_cpu_topology(void);
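
The nr_cpus_node() rewrite above uses node_to_cpumask_ptr() so the node's cpumask is handled through a pointer instead of being copied onto the stack. A hypothetical helper following the same pattern:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    static int example_first_cpu_of_node(int node)
    {
            node_to_cpumask_ptr(mask, node);        /* declares and fills 'mask' */

            return first_cpu(*mask);
    }
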
@@ -80,7 +79,9 @@ void arch_update_cpu_topology(void);
* by defining their own arch-specific initializer in include/asm/topology.h.
* A definition there will automagically override these default initializers
* and allow arch-specific performance tuning of sched_domains.
+ * (Only non-zero and non-null fields need be specified.)
*/
+
#ifdef CONFIG_SCHED_SMT
/* MCD - Do we really need this? It is always on if CONFIG_SCHED_SMT is,
* so can't we drop this in favor of CONFIG_SCHED_SMT?
@@ -89,20 +90,10 @@ void arch_update_cpu_topology(void);
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
#define SD_SIBLING_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .child = NULL, \
- .groups = NULL, \
.min_interval = 1, \
.max_interval = 2, \
.busy_factor = 64, \
.imbalance_pct = 110, \
- .cache_nice_tries = 0, \
- .busy_idx = 0, \
- .idle_idx = 0, \
- .newidle_idx = 0, \
- .wake_idx = 0, \
- .forkexec_idx = 0, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_NEWIDLE \
| SD_BALANCE_FORK \
@@ -112,7 +103,6 @@ void arch_update_cpu_topology(void);
| SD_SHARE_CPUPOWER, \
.last_balance = jiffies, \
.balance_interval = 1, \
- .nr_balance_failed = 0, \
}
#endif
#endif /* CONFIG_SCHED_SMT */
@@ -121,18 +111,12 @@ void arch_update_cpu_topology(void);
/* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
#ifndef SD_MC_INIT
#define SD_MC_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .child = NULL, \
- .groups = NULL, \
.min_interval = 1, \
.max_interval = 4, \
.busy_factor = 64, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
.busy_idx = 2, \
- .idle_idx = 0, \
- .newidle_idx = 0, \
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
@@ -144,7 +128,6 @@ void arch_update_cpu_topology(void);
| BALANCE_FOR_MC_POWER, \
.last_balance = jiffies, \
.balance_interval = 1, \
- .nr_balance_failed = 0, \
}
#endif
#endif /* CONFIG_SCHED_MC */
@@ -152,10 +135,6 @@ void arch_update_cpu_topology(void);
/* Common values for CPUs */
#ifndef SD_CPU_INIT
#define SD_CPU_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .child = NULL, \
- .groups = NULL, \
.min_interval = 1, \
.max_interval = 4, \
.busy_factor = 64, \
@@ -174,16 +153,11 @@ void arch_update_cpu_topology(void);
| BALANCE_FOR_PKG_POWER,\
.last_balance = jiffies, \
.balance_interval = 1, \
- .nr_balance_failed = 0, \
}
#endif
/* sched_domains SD_ALLNODES_INIT for NUMA machines */
#define SD_ALLNODES_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .child = NULL, \
- .groups = NULL, \
.min_interval = 64, \
.max_interval = 64*num_online_cpus(), \
.busy_factor = 128, \
@@ -191,14 +165,10 @@ void arch_update_cpu_topology(void);
.cache_nice_tries = 1, \
.busy_idx = 3, \
.idle_idx = 3, \
- .newidle_idx = 0, /* unused */ \
- .wake_idx = 0, /* unused */ \
- .forkexec_idx = 0, /* unused */ \
.flags = SD_LOAD_BALANCE \
| SD_SERIALIZE, \
.last_balance = jiffies, \
.balance_interval = 64, \
- .nr_balance_failed = 0, \
}
#ifdef CONFIG_NUMA