Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 6
-rw-r--r--  include/linux/amba/serial.h | 14
-rw-r--r--  include/linux/atmel_serial.h | 240
-rw-r--r--  include/linux/atomic.h | 361
-rw-r--r--  include/linux/average.h | 61
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h | 1
-rw-r--r--  include/linux/bio.h | 38
-rw-r--r--  include/linux/bitmap.h | 2
-rw-r--r--  include/linux/bitops.h | 6
-rw-r--r--  include/linux/blk-cgroup.h | 2
-rw-r--r--  include/linux/blk_types.h | 15
-rw-r--r--  include/linux/blkdev.h | 36
-rw-r--r--  include/linux/bpf.h | 12
-rw-r--r--  include/linux/cgroup-defs.h | 15
-rw-r--r--  include/linux/cgroup.h | 24
-rw-r--r--  include/linux/cgroup_subsys.h | 28
-rw-r--r--  include/linux/clk-provider.h | 89
-rw-r--r--  include/linux/clk/clk-conf.h | 2
-rw-r--r--  include/linux/clk/shmobile.h | 12
-rw-r--r--  include/linux/clk/tegra.h | 3
-rw-r--r--  include/linux/clk/ti.h | 160
-rw-r--r--  include/linux/clockchips.h | 3
-rw-r--r--  include/linux/compiler.h | 7
-rw-r--r--  include/linux/context_tracking.h | 15
-rw-r--r--  include/linux/context_tracking_state.h | 1
-rw-r--r--  include/linux/coresight.h | 21
-rw-r--r--  include/linux/cpufeature.h | 7
-rw-r--r--  include/linux/cpufreq.h | 28
-rw-r--r--  include/linux/cpuidle.h | 1
-rw-r--r--  include/linux/crypto.h | 54
-rw-r--r--  include/linux/device-mapper.h | 4
-rw-r--r--  include/linux/device.h | 28
-rw-r--r--  include/linux/etherdevice.h | 2
-rw-r--r--  include/linux/extcon.h | 7
-rw-r--r--  include/linux/f2fs_fs.h | 16
-rw-r--r--  include/linux/fdtable.h | 4
-rw-r--r--  include/linux/filter.h | 17
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/fsl_devices.h | 20
-rw-r--r--  include/linux/fsl_ifc.h | 50
-rw-r--r--  include/linux/genhd.h | 33
-rw-r--r--  include/linux/hyperv.h | 7
-rw-r--r--  include/linux/ieee80211.h | 2
-rw-r--r--  include/linux/igmp.h | 1
-rw-r--r--  include/linux/iio/common/st_sensors.h | 2
-rw-r--r--  include/linux/iio/consumer.h | 2
-rw-r--r--  include/linux/iio/iio.h | 17
-rw-r--r--  include/linux/iio/sysfs.h | 3
-rw-r--r--  include/linux/iio/trigger.h | 3
-rw-r--r--  include/linux/iio/triggered_buffer.h | 4
-rw-r--r--  include/linux/init_task.h | 10
-rw-r--r--  include/linux/ipv6.h | 5
-rw-r--r--  include/linux/irq.h | 19
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 1
-rw-r--r--  include/linux/irqchip/arm-gic.h | 3
-rw-r--r--  include/linux/irqdesc.h | 8
-rw-r--r--  include/linux/irqdomain.h | 26
-rw-r--r--  include/linux/jbd.h | 1047
-rw-r--r--  include/linux/jbd2.h | 44
-rw-r--r--  include/linux/jbd_common.h | 46
-rw-r--r--  include/linux/jiffies.h | 35
-rw-r--r--  include/linux/jump_label.h | 261
-rw-r--r--  include/linux/kasan.h | 10
-rw-r--r--  include/linux/kexec.h | 1
-rw-r--r--  include/linux/klist.h | 1
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/kthread.h | 1
-rw-r--r--  include/linux/kvm_host.h | 24
-rw-r--r--  include/linux/llist.h | 2
-rw-r--r--  include/linux/mei_cl_bus.h | 15
-rw-r--r--  include/linux/mfd/max77693-common.h | 49
-rw-r--r--  include/linux/mfd/max77693-private.h | 134
-rw-r--r--  include/linux/mfd/max77843-private.h | 174
-rw-r--r--  include/linux/mfd/palmas.h | 7
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 8
-rw-r--r--  include/linux/miscdevice.h | 2
-rw-r--r--  include/linux/mlx4/cq.h | 3
-rw-r--r--  include/linux/mlx4/device.h | 5
-rw-r--r--  include/linux/mlx4/qp.h | 3
-rw-r--r--  include/linux/mlx5/device.h | 10
-rw-r--r--  include/linux/mlx5/driver.h | 29
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 24
-rw-r--r--  include/linux/mod_devicetable.h | 2
-rw-r--r--  include/linux/mpls_iptunnel.h | 6
-rw-r--r--  include/linux/msi.h | 109
-rw-r--r--  include/linux/net.h | 8
-rw-r--r--  include/linux/netdevice.h | 176
-rw-r--r--  include/linux/netfilter.h | 44
-rw-r--r--  include/linux/netfilter/nf_conntrack_zones_common.h | 23
-rw-r--r--  include/linux/netfilter/nfnetlink_acct.h | 3
-rw-r--r--  include/linux/netfilter/x_tables.h | 8
-rw-r--r--  include/linux/netfilter_bridge.h | 12
-rw-r--r--  include/linux/netfilter_ipv6.h | 18
-rw-r--r--  include/linux/nvme.h | 22
-rw-r--r--  include/linux/nvmem-consumer.h | 157
-rw-r--r--  include/linux/nvmem-provider.h | 47
-rw-r--r--  include/linux/of.h | 3
-rw-r--r--  include/linux/of_irq.h | 1
-rw-r--r--  include/linux/of_platform.h | 9
-rw-r--r--  include/linux/pci-ats.h | 49
-rw-r--r--  include/linux/pci.h | 67
-rw-r--r--  include/linux/percpu-defs.h | 6
-rw-r--r--  include/linux/perf/arm_pmu.h | 154
-rw-r--r--  include/linux/perf_event.h | 10
-rw-r--r--  include/linux/phy.h | 14
-rw-r--r--  include/linux/phy_fixed.h | 8
-rw-r--r--  include/linux/platform_data/atmel.h | 12
-rw-r--r--  include/linux/platform_data/clk-ux500.h | 12
-rw-r--r--  include/linux/platform_data/leds-kirkwood-ns2.h | 14
-rw-r--r--  include/linux/platform_data/spi-davinci.h | 1
-rw-r--r--  include/linux/platform_data/spi-mt65xx.h | 20
-rw-r--r--  include/linux/platform_data/st_nci.h | 29
-rw-r--r--  include/linux/platform_data/video-ep93xx.h | 8
-rw-r--r--  include/linux/pm_domain.h | 9
-rw-r--r--  include/linux/pm_opp.h | 36
-rw-r--r--  include/linux/pm_qos.h | 5
-rw-r--r--  include/linux/pm_runtime.h | 6
-rw-r--r--  include/linux/preempt.h | 19
-rw-r--r--  include/linux/property.h | 4
-rw-r--r--  include/linux/proportions.h | 2
-rw-r--r--  include/linux/psci.h | 52
-rw-r--r--  include/linux/pxa2xx_ssp.h | 1
-rw-r--r--  include/linux/quotaops.h | 5
-rw-r--r--  include/linux/rcupdate.h | 144
-rw-r--r--  include/linux/rcutiny.h | 10
-rw-r--r--  include/linux/rcutree.h | 2
-rw-r--r--  include/linux/regulator/consumer.h | 16
-rw-r--r--  include/linux/regulator/da9211.h | 19
-rw-r--r--  include/linux/regulator/driver.h | 1
-rw-r--r--  include/linux/regulator/machine.h | 1
-rw-r--r--  include/linux/regulator/mt6311.h | 29
-rw-r--r--  include/linux/scatterlist.h | 9
-rw-r--r--  include/linux/sched.h | 98
-rw-r--r--  include/linux/serial_8250.h | 7
-rw-r--r--  include/linux/skbuff.h | 153
-rw-r--r--  include/linux/soc/dove/pmu.h | 6
-rw-r--r--  include/linux/soc/mediatek/infracfg.h | 26
-rw-r--r--  include/linux/soc/qcom/smd-rpm.h | 35
-rw-r--r--  include/linux/soc/qcom/smd.h | 46
-rw-r--r--  include/linux/soc/qcom/smem.h | 11
-rw-r--r--  include/linux/spi/spi.h | 64
-rw-r--r--  include/linux/spinlock.h | 40
-rw-r--r--  include/linux/stmmac.h | 22
-rw-r--r--  include/linux/stop_machine.h | 28
-rw-r--r--  include/linux/swap.h | 4
-rw-r--r--  include/linux/ti_wilink_st.h | 1
-rw-r--r--  include/linux/tick.h | 25
-rw-r--r--  include/linux/time64.h | 35
-rw-r--r--  include/linux/timekeeping.h | 9
-rw-r--r--  include/linux/trace_events.h | 7
-rw-r--r--  include/linux/tty.h | 6
-rw-r--r--  include/linux/tty_driver.h | 2
-rw-r--r--  include/linux/types.h | 3
-rw-r--r--  include/linux/uaccess.h | 2
-rw-r--r--  include/linux/uprobes.h | 17
-rw-r--r--  include/linux/usb/chipidea.h | 15
-rw-r--r--  include/linux/usb/composite.h | 2
-rw-r--r--  include/linux/usb/gadget.h | 198
-rw-r--r--  include/linux/usb/hcd.h | 6
-rw-r--r--  include/linux/usb/msm_hsusb.h | 9
-rw-r--r--  include/linux/usb/of.h | 7
-rw-r--r--  include/linux/usb/otg.h | 15
-rw-r--r--  include/linux/workqueue.h | 6
163 files changed, 3488 insertions, 2337 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d2445fa9999f..7235c4851460 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -221,7 +217,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
-
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 0ddb5c02ad8b..6a0a89ed7f81 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -33,12 +33,14 @@
#define UART01x_DR 0x00 /* Data read or written from the interface. */
#define UART01x_RSR 0x04 /* Receive status register (Read). */
#define UART01x_ECR 0x04 /* Error clear register (Write). */
+#define ZX_UART01x_DR 0x04 /* Data read or written from the interface. */
#define UART010_LCRH 0x08 /* Line control register, high byte. */
#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */
#define UART010_LCRM 0x0C /* Line control register, middle byte. */
#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */
#define UART010_LCRL 0x10 /* Line control register, low byte. */
#define UART010_CR 0x14 /* Control register. */
+#define ZX_UART01x_FR 0x14 /* Flag register (Read only). */
#define UART01x_FR 0x18 /* Flag register (Read only). */
#define UART010_IIR 0x1C /* Interrupt identification register (Read). */
#define UART010_ICR 0x1C /* Interrupt clear register (Write). */
@@ -49,13 +51,21 @@
#define UART011_LCRH 0x2c /* Line control register. */
#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */
#define UART011_CR 0x30 /* Control register. */
+#define ZX_UART011_LCRH_TX 0x30 /* Tx Line control register. */
#define UART011_IFLS 0x34 /* Interrupt fifo level select. */
+#define ZX_UART011_CR 0x34 /* Control register. */
+#define ZX_UART011_IFLS 0x38 /* Interrupt fifo level select. */
#define UART011_IMSC 0x38 /* Interrupt mask. */
#define UART011_RIS 0x3c /* Raw interrupt status. */
#define UART011_MIS 0x40 /* Masked interrupt status. */
+#define ZX_UART011_IMSC 0x40 /* Interrupt mask. */
#define UART011_ICR 0x44 /* Interrupt clear register. */
+#define ZX_UART011_RIS 0x44 /* Raw interrupt status. */
#define UART011_DMACR 0x48 /* DMA control register. */
+#define ZX_UART011_MIS 0x48 /* Masked interrupt status. */
+#define ZX_UART011_ICR 0x4c /* Interrupt clear register. */
#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */
+#define ZX_UART011_DMACR 0x50 /* DMA control register. */
#define ST_UART011_XON1 0x54 /* XON1 register. */
#define ST_UART011_XON2 0x58 /* XON2 register. */
#define ST_UART011_XOFF1 0x5C /* XOFF1 register. */
@@ -75,15 +85,19 @@
#define UART01x_RSR_PE 0x02
#define UART01x_RSR_FE 0x01
+#define ZX_UART01x_FR_BUSY 0x300
#define UART011_FR_RI 0x100
#define UART011_FR_TXFE 0x080
#define UART011_FR_RXFF 0x040
#define UART01x_FR_TXFF 0x020
#define UART01x_FR_RXFE 0x010
#define UART01x_FR_BUSY 0x008
+#define ZX_UART01x_FR_DSR 0x008
#define UART01x_FR_DCD 0x004
#define UART01x_FR_DSR 0x002
+#define ZX_UART01x_FR_CTS 0x002
#define UART01x_FR_CTS 0x001
+#define ZX_UART011_FR_RI 0x001
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index 00beddf6be20..ee696d7e8a43 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -16,115 +16,151 @@
#ifndef ATMEL_SERIAL_H
#define ATMEL_SERIAL_H
-#define ATMEL_US_CR 0x00 /* Control Register */
-#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */
-#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */
-#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */
-#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */
-#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */
-#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */
-#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */
-#define ATMEL_US_STTBRK (1 << 9) /* Start Break */
-#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */
-#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */
-#define ATMEL_US_SENDA (1 << 12) /* Send Address */
-#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */
-#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */
-#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */
-#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */
-#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */
-#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */
-#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */
+#define ATMEL_US_CR 0x00 /* Control Register */
+#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */
+#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */
+#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */
+#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */
+#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */
+#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */
+#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */
+#define ATMEL_US_STTBRK BIT(9) /* Start Break */
+#define ATMEL_US_STPBRK BIT(10) /* Stop Break */
+#define ATMEL_US_STTTO BIT(11) /* Start Time-out */
+#define ATMEL_US_SENDA BIT(12) /* Send Address */
+#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */
+#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */
+#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */
+#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */
+#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */
+#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */
+#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */
+#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */
+#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */
+#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */
+#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */
+#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */
-#define ATMEL_US_MR 0x04 /* Mode Register */
-#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */
-#define ATMEL_US_USMODE_NORMAL 0
-#define ATMEL_US_USMODE_RS485 1
-#define ATMEL_US_USMODE_HWHS 2
-#define ATMEL_US_USMODE_MODEM 3
-#define ATMEL_US_USMODE_ISO7816_T0 4
-#define ATMEL_US_USMODE_ISO7816_T1 6
-#define ATMEL_US_USMODE_IRDA 8
-#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */
-#define ATMEL_US_USCLKS_MCK (0 << 4)
-#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
-#define ATMEL_US_USCLKS_SCK (3 << 4)
-#define ATMEL_US_CHRL (3 << 6) /* Character Length */
-#define ATMEL_US_CHRL_5 (0 << 6)
-#define ATMEL_US_CHRL_6 (1 << 6)
-#define ATMEL_US_CHRL_7 (2 << 6)
-#define ATMEL_US_CHRL_8 (3 << 6)
-#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */
-#define ATMEL_US_PAR (7 << 9) /* Parity Type */
-#define ATMEL_US_PAR_EVEN (0 << 9)
-#define ATMEL_US_PAR_ODD (1 << 9)
-#define ATMEL_US_PAR_SPACE (2 << 9)
-#define ATMEL_US_PAR_MARK (3 << 9)
-#define ATMEL_US_PAR_NONE (4 << 9)
-#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
-#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */
-#define ATMEL_US_NBSTOP_1 (0 << 12)
-#define ATMEL_US_NBSTOP_1_5 (1 << 12)
-#define ATMEL_US_NBSTOP_2 (2 << 12)
-#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */
-#define ATMEL_US_CHMODE_NORMAL (0 << 14)
-#define ATMEL_US_CHMODE_ECHO (1 << 14)
-#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
-#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
-#define ATMEL_US_MSBF (1 << 16) /* Bit Order */
-#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */
-#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */
-#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */
-#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */
-#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */
-#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */
-#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */
+#define ATMEL_US_MR 0x04 /* Mode Register */
+#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
+#define ATMEL_US_USMODE_NORMAL 0
+#define ATMEL_US_USMODE_RS485 1
+#define ATMEL_US_USMODE_HWHS 2
+#define ATMEL_US_USMODE_MODEM 3
+#define ATMEL_US_USMODE_ISO7816_T0 4
+#define ATMEL_US_USMODE_ISO7816_T1 6
+#define ATMEL_US_USMODE_IRDA 8
+#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
+#define ATMEL_US_USCLKS_MCK (0 << 4)
+#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
+#define ATMEL_US_USCLKS_SCK (3 << 4)
+#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
+#define ATMEL_US_CHRL_5 (0 << 6)
+#define ATMEL_US_CHRL_6 (1 << 6)
+#define ATMEL_US_CHRL_7 (2 << 6)
+#define ATMEL_US_CHRL_8 (3 << 6)
+#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
+#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
+#define ATMEL_US_PAR_EVEN (0 << 9)
+#define ATMEL_US_PAR_ODD (1 << 9)
+#define ATMEL_US_PAR_SPACE (2 << 9)
+#define ATMEL_US_PAR_MARK (3 << 9)
+#define ATMEL_US_PAR_NONE (4 << 9)
+#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
+#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
+#define ATMEL_US_NBSTOP_1 (0 << 12)
+#define ATMEL_US_NBSTOP_1_5 (1 << 12)
+#define ATMEL_US_NBSTOP_2 (2 << 12)
+#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
+#define ATMEL_US_CHMODE_NORMAL (0 << 14)
+#define ATMEL_US_CHMODE_ECHO (1 << 14)
+#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
+#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
+#define ATMEL_US_MSBF BIT(16) /* Bit Order */
+#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
+#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
+#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */
+#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
+#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
+#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */
+#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
-#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
-#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */
-#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */
-#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */
-#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */
-#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */
-#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */
-#define ATMEL_US_FRAME (1 << 6) /* Framing Error */
-#define ATMEL_US_PARE (1 << 7) /* Parity Error */
-#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */
-#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */
-#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */
-#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */
-#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */
-#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */
-#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */
-#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */
-#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */
-#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */
-#define ATMEL_US_RI (1 << 20) /* RI */
-#define ATMEL_US_DSR (1 << 21) /* DSR */
-#define ATMEL_US_DCD (1 << 22) /* DCD */
-#define ATMEL_US_CTS (1 << 23) /* CTS */
+#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
+#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */
+#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */
+#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */
+#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */
+#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */
+#define ATMEL_US_OVRE BIT(5) /* Overrun Error */
+#define ATMEL_US_FRAME BIT(6) /* Framing Error */
+#define ATMEL_US_PARE BIT(7) /* Parity Error */
+#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */
+#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */
+#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */
+#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */
+#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */
+#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */
+#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */
+#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */
+#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */
+#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */
+#define ATMEL_US_RI BIT(20) /* RI */
+#define ATMEL_US_DSR BIT(21) /* DSR */
+#define ATMEL_US_DCD BIT(22) /* DCD */
+#define ATMEL_US_CTS BIT(23) /* CTS */
-#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
-#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
-#define ATMEL_US_CSR 0x14 /* Channel Status Register */
-#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
-#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
-#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */
+#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
+#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
+#define ATMEL_US_CSR 0x14 /* Channel Status Register */
+#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
+#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
+#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */
-#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
-#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */
+#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
+#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
-#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
-#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */
+#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
+#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
-#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
-#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */
+#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
+#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */
-#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
-#define ATMEL_US_NER 0x44 /* Number of Errors Register */
-#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
+#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
+#define ATMEL_US_NER 0x44 /* Number of Errors Register */
+#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
-#define ATMEL_US_NAME 0xf0 /* Ip Name */
-#define ATMEL_US_VERSION 0xfc /* Ip Version */
+#define ATMEL_US_CMPR 0x90 /* Comparison Register */
+#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
+#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
+#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
+#define ATMEL_US_ONE_DATA 0x0
+#define ATMEL_US_TWO_DATA 0x1
+#define ATMEL_US_FOUR_DATA 0x2
+#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
+#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
+#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
+#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
+
+#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
+#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
+#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
+
+#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
+#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
+#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */
+#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */
+#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */
+#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */
+#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */
+#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */
+#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */
+#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */
+#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */
+#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */
+#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */
+#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */
+
+#define ATMEL_US_NAME 0xf0 /* Ip Name */
+#define ATMEL_US_VERSION 0xfc /* Ip Version */
#endif
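
The atmel_serial.h rework above replaces open-coded shifts with the BIT() and GENMASK() helpers. A minimal, stand-alone sketch (using simplified stand-ins for the real <linux/bitops.h> macros, an assumption made only so the snippet compiles in user space) shows the new spellings are bit-for-bit identical to the old ones:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(nr)         (1UL << (nr))
#define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

int main(void)
{
        assert(BIT(4)         == (1UL << 4));     /* ATMEL_US_RXEN   */
        assert(GENMASK(5, 4)  == (3UL << 4));     /* ATMEL_US_USCLKS */
        assert(GENMASK(11, 9) == (7UL << 9));     /* ATMEL_US_PAR    */
        assert(GENMASK(15, 0) == 0xffffUL);       /* ATMEL_US_CD     */
        printf("BIT()/GENMASK() match the old open-coded masks\n");
        return 0;
}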
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540ecf..00a5763e850e 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+#ifndef atomic_read_acquire
+#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic_set_release
+#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ */
+#define __atomic_op_acquire(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
+ smp_mb__after_atomic(); \
+ __ret; \
+})
+
+#define __atomic_op_release(op, args...) \
+({ \
+ smp_mb__before_atomic(); \
+ op##_relaxed(args); \
+})
+
+#define __atomic_op_fence(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret; \
+ smp_mb__before_atomic(); \
+ __ret = op##_relaxed(args); \
+ smp_mb__after_atomic(); \
+ __ret; \
+})
+
+/* atomic_add_return_relaxed */
+#ifndef atomic_add_return_relaxed
+#define atomic_add_return_relaxed atomic_add_return
+#define atomic_add_return_acquire atomic_add_return
+#define atomic_add_return_release atomic_add_return
+
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+#define atomic_add_return_acquire(...) \
+ __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return_release
+#define atomic_add_return_release(...) \
+ __atomic_op_release(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return
+#define atomic_add_return(...) \
+ __atomic_op_fence(atomic_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic_add_return_relaxed */
+
+/* atomic_sub_return_relaxed */
+#ifndef atomic_sub_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return
+#define atomic_sub_return_acquire atomic_sub_return
+#define atomic_sub_return_release atomic_sub_return
+
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+#define atomic_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return_release
+#define atomic_sub_return_release(...) \
+ __atomic_op_release(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return
+#define atomic_sub_return(...) \
+ __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic_sub_return_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_relaxed atomic_xchg
+#define atomic_xchg_acquire atomic_xchg
+#define atomic_xchg_release atomic_xchg
+
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define atomic_xchg_acquire(...) \
+ __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg_release
+#define atomic_xchg_release(...) \
+ __atomic_op_release(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg
+#define atomic_xchg(...) \
+ __atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic_xchg_relaxed */
+
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed atomic_cmpxchg
+#define atomic_cmpxchg_acquire atomic_cmpxchg
+#define atomic_cmpxchg_release atomic_cmpxchg
+
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+#define atomic_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg_release
+#define atomic_cmpxchg_release(...) \
+ __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(...) \
+ __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic64_read_acquire
+#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic64_set_release
+#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_relaxed atomic64_add_return
+#define atomic64_add_return_acquire atomic64_add_return
+#define atomic64_add_return_release atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define atomic64_add_return_acquire(...) \
+ __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define atomic64_add_return_release(...) \
+ __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define atomic64_add_return(...) \
+ __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return
+#define atomic64_sub_return_acquire atomic64_sub_return
+#define atomic64_sub_return_release atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define atomic64_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return_release
+#define atomic64_sub_return_release(...) \
+ __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define atomic64_sub_return(...) \
+ __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_relaxed atomic64_xchg
+#define atomic64_xchg_acquire atomic64_xchg
+#define atomic64_xchg_release atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define atomic64_xchg_acquire(...) \
+ __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define atomic64_xchg_release(...) \
+ __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define atomic64_xchg(...) \
+ __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg
+#define atomic64_cmpxchg_release atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define atomic64_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define atomic64_cmpxchg_release(...) \
+ __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define atomic64_cmpxchg(...) \
+ __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
+/* cmpxchg_relaxed */
+#ifndef cmpxchg_relaxed
+#define cmpxchg_relaxed cmpxchg
+#define cmpxchg_acquire cmpxchg
+#define cmpxchg_release cmpxchg
+
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define cmpxchg_acquire(...) \
+ __atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define cmpxchg_release(...) \
+ __atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define cmpxchg(...) \
+ __atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+#endif /* cmpxchg_relaxed */
+
+/* cmpxchg64_relaxed */
+#ifndef cmpxchg64_relaxed
+#define cmpxchg64_relaxed cmpxchg64
+#define cmpxchg64_acquire cmpxchg64
+#define cmpxchg64_release cmpxchg64
+
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define cmpxchg64_acquire(...) \
+ __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define cmpxchg64_release(...) \
+ __atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define cmpxchg64(...) \
+ __atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+#endif /* cmpxchg64_relaxed */
+
+/* xchg_relaxed */
+#ifndef xchg_relaxed
+#define xchg_relaxed xchg
+#define xchg_acquire xchg
+#define xchg_release xchg
+
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+#endif /* xchg_relaxed */
/**
* atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
+#ifndef atomic_andnot
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+ atomic_and(~i, v);
+}
+#endif
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_andnot(mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_or(mask, v);
+}
+
/**
* atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t
@@ -111,21 +451,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
- int old;
- int new;
-
- do {
- old = atomic_read(v);
- new = old | i;
- } while (atomic_cmpxchg(v, old, new) != old);
-}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
-
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
+
+#ifndef atomic64_andnot
+static inline void atomic64_andnot(long long i, atomic64_t *v)
+{
+ atomic64_and(~i, v);
+}
+#endif
+
#endif /* _LINUX_ATOMIC_H */
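
The block above derives the _acquire, _release and fully ordered forms of an operation from its _relaxed form by wrapping it in barriers. A stand-alone C11 sketch of the same construction, using <stdatomic.h> fences in place of the kernel's smp_mb__before_atomic()/smp_mb__after_atomic() (an approximation for illustration only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int counter;

/* Relaxed primitive, as an architecture would provide atomic_add_return_relaxed(). */
static int add_return_relaxed(int i)
{
        return atomic_fetch_add_explicit(&counter, i, memory_order_relaxed) + i;
}

/* Acquire form: relaxed op, then a barrier; the shape of __atomic_op_acquire(). */
static int add_return_acquire(int i)
{
        int ret = add_return_relaxed(i);

        atomic_thread_fence(memory_order_acquire);   /* kernel: smp_mb__after_atomic() */
        return ret;
}

/* Release form: barrier, then the relaxed op; the shape of __atomic_op_release(). */
static int add_return_release(int i)
{
        atomic_thread_fence(memory_order_release);   /* kernel: smp_mb__before_atomic() */
        return add_return_relaxed(i);
}

/* Fully ordered form: barriers on both sides; the shape of __atomic_op_fence(). */
static int add_return(int i)
{
        int ret;

        atomic_thread_fence(memory_order_seq_cst);
        ret = add_return_relaxed(i);
        atomic_thread_fence(memory_order_seq_cst);
        return ret;
}

int main(void)
{
        int a = add_return_relaxed(1);
        int b = add_return_acquire(1);
        int c = add_return_release(1);
        int d = add_return(1);

        printf("%d %d %d %d\n", a, b, c, d);    /* 1 2 3 4 */
        return 0;
}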
diff --git a/include/linux/average.h b/include/linux/average.h
index c6028fd742c1..d04aa58280de 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -3,28 +3,43 @@
/* Exponentially weighted moving average (EWMA) */
-/* For more documentation see lib/average.c */
-
-struct ewma {
- unsigned long internal;
- unsigned long factor;
- unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
- unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
- return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight) \
+ struct ewma_##name { \
+ unsigned long internal; \
+ }; \
+ static inline void ewma_##name##_init(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ e->internal = 0; \
+ } \
+ static inline unsigned long \
+ ewma_##name##_read(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ return e->internal >> ilog2(_factor); \
+ } \
+ static inline void ewma_##name##_add(struct ewma_##name *e, \
+ unsigned long val) \
+ { \
+ unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long weight = ilog2(_weight); \
+ unsigned long factor = ilog2(_factor); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ \
+ ACCESS_ONCE(e->internal) = internal ? \
+ (((internal << weight) - internal) + \
+ (val << factor)) >> weight : \
+ (val << factor); \
+ }
#endif /* _LINUX_AVERAGE_H */
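
DECLARE_EWMA() replaces the old struct ewma runtime API with a per-user type and inline helpers generated at compile time. A hedged usage sketch in kernel context (the name "rssi" and the surrounding driver structures are illustrative, not part of the patch):

#include <linux/average.h>

/* Generates struct ewma_rssi plus ewma_rssi_init/_add/_read();
 * factor 16 and weight 8 must both be powers of two. */
DECLARE_EWMA(rssi, 16, 8)

struct my_station {
        struct ewma_rssi avg_rssi;
};

static void my_station_init(struct my_station *sta)
{
        ewma_rssi_init(&sta->avg_rssi);
}

static void my_rx_sample(struct my_station *sta, unsigned long rssi)
{
        ewma_rssi_add(&sta->avg_rssi, rssi);
}

static unsigned long my_avg_rssi(struct my_station *sta)
{
        return ewma_rssi_read(&sta->avg_rssi);
}

Internally the average is kept scaled by factor (fixed point), and each add computes internal = (internal * (weight - 1) + sample * factor) / weight using shifts, which is why both parameters must be powers of two.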
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 6cceedf65ca2..cf038431a5cc 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,7 +640,6 @@ struct bcma_drv_cc {
spinlock_t gpio_lock;
#ifdef CONFIG_BCMA_DRIVER_GPIO
struct gpio_chip gpio;
- struct irq_domain *irq_domain;
#endif
};
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5e963a6d7c14..b9b6e046b52e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -187,17 +187,6 @@ static inline void *bio_data(struct bio *bio)
__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
-{
- return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
-}
-
-#define bio_io_error(bio) bio_endio((bio), -EIO)
-
-/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
@@ -306,6 +295,21 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
atomic_set(&bio->__bi_cnt, count);
}
+static inline bool bio_flagged(struct bio *bio, unsigned int bit)
+{
+ return (bio->bi_flags & (1U << bit)) != 0;
+}
+
+static inline void bio_set_flag(struct bio *bio, unsigned int bit)
+{
+ bio->bi_flags |= (1U << bit);
+}
+
+static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
+{
+ bio->bi_flags &= ~(1U << bit);
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -426,7 +430,14 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
}
-extern void bio_endio(struct bio *, int);
+extern void bio_endio(struct bio *);
+
+static inline void bio_io_error(struct bio *bio)
+{
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+}
+
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -440,7 +451,6 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
-extern int bio_get_nr_vecs(struct block_device *);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
const struct iov_iter *, gfp_t);
@@ -717,7 +727,7 @@ extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
-extern void bio_integrity_endio(struct bio *, int);
+extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
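
Together with the blk_types.h change below (bio_end_io_t losing its int argument and struct bio gaining bi_error), completion status now lives in the bio itself. A hedged sketch of a driver-side completion callback after this series (my_end_io and the pr_err() message are illustrative):

#include <linux/bio.h>

/* Old prototype: void my_end_io(struct bio *bio, int error);
 * New prototype: the error is read from bio->bi_error instead. */
static void my_end_io(struct bio *bio)
{
        if (bio->bi_error && !bio_flagged(bio, BIO_QUIET))
                pr_err("mydev: I/O failed with %d\n", bio->bi_error);

        bio_put(bio);
}

On the submission side, failing a bio early is still spelled bio_io_error(bio), which is now an inline that sets bi_error to -EIO and calls bio_endio(bio).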
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index ea17cca9e685..9653fdb76a42 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -295,7 +295,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 297f5bda4fdf..e63553386ae7 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -57,7 +57,7 @@ extern unsigned long __sw_hweight64(__u64 w);
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-static __inline__ int get_bitmask_order(unsigned int count)
+static inline int get_bitmask_order(unsigned int count)
{
int order;
@@ -65,7 +65,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
return order; /* We could be slightly more clever with -1 here... */
}
-static __inline__ int get_count_order(unsigned int count)
+static inline int get_count_order(unsigned int count)
{
int order;
@@ -75,7 +75,7 @@ static __inline__ int get_count_order(unsigned int count)
return order;
}
-static inline unsigned long hweight_long(unsigned long w)
+static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1b62d768c7df..a4cd1641e9e2 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -374,7 +374,7 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
* root_rl in such cases.
*/
blkg = blkg_lookup_create(blkcg, q);
- if (unlikely(IS_ERR(blkg)))
+ if (IS_ERR(blkg))
goto root_rl;
blkg_get(blkg);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 7303b3405520..e8130138f29d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,7 +14,7 @@ struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
-typedef void (bio_end_io_t) (struct bio *, int);
+typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
/*
@@ -46,7 +46,8 @@ struct bvec_iter {
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
- unsigned long bi_flags; /* status, command, etc */
+ unsigned int bi_flags; /* status, command, etc */
+ int bi_error;
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
*/
@@ -111,16 +112,14 @@ struct bio {
/*
* bio flags
*/
-#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
#define BIO_CLONED 2 /* doesn't own data */
#define BIO_BOUNCED 3 /* bio is a bounce bio */
#define BIO_USER_MAPPED 4 /* contains user pages */
#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
#define BIO_QUIET 6 /* Make BIO Quiet */
-#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */
-#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
+#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
/*
* Flags starting here get preserved by bio_reset() - this includes
@@ -129,14 +128,12 @@ struct bio {
#define BIO_RESET_BITS 13
#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
-#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
-
/*
* top 4 bits of bio flags indicate the pool this bio came from
*/
#define BIO_POOL_BITS (4)
#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
-#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
+#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d4068c17d0df..a622f270f09e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -213,14 +213,6 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
struct bio_vec;
-struct bvec_merge_data {
- struct block_device *bi_bdev;
- sector_t bi_sector;
- unsigned bi_size;
- unsigned long bi_rw;
-};
-typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
- struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
@@ -258,6 +250,7 @@ struct blk_queue_tag {
struct queue_limits {
unsigned long bounce_pfn;
unsigned long seg_boundary_mask;
+ unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
unsigned int chunk_sectors;
@@ -268,6 +261,7 @@ struct queue_limits {
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
+ unsigned int max_hw_discard_sectors;
unsigned int max_write_same_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
@@ -305,7 +299,6 @@ struct request_queue {
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
- merge_bvec_fn *merge_bvec_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
@@ -462,6 +455,7 @@ struct request_queue {
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
+ struct bio_set *bio_split;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -486,7 +480,6 @@ struct request_queue {
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -782,6 +775,8 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
+extern void blk_queue_split(struct request_queue *, struct bio **,
+ struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
@@ -986,9 +981,9 @@ extern int blk_queue_dma_drain(struct request_queue *q,
void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
-extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
@@ -1138,6 +1133,7 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
+ BLK_DEF_MAX_SECTORS = 2560,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
@@ -1154,6 +1150,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
return q->limits.seg_boundary_mask;
}
+static inline unsigned long queue_virt_boundary(struct request_queue *q)
+{
+ return q->limits.virt_boundary_mask;
+}
+
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
return q->limits.max_sectors;
@@ -1354,6 +1355,19 @@ static inline void put_dev_sector(Sector p)
page_cache_release(p.v);
}
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ if (!queue_virt_boundary(q))
+ return false;
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
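
The removed merge_bvec_fn and QUEUE_FLAG_SG_GAPS give way to a per-queue virt_boundary_mask plus driver-driven splitting. A hedged sketch of how a bio-based driver whose hardware cannot tolerate gaps inside an SG element might use the new interfaces (my_setup_queue and my_make_request are illustrative):

#include <linux/blkdev.h>

static void my_setup_queue(struct request_queue *q)
{
        /*
         * Adjacent bio_vecs whose addresses would leave a hole inside a
         * page-sized window now cause a split; bvec_gap_to_prev() (moved
         * here from bio.h) checks against this same mask.
         */
        blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}

static void my_make_request(struct request_queue *q, struct bio *bio)
{
        /* bio-based drivers are now expected to split oversized bios themselves. */
        blk_queue_split(q, &bio, q->bio_split);

        /* ... map and issue the (possibly trimmed) bio, then complete it ... */
        bio_endio(bio);
}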
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4383476a0d48..f57d7fed9ec3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>
+#include <linux/perf_event.h>
struct bpf_map;
@@ -24,6 +25,10 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
+
+ /* funcs called by prog_array and perf_event_array map */
+ void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
+ void (*map_fd_put_ptr) (void *ptr);
};
struct bpf_map {
@@ -142,13 +147,13 @@ struct bpf_array {
bool owner_jited;
union {
char value[0] __aligned(8);
- struct bpf_prog *prog[0] __aligned(8);
+ void *ptrs[0] __aligned(8);
};
};
#define MAX_TAIL_CALL_CNT 32
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-void bpf_prog_array_map_clear(struct bpf_map *map);
+void bpf_fd_array_map_clear(struct bpf_map *map);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_perf_event_read_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 93755a629299..4d8fcf2187dc 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -34,12 +34,17 @@ struct seq_file;
/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
+#define SUBSYS_TAG(_t) CGROUP_ ## _t, \
+ __unused_tag_ ## _t = CGROUP_ ## _t - 1,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
CGROUP_SUBSYS_COUNT,
};
+#undef SUBSYS_TAG
#undef SUBSYS
+#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START)
+
/* bits in struct cgroup_subsys_state flags field */
enum {
CSS_NO_REF = (1 << 0), /* no reference counting for this css */
@@ -318,7 +323,7 @@ struct cftype {
* end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
- int private;
+ unsigned long private;
/*
* If not 0, file mode is set to this value, otherwise it will
* be figured out automatically
@@ -406,7 +411,9 @@ struct cgroup_subsys {
struct cgroup_taskset *tset);
void (*attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
- void (*fork)(struct task_struct *task);
+ int (*can_fork)(struct task_struct *task, void **priv_p);
+ void (*cancel_fork)(struct task_struct *task, void *priv);
+ void (*fork)(struct task_struct *task, void *priv);
void (*exit)(struct cgroup_subsys_state *css,
struct cgroup_subsys_state *old_css,
struct task_struct *task);
@@ -434,6 +441,9 @@ struct cgroup_subsys {
int id;
const char *name;
+ /* optional, initialized automatically during boot if not set */
+ const char *legacy_name;
+
/* link to parent, protected by cgroup_lock() */
struct cgroup_root *root;
@@ -491,6 +501,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
#else /* CONFIG_CGROUPS */
+#define CGROUP_CANFORK_COUNT 0
#define CGROUP_SUBSYS_COUNT 0
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a593e299162e..eb7ca55f72ef 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -22,6 +22,15 @@
#ifdef CONFIG_CGROUPS
+/*
+ * All weight knobs on the default hierarchy should use the following min,
+ * default and max values. The default value is the logarithmic center of
+ * MIN and MAX and allows 100x to be expressed in both directions.
+ */
+#define CGROUP_WEIGHT_MIN 1
+#define CGROUP_WEIGHT_DFL 100
+#define CGROUP_WEIGHT_MAX 10000
+
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
@@ -62,7 +71,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
void cgroup_fork(struct task_struct *p);
-void cgroup_post_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]);
+extern void cgroup_cancel_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]);
+extern void cgroup_post_fork(struct task_struct *p,
+ void *old_ss_priv[CGROUP_CANFORK_COUNT]);
void cgroup_exit(struct task_struct *p);
int cgroup_init_early(void);
@@ -524,7 +538,13 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry) { return -EINVAL; }
static inline void cgroup_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT])
+{ return 0; }
+static inline void cgroup_cancel_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
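
The single ->fork() callback grows into a can_fork/cancel_fork/fork trio so a controller (the new pids controller between the CANFORK tags below) can reject a fork and roll back any charge it took. A hedged sketch of the sequence the fork path is expected to follow (do_remaining_fork_work() is an illustrative stand-in, not a real kernel function):

static int do_remaining_fork_work(struct task_struct *child);  /* illustrative */

static int my_fork_path(struct task_struct *child)
{
        void *ss_priv[CGROUP_CANFORK_COUNT];
        int ret;

        /* Ask every CANFORK-tagged subsystem whether this fork may proceed. */
        ret = cgroup_can_fork(child, ss_priv);
        if (ret)
                return ret;

        ret = do_remaining_fork_work(child);
        if (ret) {
                /* Undo whatever can_fork() charged (e.g. a pids count). */
                cgroup_cancel_fork(child, ss_priv);
                return ret;
        }

        /* Commit: calls each subsystem's ->fork(task, priv). */
        cgroup_post_fork(child, ss_priv);
        return 0;
}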
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index e4a96fb14403..1f36945fd23d 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -3,6 +3,17 @@
*
* DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
*/
+
+/*
+ * This file *must* be included with SUBSYS() defined.
+ * SUBSYS_TAG() is a noop if undefined.
+ */
+
+#ifndef SUBSYS_TAG
+#define __TMP_SUBSYS_TAG
+#define SUBSYS_TAG(_x)
+#endif
+
#if IS_ENABLED(CONFIG_CPUSETS)
SUBSYS(cpuset)
#endif
@@ -48,11 +59,28 @@ SUBSYS(hugetlb)
#endif
/*
+ * Subsystems that implement the can_fork() family of callbacks.
+ */
+SUBSYS_TAG(CANFORK_START)
+
+#if IS_ENABLED(CONFIG_CGROUP_PIDS)
+SUBSYS(pids)
+#endif
+
+SUBSYS_TAG(CANFORK_END)
+
+/*
* The following subsystems are not supported on the default hierarchy.
*/
#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
SUBSYS(debug)
#endif
+
+#ifdef __TMP_SUBSYS_TAG
+#undef __TMP_SUBSYS_TAG
+#undef SUBSYS_TAG
+#endif
+
/*
* DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
*/
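
The SUBSYS_TAG() markers let cgroup-defs.h compute CGROUP_CANFORK_COUNT from the same x-macro list. Expanded by hand with only cpuset and pids enabled (a simplification for illustration), the enum comes out as:

/* SUBSYS(_x)     -> _x ## _cgrp_id,
 * SUBSYS_TAG(_t) -> CGROUP_ ## _t, __unused_tag_ ## _t = CGROUP_ ## _t - 1, */
enum cgroup_subsys_id {
        cpuset_cgrp_id,                                         /* 0 */
        CGROUP_CANFORK_START,                                   /* 1 */
        __unused_tag_CANFORK_START = CGROUP_CANFORK_START - 1,  /* rewinds the counter */
        pids_cgrp_id,                                           /* 1, shares the tag's value */
        CGROUP_CANFORK_END,                                     /* 2 */
        __unused_tag_CANFORK_END = CGROUP_CANFORK_END - 1,
        CGROUP_SUBSYS_COUNT,                                    /* 2 */
};
/* CGROUP_CANFORK_COUNT == CGROUP_CANFORK_END - CGROUP_CANFORK_START == 1 */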
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 78842f46f152..3ecc07d0da77 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -11,7 +11,6 @@
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -33,11 +32,34 @@
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
+struct clk;
struct clk_hw;
struct clk_core;
struct dentry;
/**
+ * struct clk_rate_request - Structure encoding the clk constraints that
+ * a clock user might require.
+ *
+ * @rate: Requested clock rate. This field will be adjusted by
+ * clock drivers according to hardware capabilities.
+ * @min_rate: Minimum rate imposed by clk users.
+ * @max_rate: Maximum rate imposed by clk users.
+ * @best_parent_rate: The best parent rate a parent can provide to fulfill the
+ * requested constraints.
+ * @best_parent_hw: The most appropriate parent clock that fulfills the
+ * requested constraints.
+ *
+ */
+struct clk_rate_request {
+ unsigned long rate;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ unsigned long best_parent_rate;
+ struct clk_hw *best_parent_hw;
+};
+
+/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
* through the clk_* api.
@@ -176,12 +198,8 @@ struct clk_ops {
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate);
- long (*determine_rate)(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_hw);
+ int (*determine_rate)(struct clk_hw *hw,
+ struct clk_rate_request *req);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -343,6 +361,9 @@ struct clk_div_table {
* to the closest integer instead of the up one.
* CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
* not be changed by the clock framework.
+ * CLK_DIVIDER_MAX_AT_ZERO - For dividers which behave like CLK_DIVIDER_ONE_BASED,
+ * except that a register value of zero means the divisor is 2^width of the
+ * field.
*/
struct clk_divider {
struct clk_hw hw;
@@ -360,6 +381,7 @@ struct clk_divider {
#define CLK_DIVIDER_HIWORD_MASK BIT(3)
#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
#define CLK_DIVIDER_READ_ONLY BIT(5)
+#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
extern const struct clk_ops clk_divider_ops;
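
To make the new flag concrete, here is an illustrative decode of a divider field (not the framework's implementation) showing how CLK_DIVIDER_MAX_AT_ZERO differs from the plain one-based case:

        static unsigned int example_divider_decode(unsigned int val,
                                                   unsigned int width,
                                                   unsigned long flags)
        {
                if (flags & CLK_DIVIDER_MAX_AT_ZERO)
                        return val ? val : 1U << width; /* 0 means divide by 2^width */
                if (flags & CLK_DIVIDER_ONE_BASED)
                        return val;                     /* register value is the divisor */
                return val + 1;                         /* default: value + 1 */
        }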
@@ -550,6 +572,23 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
void of_gpio_clk_gate_setup(struct device_node *node);
/**
+ * struct clk_gpio_mux - gpio controlled clock multiplexer
+ *
+ * @hw: see struct clk_gpio
+ * @gpiod: gpio descriptor to select the parent of this clock multiplexer
+ *
+ * Clock with a gpio control for selecting the parent clock.
+ * Implements .get_parent, .set_parent and .determine_rate
+ */
+
+extern const struct clk_ops clk_gpio_mux_ops;
+struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents, unsigned gpio,
+ bool active_low, unsigned long flags);
+
+void of_gpio_mux_clk_setup(struct device_node *node);
+
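A hypothetical probe-time registration of a two-parent gpio mux might look like the sketch below; the clock name, parent names and gpio number are placeholders, not taken from a real board:

        static const char * const foo_mux_parents[] = { "osc24m", "osc32k" };

        static int foo_probe(struct platform_device *pdev)
        {
                struct clk *clk;

                clk = clk_register_gpio_mux(&pdev->dev, "foo_mux", foo_mux_parents,
                                            ARRAY_SIZE(foo_mux_parents),
                                            42 /* gpio */, false /* active_low */, 0);
                return PTR_ERR_OR_ZERO(clk);
        }
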
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
@@ -568,31 +607,27 @@ void devm_clk_unregister(struct device *dev, struct clk *clk);
/* helper functions */
const char *__clk_get_name(struct clk *clk);
+const char *clk_hw_get_name(const struct clk_hw *hw);
struct clk_hw *__clk_get_hw(struct clk *clk);
-u8 __clk_get_num_parents(struct clk *clk);
-struct clk *__clk_get_parent(struct clk *clk);
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
+unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
+struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
+struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
+ unsigned int index);
unsigned int __clk_get_enable_count(struct clk *clk);
-unsigned long __clk_get_rate(struct clk *clk);
+unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
-bool __clk_is_prepared(struct clk *clk);
+unsigned long clk_hw_get_flags(const struct clk_hw *hw);
+bool clk_hw_is_prepared(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p);
-unsigned long __clk_determine_rate(struct clk_hw *core,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate);
-long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p);
+int __clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
+int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
+int __clk_mux_determine_rate_closest(struct clk_hw *hw,
+ struct clk_rate_request *req);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
+ unsigned long max_rate);
static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
@@ -603,7 +638,7 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
/*
* FIXME clock api without lock protection
*/
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
+unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
struct of_device_id;
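
For reference, a recalc_rate implementation written against the new clk_hw-based helpers (instead of the removed __clk_get_* accessors) could read as follows; foo_recalc_rate and the fixed divide-by-two are illustrative only:

        static unsigned long foo_recalc_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
        {
                struct clk_hw *parent = clk_hw_get_parent(hw);

                pr_debug("%s: parent %s runs at %lu Hz\n", clk_hw_get_name(hw),
                         clk_hw_get_name(parent), clk_hw_get_rate(parent));

                return parent_rate / 2;
        }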
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index f3050e15f833..e0c362363c38 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -7,6 +7,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
+
struct device_node;
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
index 63a8159c4e64..cb19cc1865ca 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/shmobile.h
@@ -16,8 +16,20 @@
#include <linux/types.h>
+struct device;
+struct device_node;
+struct generic_pm_domain;
+
void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+void cpg_mstp_add_clk_domain(struct device_node *np);
+int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
+void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
+#else
+static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
+#endif
+
#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 19c4208f4752..57bf7aab4516 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -17,7 +17,8 @@
#ifndef __LINUX_CLK_TEGRA_H_
#define __LINUX_CLK_TEGRA_H_
-#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/bug.h>
/*
* Tegra CPU clock and reset control ops
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 79b76e13d904..223be696df27 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -188,33 +188,6 @@ struct clk_hw_omap {
/* DPLL Type and DCO Selection Flags */
#define DPLL_J_TYPE 0x1
-/* Composite clock component types */
-enum {
- CLK_COMPONENT_TYPE_GATE = 0,
- CLK_COMPONENT_TYPE_DIVIDER,
- CLK_COMPONENT_TYPE_MUX,
- CLK_COMPONENT_TYPE_MAX,
-};
-
-/**
- * struct ti_dt_clk - OMAP DT clock alias declarations
- * @lk: clock lookup definition
- * @node_name: clock DT node to map to
- */
-struct ti_dt_clk {
- struct clk_lookup lk;
- char *node_name;
-};
-
-#define DT_CLK(dev, con, name) \
- { \
- .lk = { \
- .dev_id = dev, \
- .con_id = con, \
- }, \
- .node_name = name, \
- }
-
/* Static memmap indices */
enum {
TI_CLKM_CM = 0,
@@ -225,8 +198,6 @@ enum {
CLK_MAX_MEMMAPS
};
-typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
-
/**
* struct clk_omap_reg - OMAP register declaration
* @offset: offset from the master IP module base address
@@ -238,98 +209,62 @@ struct clk_omap_reg {
};
/**
- * struct ti_clk_ll_ops - low-level register access ops for a clock
+ * struct ti_clk_ll_ops - low-level ops for clocks
* @clk_readl: pointer to register read function
* @clk_writel: pointer to register write function
+ * @clkdm_clk_enable: pointer to clockdomain enable function
+ * @clkdm_clk_disable: pointer to clockdomain disable function
+ * @cm_wait_module_ready: pointer to CM module wait ready function
+ * @cm_split_idlest_reg: pointer to CM module function to split idlest reg
*
- * Low-level register access ops are generally used by the basic clock types
- * (clk-gate, clk-mux, clk-divider etc.) to provide support for various
- * low-level hardware interfaces (direct MMIO, regmap etc.), but can also be
- * used by other hardware-specific clock drivers if needed.
+ * Low-level ops are generally used by the basic clock types (clk-gate,
+ * clk-mux, clk-divider etc.) to provide support for various low-level
+ * hardware interfaces (direct MMIO, regmap etc.), and are initialized
+ * by board code. Low-level ops also contain some other platform-specific
+ * operations not provided directly by clock drivers.
*/
struct ti_clk_ll_ops {
u32 (*clk_readl)(void __iomem *reg);
void (*clk_writel)(u32 val, void __iomem *reg);
+ int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
+ int (*clkdm_clk_disable)(struct clockdomain *clkdm,
+ struct clk *clk);
+ int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
+ u8 idlest_shift);
+ int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
+ u8 *idlest_reg_id);
};
-extern struct ti_clk_ll_ops *ti_clk_ll_ops;
-
-extern const struct clk_ops ti_clk_divider_ops;
-extern const struct clk_ops ti_clk_mux_ops;
-
#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
-void omap2_init_clk_hw_omap_clocks(struct clk *clk);
-int omap3_noncore_dpll_enable(struct clk_hw *hw);
-void omap3_noncore_dpll_disable(struct clk_hw *hw);
-int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
-int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate);
-int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate,
- u8 index);
-long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk);
-unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
- unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate);
-long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk);
-u8 omap2_init_dpll_parent(struct clk_hw *hw);
-unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate);
void omap2_init_clk_clkdm(struct clk_hw *clk);
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
- unsigned long parent_rate);
-int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate);
-long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate);
-int omap2_clkops_enable_clkdm(struct clk_hw *hw);
-void omap2_clkops_disable_clkdm(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
-void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
-int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
- unsigned long parent_rate);
-int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate, u8 index);
-int omap2_dflt_clk_enable(struct clk_hw *hw);
-void omap2_dflt_clk_disable(struct clk_hw *hw);
-int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
-void omap3_clk_lock_dpll5(void);
+int omap2_clk_enable_autoidle_all(void);
+int omap2_clk_allow_idle(struct clk *clk);
+int omap2_clk_deny_idle(struct clk *clk);
unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
unsigned long parent_rate);
int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
unsigned long parent_rate);
void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
void omap2xxx_clkt_vps_init(void);
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
-void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
-void ti_dt_clocks_register(struct ti_dt_clk *oclks);
-void ti_dt_clk_init_provider(struct device_node *np, int index);
void ti_dt_clk_init_retry_clks(void);
void ti_dt_clockdomains_setup(void);
-int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
- ti_of_clk_init_cb_t func);
-int of_ti_clk_autoidle_setup(struct device_node *node);
-int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);
+
+struct regmap;
+
+int omap2_clk_provider_init(struct device_node *parent, int index,
+ struct regmap *syscon, void __iomem *mem);
+void omap2_clk_legacy_provider_init(int index, void __iomem *mem);
int omap3430_dt_clk_init(void);
int omap3630_dt_clk_init(void);
int am35xx_dt_clk_init(void);
-int ti81xx_dt_clk_init(void);
+int dm814x_dt_clk_init(void);
+int dm816x_dt_clk_init(void);
int omap4xxx_dt_clk_init(void);
int omap5xxx_dt_clk_init(void);
int dra7xx_dt_clk_init(void);
@@ -338,27 +273,24 @@ int am43xx_dt_clk_init(void);
int omap2420_dt_clk_init(void);
int omap2430_dt_clk_init(void);
-#ifdef CONFIG_OF
-void of_ti_clk_allow_autoidle_all(void);
-void of_ti_clk_deny_autoidle_all(void);
-#else
-static inline void of_ti_clk_allow_autoidle_all(void) { }
-static inline void of_ti_clk_deny_autoidle_all(void) { }
-#endif
+struct ti_clk_features {
+ u32 flags;
+ long fint_min;
+ long fint_max;
+ long fint_band1_max;
+ long fint_band2_min;
+ u8 dpll_bypass_vals;
+ u8 cm_idlest_val;
+};
+
+#define TI_CLK_DPLL_HAS_FREQSEL BIT(0)
+#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
+#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
+
+void ti_clk_setup_features(struct ti_clk_features *features);
+const struct ti_clk_features *ti_clk_get_features(void);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
-extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
-extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
-extern const struct clk_hw_omap_ops clkhwops_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
-extern const struct clk_hw_omap_ops clkhwops_iclk;
-extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
#ifdef CONFIG_ATAGS
int omap3430_clk_legacy_init(void);
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 597a1e836f22..31ce435981fe 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -234,13 +234,10 @@ static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
-extern int clockevents_notify(unsigned long reason, void *arg);
-
#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
-static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e08a6ae7c0a4..c836eb2dc44d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -252,7 +252,12 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
#define WRITE_ONCE(x, val) \
- ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(x)) (val) }; \
+ __write_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
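
The reworked macro behaves exactly as before for plain types; the added __force cast additionally lets sparse-annotated values pass through without a warning. A minimal usage sketch (shared_flag is an illustrative variable):

        static int shared_flag;

        static void publish_flag(void)
        {
                WRITE_ONCE(shared_flag, 1);     /* single, non-torn store */
        }

        static int read_flag(void)
        {
                return READ_ONCE(shared_flag);  /* paired single load */
        }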
/**
* READ_ONCE_CTRL - Read a value heading a control dependency
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index b96bd299966f..008fc67d0d96 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -49,13 +49,28 @@ static inline void exception_exit(enum ctx_state prev_ctx)
}
}
+
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CONTEXT_DISABLED. This should be used primarily for debugging.
+ */
+static inline enum ctx_state ct_state(void)
+{
+ return context_tracking_is_enabled() ?
+ this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
+}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
+static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
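
CT_WARN_ON() is intended for assertions like the one sketched below, which only fires when context tracking is actually enabled; the function name is illustrative:

        static void example_enter_kernel_work(void)
        {
                /* We must already have been switched out of user context here. */
                CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        }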
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 678ecdf90cf6..ee956c528fab 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -14,6 +14,7 @@ struct context_tracking {
bool active;
int recursion;
enum ctx_state {
+ CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
CONTEXT_KERNEL = 0,
CONTEXT_USER,
CONTEXT_GUEST,
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 3486b9082adb..c69e1b932809 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -14,6 +14,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
+#include <linux/sched.h>
/* Peripheral id registers (0xFD0-0xFEC) */
#define CORESIGHT_PERIPHIDR4 0xfd0
@@ -248,4 +249,24 @@ static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
#endif
+#ifdef CONFIG_PID_NS
+static inline unsigned long
+coresight_vpid_to_pid(unsigned long vpid)
+{
+ struct task_struct *task = NULL;
+ unsigned long pid = 0;
+
+ rcu_read_lock();
+ task = find_task_by_vpid(vpid);
+ if (task)
+ pid = task_pid_nr(task);
+ rcu_read_unlock();
+
+ return pid;
+}
+#else
+static inline unsigned long
+coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
+#endif
+
#endif
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index c4d4eb8ac9fe..986c06c88d81 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -11,6 +11,7 @@
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <asm/cpufeature.h>
@@ -43,16 +44,16 @@
* For a list of legal values for 'feature', please consult the file
* 'asm/cpufeature.h' of your favorite architecture.
*/
-#define module_cpu_feature_match(x, __init) \
+#define module_cpu_feature_match(x, __initfunc) \
static struct cpu_feature const cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
-static int cpu_feature_match_ ## x ## _init(void) \
+static int __init cpu_feature_match_ ## x ## _init(void) \
{ \
if (!cpu_have_feature(cpu_feature(x))) \
return -ENODEV; \
- return __init(); \
+ return __initfunc(); \
} \
module_init(cpu_feature_match_ ## x ## _init)
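
With the parameter rename, an __init-annotated init function now keeps its annotation intact; a hypothetical arm64-style user might look like this (AES as the CPU feature name is an assumption for the example):

        static int __init aes_accel_mod_init(void)
        {
                pr_info("CPU AES instructions available\n");
                return 0;
        }
        module_cpu_feature_match(AES, aes_accel_mod_init);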
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index bde1e567b3a9..430efcbea48e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -51,11 +51,9 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
-struct cpufreq_real_policy {
+struct cpufreq_user_policy {
unsigned int min; /* in kHz */
unsigned int max; /* in kHz */
- unsigned int policy; /* see above */
- struct cpufreq_governor *governor; /* see below */
};
struct cpufreq_policy {
@@ -88,7 +86,7 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct cpufreq_real_policy user_policy;
+ struct cpufreq_user_policy user_policy;
struct cpufreq_frequency_table *freq_table;
struct list_head policy_list;
@@ -369,11 +367,10 @@ static inline void cpufreq_resume(void) {}
/* Policy Notifiers */
#define CPUFREQ_ADJUST (0)
-#define CPUFREQ_INCOMPATIBLE (1)
-#define CPUFREQ_NOTIFY (2)
-#define CPUFREQ_START (3)
-#define CPUFREQ_CREATE_POLICY (4)
-#define CPUFREQ_REMOVE_POLICY (5)
+#define CPUFREQ_NOTIFY (1)
+#define CPUFREQ_START (2)
+#define CPUFREQ_CREATE_POLICY (3)
+#define CPUFREQ_REMOVE_POLICY (4)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -578,6 +575,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_supported(void);
int cpufreq_boost_enabled(void);
+int cpufreq_enable_boost_support(void);
+bool policy_has_boost_freq(struct cpufreq_policy *policy);
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -591,12 +590,23 @@ static inline int cpufreq_boost_enabled(void)
{
return 0;
}
+
+static inline int cpufreq_enable_boost_support(void)
+{
+ return -EINVAL;
+}
+
+static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+ return false;
+}
#endif
/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
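
A scaling driver wanting to expose boost frequencies could combine the new helpers roughly as below; foo_cpufreq_init and the attribute array are illustrative, not part of this patch:

        static struct freq_attr *foo_cpufreq_attrs[] = {
                &cpufreq_freq_attr_scaling_available_freqs,
                &cpufreq_freq_attr_scaling_boost_freqs,
                NULL,
        };

        static int foo_cpufreq_init(struct cpufreq_policy *policy)
        {
                /* Enable the global boost switch only if this policy has boost OPPs. */
                if (policy_has_boost_freq(policy))
                        return cpufreq_enable_boost_support();

                return 0;
        }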
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index d075d34279df..786ad32631a6 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -84,7 +84,6 @@ struct cpuidle_device {
struct list_head device_list;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
- int safe_state_index;
cpumask_t coupled_cpus;
struct cpuidle_coupled *coupled;
#endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 81ef938b0a8e..e71cb70a1ac2 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -102,12 +102,6 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
- * Temporary flag used to prevent legacy AEAD implementations from
- * being used by user-space.
- */
-#define CRYPTO_ALG_AEAD_NEW 0x00004000
-
-/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -142,13 +136,10 @@
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
-struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_tfm;
struct crypto_type;
-struct aead_request;
-struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -275,47 +266,6 @@ struct ablkcipher_alg {
};
/**
- * struct old_aead_alg - AEAD cipher definition
- * @maxauthsize: Set the maximum authentication tag size supported by the
- * transformation. A transformation may support smaller tag sizes.
- * As the authentication tag is a message digest to ensure the
- * integrity of the encrypted data, a consumer typically wants the
- * largest authentication tag possible as defined by this
- * variable.
- * @setauthsize: Set authentication size for the AEAD transformation. This
- * function is used to specify the consumer requested size of the
- * authentication tag to be either generated by the transformation
- * during encryption or the size of the authentication tag to be
- * supplied during the decryption operation. This function is also
- * responsible for checking the authentication tag size for
- * validity.
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @givencrypt: see struct ablkcipher_alg
- * @givdecrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
- */
-struct old_aead_alg {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- const char *geniv;
-
- unsigned int ivsize;
- unsigned int maxauthsize;
-};
-
-/**
* struct blkcipher_alg - synchronous block cipher definition
* @min_keysize: see struct ablkcipher_alg
* @max_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@ struct compress_alg {
#define cra_ablkcipher cra_u.ablkcipher
-#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -460,7 +409,7 @@ struct compress_alg {
* struct crypto_type, which implements callbacks common for all
* transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
- * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ * &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
- struct old_aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 51cc1deb7af3..76d23fa8c7d3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -82,9 +82,6 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
unsigned long arg);
-typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size);
-
/*
* These iteration functions are typically used to check (and combine)
* properties of underlying devices.
@@ -160,7 +157,6 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_ioctl_fn ioctl;
- dm_merge_fn merge;
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
diff --git a/include/linux/device.h b/include/linux/device.h
index a2b4ea70a946..5d7bc6349930 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -341,7 +341,7 @@ struct subsys_interface {
struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
- int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+ void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};
int subsys_interface_register(struct subsys_interface *sif);
@@ -714,6 +714,8 @@ struct device_dma_parameters {
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
* See Documentation/pinctrl.txt for details.
+ * @msi_list: Hosts MSI descriptors
+ * @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -774,9 +776,15 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *msi_domain;
+#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct list_head msi_list;
+#endif
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
@@ -861,6 +869,22 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ return dev->msi_domain;
+#else
+ return NULL;
+#endif
+}
+
+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ dev->msi_domain = d;
+#endif
+}
+
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
@@ -959,6 +983,8 @@ extern int __must_check device_add(struct device *dev);
extern void device_del(struct device *dev);
extern int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
+extern int device_for_each_child_reverse(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, const char *new_name);
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9012f8775208..eb049c622208 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
- ((a[2] ^ b[2]) & m)) == 0;
+ (__force int)((a[2] ^ b[2]) & m)) == 0;
#else
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b16d929fa75f..c0f8c4fc5d45 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -27,8 +27,6 @@
#define __LINUX_EXTCON_H__
#include <linux/device.h>
-#include <linux/notifier.h>
-#include <linux/sysfs.h>
/*
* Define the unique id of supported external connectors
@@ -77,8 +75,6 @@ struct extcon_cable;
 * be attached simultaneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
- * @print_state: An optional callback to override the method to print the
- * status of the extcon device.
* @dev: Device of this extcon.
* @state: Attach/detach state of this extcon. Do not provide at
* register-time.
@@ -102,9 +98,6 @@ struct extcon_dev {
const unsigned int *supported_cable;
const u32 *mutually_exclusive;
- /* Optional callbacks to override class functions */
- ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
-
/* Internal data. Please do not set. */
struct device dev;
struct raw_notifier_head *nh;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 920408a21ffd..25c6324a0dd0 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -417,15 +417,25 @@ typedef __le32 f2fs_hash_t;
#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
-/* the number of dentry in a block */
-#define NR_DENTRY_IN_BLOCK 214
-
/* MAX level for dir lookup */
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+/*
+ * space utilization of regular dentry and inline dentry
+ * regular dentry inline dentry
+ * bitmap 1 * 27 = 27 1 * 23 = 23
+ * reserved 1 * 3 = 3 1 * 7 = 7
+ * dentry 11 * 214 = 2354 11 * 182 = 2002
+ * filename 8 * 214 = 1712 8 * 182 = 1456
+ * total 4096 3488
+ *
+ * Note: there is more reserved space in an inline dentry than in a regular
+ * dentry; when converting an inline dentry we should handle this carefully.
+ */
+#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index fbb88740634a..674e3e226465 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -86,8 +86,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
{
- rcu_lockdep_assert(rcu_read_lock_held() ||
- lockdep_is_held(&files->file_lock),
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+ !lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
return __fcheck_files(files, fd);
}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 17724f6ea983..fa2cab985e57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
offsetof(struct bpf_prog, insns[proglen]));
}
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+ /* When classic BPF programs have been loaded and the arch
+ * does not have a classic BPF JIT (anymore), they have been
+ * converted via bpf_migrate_filter() to eBPF and thus always
+ * have an unspec program type.
+ */
+ return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
- flen, proglen, pass, image);
+ pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ proglen, pass, image, current->comm, task_pid_nr(current));
+
if (image)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 84b783f277f7..fbd780c33c5f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1260,6 +1260,7 @@ struct mm_struct;
/* sb->s_iflags */
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
+#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
/* Possible states of 'frozen' field */
enum {
@@ -3041,4 +3042,6 @@ static inline bool dir_relax(struct inode *inode)
return !IS_DEADDIR(inode);
}
+extern bool path_noexec(const struct path *path);
+
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 2a2f56b292c1..f2912914141a 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -20,11 +20,6 @@
#define FSL_UTMI_PHY_DLY 10 /*As per P1010RM, delay for UTMI
PHY CLK to become stable - 10ms*/
#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */
-#define FSL_USB_VER_OLD 0
-#define FSL_USB_VER_1_6 1
-#define FSL_USB_VER_2_2 2
-#define FSL_USB_VER_2_4 3
-#define FSL_USB_VER_2_5 4
#include <linux/types.h>
@@ -52,6 +47,15 @@
*
*/
+enum fsl_usb2_controller_ver {
+ FSL_USB_VER_NONE = -1,
+ FSL_USB_VER_OLD = 0,
+ FSL_USB_VER_1_6 = 1,
+ FSL_USB_VER_2_2 = 2,
+ FSL_USB_VER_2_4 = 3,
+ FSL_USB_VER_2_5 = 4,
+};
+
enum fsl_usb2_operating_modes {
FSL_USB2_MPH_HOST,
FSL_USB2_DR_HOST,
@@ -65,6 +69,7 @@ enum fsl_usb2_phy_modes {
FSL_USB2_PHY_UTMI,
FSL_USB2_PHY_UTMI_WIDE,
FSL_USB2_PHY_SERIAL,
+ FSL_USB2_PHY_UTMI_DUAL,
};
struct clk;
@@ -72,7 +77,7 @@ struct platform_device;
struct fsl_usb2_platform_data {
/* board specific information */
- int controller_ver;
+ enum fsl_usb2_controller_ver controller_ver;
enum fsl_usb2_operating_modes operating_mode;
enum fsl_usb2_phy_modes phy_mode;
unsigned int port_enables;
@@ -93,6 +98,9 @@ struct fsl_usb2_platform_data {
unsigned suspended:1;
unsigned already_suspended:1;
+ unsigned has_fsl_erratum_a007792:1;
+ unsigned has_fsl_erratum_a005275:1;
+ unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
u32 pm_command;
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index bf0321eabbda..0023088b253b 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@ struct fsl_ifc_ctrl {
u32 nand_stat;
wait_queue_head_t nand_wait;
+ bool little_endian;
};
extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+static inline u32 ifc_in32(void __iomem *addr)
+{
+ u32 val;
+
+ if (fsl_ifc_ctrl_dev->little_endian)
+ val = ioread32(addr);
+ else
+ val = ioread32be(addr);
+
+ return val;
+}
+
+static inline u16 ifc_in16(void __iomem *addr)
+{
+ u16 val;
+
+ if (fsl_ifc_ctrl_dev->little_endian)
+ val = ioread16(addr);
+ else
+ val = ioread16be(addr);
+
+ return val;
+}
+
+static inline u8 ifc_in8(void __iomem *addr)
+{
+ return ioread8(addr);
+}
+
+static inline void ifc_out32(u32 val, void __iomem *addr)
+{
+ if (fsl_ifc_ctrl_dev->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static inline void ifc_out16(u16 val, void __iomem *addr)
+{
+ if (fsl_ifc_ctrl_dev->little_endian)
+ iowrite16(val, addr);
+ else
+ iowrite16be(val, addr);
+}
+
+static inline void ifc_out8(u8 val, void __iomem *addr)
+{
+ iowrite8(val, addr);
+}
#endif /* __ASM_FSL_IFC_H */
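
Driver code using these accessors stays byte-order agnostic; a small illustration (the register block and the write-1-to-clear semantics are assumptions for the example, not taken from a specific IFC register):

        static void example_ifc_ack_events(void __iomem *evt_reg)
        {
                u32 events = ifc_in32(evt_reg);         /* endianness picked at run time */

                if (events)
                        ifc_out32(events, evt_reg);     /* assumed write-1-to-clear */
        }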
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ec274e0f4ed2..2adbfa6d02bc 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -13,6 +13,7 @@
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
+#include <linux/percpu-refcount.h>
#ifdef CONFIG_BLOCK
@@ -124,7 +125,7 @@ struct hd_struct {
#else
struct disk_stats dkstats;
#endif
- atomic_t ref;
+ struct percpu_ref ref;
struct rcu_head rcu_head;
};
@@ -611,7 +612,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
sector_t len, int flags,
struct partition_meta_info
*info);
-extern void __delete_partition(struct hd_struct *);
+extern void __delete_partition(struct percpu_ref *);
extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);
@@ -640,27 +641,39 @@ extern ssize_t part_fail_store(struct device *dev,
const char *buf, size_t count);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
-static inline void hd_ref_init(struct hd_struct *part)
+static inline int hd_ref_init(struct hd_struct *part)
{
- atomic_set(&part->ref, 1);
- smp_mb();
+ if (percpu_ref_init(&part->ref, __delete_partition, 0,
+ GFP_KERNEL))
+ return -ENOMEM;
+ return 0;
}
static inline void hd_struct_get(struct hd_struct *part)
{
- atomic_inc(&part->ref);
- smp_mb__after_atomic();
+ percpu_ref_get(&part->ref);
}
static inline int hd_struct_try_get(struct hd_struct *part)
{
- return atomic_inc_not_zero(&part->ref);
+ return percpu_ref_tryget_live(&part->ref);
}
static inline void hd_struct_put(struct hd_struct *part)
{
- if (atomic_dec_and_test(&part->ref))
- __delete_partition(part);
+ percpu_ref_put(&part->ref);
+}
+
+static inline void hd_struct_kill(struct hd_struct *part)
+{
+ percpu_ref_kill(&part->ref);
+}
+
+static inline void hd_free_part(struct hd_struct *part)
+{
+ free_part_stats(part);
+ free_part_info(part);
+ percpu_ref_exit(&part->ref);
}
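
A rough sketch of how the reworked helpers fit together (call sites are illustrative and error handling is elided): initialisation can now fail, lookups use a tryget that fails after the ref is killed, and teardown is split into a kill followed by a later free:

        static int example_part_setup(struct hd_struct *part)
        {
                return hd_ref_init(part);               /* may now fail with -ENOMEM */
        }

        static void example_part_use(struct hd_struct *part)
        {
                if (!hd_struct_try_get(part))           /* fails once the ref is killed */
                        return;
                /* ... access the partition ... */
                hd_struct_put(part);
        }

        static void example_part_remove(struct hd_struct *part)
        {
                hd_struct_kill(part);   /* __delete_partition() runs once refs drain */
        }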
/*
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 30d3a1f79450..54733d5b503e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -977,6 +977,11 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
+int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+ resource_size_t min, resource_size_t max,
+ resource_size_t size, resource_size_t align,
+ bool fb_overlap_ok);
+
/**
* VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
*
@@ -1233,8 +1238,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
-extern struct resource hyperv_mmio;
-
/*
* Negotiated version with the Host.
*/
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b9c7897dc566..cfa906f28b7a 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 193ad488d3e2..908429216d9f 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,6 +37,7 @@ static inline struct igmpv3_query *
return (struct igmpv3_query *)skb_transport_header(skb);
}
+extern int sysctl_igmp_llm_reports;
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
extern int sysctl_igmp_qrv;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 2c476acb87d9..3c17cd7fdf06 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -166,6 +166,7 @@ struct st_sensor_transfer_function {
/**
* struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
+ * @wai_addr: The address of WhoAmI register.
* @sensors_supported: List of supported sensors by struct itself.
* @ch: IIO channels for the sensor.
* @odr: Output data rate register and ODR list available.
@@ -179,6 +180,7 @@ struct st_sensor_transfer_function {
*/
struct st_sensor_settings {
u8 wai;
+ u8 wai_addr;
char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
struct iio_chan_spec *ch;
int num_ch;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 26fb8f6342bb..fad58671c49e 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -100,7 +100,7 @@ void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
/**
* iio_channel_cb_get_channels() - get access to the underlying channels.
- * @cb_buff: The callback buffer from whom we want the channel
+ * @cb_buffer: The callback buffer from which we want the channel
* information.
*
* This function allows one to obtain information about the channels.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index f79148261d16..7bb7f673cb3f 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -645,6 +645,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
/**
+ * IIO_RAD_TO_DEGREE() - Convert rad to degree
+ * @rad: A value in rad
+ *
+ * Returns the given value converted from rad to degree
+ */
+#define IIO_RAD_TO_DEGREE(rad) \
+ (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
+
+/**
* IIO_G_TO_M_S_2() - Convert g to meter / second**2
* @g: A value in g
*
@@ -652,4 +661,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
*/
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
+/**
+ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
+ * @ms2: A value in meter / second**2
+ *
+ * Returns the given value converted from meter / second**2 to g
+ */
+#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
+
#endif /* _INDUSTRIAL_IO_H_ */
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 8a1d18640ab9..9cd8f747212f 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -18,7 +18,8 @@ struct iio_chan_spec;
* struct iio_dev_attr - iio specific device attribute
* @dev_attr: underlying device attribute
* @address: associated register address
- * @l: list head for maintaining list of dynamically created attrs.
+ * @l: list head for maintaining list of dynamically created attrs
+ * @c: specification for the underlying channel
*/
struct iio_dev_attr {
struct device_attribute dev_attr;
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index fa76c79a52a1..1c9e028e0d4a 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -18,6 +18,9 @@ struct iio_subirq {
bool enabled;
};
+struct iio_dev;
+struct iio_trigger;
+
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
* @owner: used to monitor usage count of the trigger.
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index c378ebec605e..f72f70d5a97b 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -7,8 +7,8 @@ struct iio_dev;
struct iio_buffer_setup_ops;
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
- irqreturn_t (*pollfunc_bh)(int irq, void *p),
- irqreturn_t (*pollfunc_th)(int irq, void *p),
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
const struct iio_buffer_setup_ops *setup_ops);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e8493fee8160..d0b380ee7d67 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -32,6 +32,14 @@ extern struct fs_struct init_fs;
#define INIT_CPUSET_SEQ(tsk)
#endif
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \
+},
+#else
+#define INIT_PREV_CPUTIME(x)
+#endif
+
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -46,6 +54,7 @@ extern struct fs_struct init_fs;
.cputime_atomic = INIT_CPUTIME_ATOMIC, \
.running = 0, \
}, \
+ INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
}
@@ -246,6 +255,7 @@ extern struct task_group root_task_group;
INIT_TASK_RCU_TASKS(tsk) \
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
+ INIT_PREV_CPUTIME(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
INIT_KASAN(tsk) \
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c60aa42..f1f32af6d9b9 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,7 +29,9 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
+ __s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
@@ -57,6 +59,7 @@ struct ipv6_devconf {
bool initialized;
struct in6_addr secret;
} stable_secret;
+ __s32 use_oif_addrs_only;
void *sysctl;
};
@@ -94,7 +97,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
struct inet6_skb_parm {
int iif;
__be16 ra;
- __u16 hop;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
@@ -111,6 +113,7 @@ struct inet6_skb_parm {
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
+#define IP6SKB_HOPBYHOP 32
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 51744bcf74ee..6f8b34066442 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -324,8 +324,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
* @irq_cpu_online: configure an interrupt source for a secondary CPU
* @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
- * @irq_suspend: function called from core code on suspend once per chip
- * @irq_resume: function called from core code on resume once per chip
+ * @irq_suspend: function called from core code on suspend once per
+ * chip, when one or more interrupts are installed
+ * @irq_resume: function called from core code on resume once per chip,
+ * when one or more interrupts are installed
* @irq_pm_shutdown: function called from core code on shutdown once per chip
* @irq_calc_mask: Optional function to set irq_data.mask for special cases
* @irq_print_chip: optional to print special chip info in show_interrupts
@@ -488,8 +490,7 @@ extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif
/* Handling of unhandled and spurious interrupts: */
-extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret);
+extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
/* Enable/disable irq debugging output: */
@@ -640,7 +641,7 @@ static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
return d ? d->msi_desc : NULL;
}
-static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
return d->msi_desc;
}
@@ -762,6 +763,12 @@ struct irq_chip_type {
* @reg_base: Register base address (virtual)
* @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
* @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
+ * @suspend: Function called from core code on suspend once per
+ * chip; can be useful instead of irq_chip::irq_suspend to
+ * handle chip details even when no interrupts are in use
+ * @resume: Function called from core code on resume once per chip;
+ * can be useful instead of irq_chip::irq_resume to handle
+ * chip details even when no interrupts are in use
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
@@ -788,6 +795,8 @@ struct irq_chip_generic {
void __iomem *reg_base;
u32 (*reg_readl)(void __iomem *addr);
void (*reg_writel)(u32 val, void __iomem *addr);
+ void (*suspend)(struct irq_chip_generic *gc);
+ void (*resume)(struct irq_chip_generic *gc);
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index ffbc034c8810..bf982e021fbd 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -360,6 +360,7 @@
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
+#include <asm/msi.h>
/*
* We need a value to serve as a irq-type for LPIs. Choose one that will
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 9de976b4f9a7..65da435d01c1 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -95,11 +95,10 @@
struct device_node;
-void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_cpu_if_down(void);
+int gic_cpu_if_down(unsigned int gic_nr);
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index fcea4e48e21f..5acfa26602e1 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -166,12 +166,16 @@ static inline int handle_domain_irq(struct irq_domain *domain,
#endif
/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
+static inline int irq_desc_has_action(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
return desc->action != NULL;
}
+static inline int irq_has_action(unsigned int irq)
+{
+ return irq_desc_has_action(irq_to_desc(irq));
+}
+
/* caller has locked the irq_desc and both params are valid */
static inline void __irq_set_handler_locked(unsigned int irq,
irq_flow_handler_t handler)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 744ac0ec98eb..d3ca79236fb0 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -45,6 +45,20 @@ struct irq_data;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+};
+
/**
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
@@ -61,7 +75,8 @@ struct irq_data;
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
void (*unmap)(struct irq_domain *d, unsigned int virq);
int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -116,6 +131,7 @@ struct irq_domain {
/* Optional data */
struct device_node *of_node;
+ enum irq_domain_bus_token bus_token;
struct irq_domain_chip_generic *gc;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_domain *parent;
@@ -161,9 +177,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
-extern struct irq_domain *irq_find_host(struct device_node *node);
+extern struct irq_domain *irq_find_matching_host(struct device_node *node,
+ enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
+static inline struct irq_domain *irq_find_host(struct device_node *node)
+{
+ return irq_find_matching_host(node, DOMAIN_BUS_ANY);
+}
+
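An irq_domain match callback under the new signature might check the token as sketched below, mirroring the default rule of matching any domain when DOMAIN_BUS_ANY is requested; foo_irq_domain_match is illustrative:

        static int foo_irq_domain_match(struct irq_domain *d, struct device_node *node,
                                        enum irq_domain_bus_token bus_token)
        {
                return d->of_node == node &&
                       (bus_token == DOMAIN_BUS_ANY || d->bus_token == bus_token);
        }
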
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
deleted file mode 100644
index d32615280be9..000000000000
--- a/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * linux/include/linux/jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>
- *
- * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Definitions for transaction data structures for the buffer cache
- * filesystem journaling support.
- */
-
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
-
-/* Allow this file to be included directly into e2fsprogs */
-#ifndef __KERNEL__
-#include "jfs_compat.h"
-#define JFS_DEBUG
-#define jfs_debug jbd_debug
-#else
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-#include <linux/journal-head.h>
-#include <linux/stddef.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
-#include <linux/lockdep.h>
-#include <linux/slab.h>
-
-#define journal_oom_retry 1
-
-/*
- * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
- * certain classes of error which can occur due to failed IOs. Under
- * normal use we want ext3 to continue after such errors, because
- * hardware _can_ fail, but for debugging purposes when running tests on
- * known-good hardware we may want to trap these errors.
- */
-#undef JBD_PARANOID_IOFAIL
-
-/*
- * The default maximum commit age, in seconds.
- */
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
-
-#ifdef CONFIG_JBD_DEBUG
-/*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
- * consistency checks. By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
- */
-#define JBD_EXPENSIVE_CHECKING
-extern u8 journal_enable_debug;
-
-void __jbd_debug(int level, const char *file, const char *func,
- unsigned int line, const char *fmt, ...);
-
-#define jbd_debug(n, fmt, a...) \
- __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
-#else
-#define jbd_debug(n, fmt, a...) /**/
-#endif
-
-static inline void *jbd_alloc(size_t size, gfp_t flags)
-{
- return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd_free(void *ptr, size_t size)
-{
- free_pages((unsigned long)ptr, get_order(size));
-}
-
-#define JFS_MIN_JOURNAL_BLOCKS 1024
-
-
-/**
- * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
- *
- * All filesystem modifications made by the process go
- * through this handle. Recursive operations (such as quota operations)
- * are gathered into a single update.
- *
- * The buffer credits field is used to account for journaled buffers
- * being modified by the running process. To ensure that there is
- * enough log space for all outstanding operations, we need to limit the
- * number of outstanding buffers possible at any time. When the
- * operation completes, any buffer credits not used are credited back to
- * the transaction, so that at all times we know how many buffers the
- * outstanding updates on a transaction might possibly touch.
- *
- * This is an opaque datatype.
- **/
-typedef struct handle_s handle_t; /* Atomic operation type */
-
-
-/**
- * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
- *
- * journal_t is linked to from the fs superblock structure.
- *
- * We use the journal_t to keep track of all outstanding transaction
- * activity on the filesystem, and to manage the state of the log
- * writing process.
- *
- * This is an opaque datatype.
- **/
-typedef struct journal_s journal_t; /* Journal control structure */
-#endif
-
-/*
- * Internal structures used by the logging mechanism:
- */
-
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
-
-/*
- * On-disk structures
- */
-
-/*
- * Descriptor block types:
- */
-
-#define JFS_DESCRIPTOR_BLOCK 1
-#define JFS_COMMIT_BLOCK 2
-#define JFS_SUPERBLOCK_V1 3
-#define JFS_SUPERBLOCK_V2 4
-#define JFS_REVOKE_BLOCK 5
-
-/*
- * Standard header for all descriptor blocks:
- */
-typedef struct journal_header_s
-{
- __be32 h_magic;
- __be32 h_blocktype;
- __be32 h_sequence;
-} journal_header_t;
-
-
-/*
- * The block tag: used to describe a single buffer in the journal
- */
-typedef struct journal_block_tag_s
-{
- __be32 t_blocknr; /* The on-disk block number */
- __be32 t_flags; /* See below */
-} journal_block_tag_t;
-
-/*
- * The revoke descriptor: used on disk to describe a series of blocks to
- * be revoked from the log
- */
-typedef struct journal_revoke_header_s
-{
- journal_header_t r_header;
- __be32 r_count; /* Count of bytes used in the block */
-} journal_revoke_header_t;
-
-
-/* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
-#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
-
-
-/*
- * The journal superblock. All fields are in big-endian byte order.
- */
-typedef struct journal_superblock_s
-{
-/* 0x0000 */
- journal_header_t s_header;
-
-/* 0x000C */
- /* Static information describing the journal */
- __be32 s_blocksize; /* journal device blocksize */
- __be32 s_maxlen; /* total blocks in journal file */
- __be32 s_first; /* first block of log information */
-
-/* 0x0018 */
- /* Dynamic information describing the current state of the log */
- __be32 s_sequence; /* first commit ID expected in log */
- __be32 s_start; /* blocknr of start of log */
-
-/* 0x0020 */
- /* Error value, as set by journal_abort(). */
- __be32 s_errno;
-
-/* 0x0024 */
- /* Remaining fields are only valid in a version-2 superblock */
- __be32 s_feature_compat; /* compatible feature set */
- __be32 s_feature_incompat; /* incompatible feature set */
- __be32 s_feature_ro_compat; /* readonly-compatible feature set */
-/* 0x0030 */
- __u8 s_uuid[16]; /* 128-bit uuid for journal */
-
-/* 0x0040 */
- __be32 s_nr_users; /* Nr of filesystems sharing log */
-
- __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/
-
-/* 0x0048 */
- __be32 s_max_transaction; /* Limit of journal blocks per trans.*/
- __be32 s_max_trans_data; /* Limit of data blocks per trans. */
-
-/* 0x0050 */
- __u32 s_padding[44];
-
-/* 0x0100 */
- __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
-/* 0x0400 */
-} journal_superblock_t;
-
-#define JFS_HAS_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
-#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
-
-/* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES 0
-#define JFS_KNOWN_ROCOMPAT_FEATURES 0
-#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
-
-#ifdef __KERNEL__
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-
-enum jbd_state_bits {
- BH_JBD /* Has an attached ext3 journal_head */
- = BH_PrivateStart,
- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
- BH_Freed, /* Has been freed (truncated) */
- BH_Revoked, /* Has been revoked from the log */
- BH_RevokeValid, /* Revoked flag is valid */
- BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
- BH_JBDPrivateStart, /* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-
-#include <linux/jbd_common.h>
-
-#define J_ASSERT(assert) BUG_ON(!(assert))
-
-#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
-#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
-
-#if defined(JBD_PARANOID_IOFAIL)
-#define J_EXPECT(expr, why...) J_ASSERT(expr)
-#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
-#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
-#else
-#define __journal_expect(expr, why...) \
- ({ \
- int val = (expr); \
- if (!val) { \
- printk(KERN_ERR \
- "EXT3-fs unexpected failure: %s;\n",# expr); \
- printk(KERN_ERR why "\n"); \
- } \
- val; \
- })
-#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
-#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
-#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
-#endif
-
-struct jbd_revoke_table_s;
-
-/**
- * struct handle_s - this is the concrete type associated with handle_t.
- * @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
- * @h_lockdep_map: lockdep info for debugging lock problems
- */
-struct handle_s
-{
- /* Which compound transaction is this update a part of? */
- transaction_t *h_transaction;
-
- /* Number of remaining buffers we are allowed to dirty: */
- int h_buffer_credits;
-
- /* Reference count on this handle */
- int h_ref;
-
- /* Field for caller's use to track errors through large fs */
- /* operations */
- int h_err;
-
- /* Flags [no locking] */
- unsigned int h_sync: 1; /* sync-on-close */
- unsigned int h_jdata: 1; /* force data journaling */
- unsigned int h_aborted: 1; /* fatal error on handle */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map h_lockdep_map;
-#endif
-};
-
-
-/* The transaction_t type is the guts of the journaling mechanism. It
- * tracks a compound transaction through its various states:
- *
- * RUNNING: accepting new updates
- * LOCKED: Updates still running but we don't accept new ones
- * RUNDOWN: Updates are tidying up but have finished requesting
- * new buffers to modify (state not used for now)
- * FLUSH: All updates complete, but we are still writing to disk
- * COMMIT: All data on disk, writing commit record
- * FINISHED: We still have to keep the transaction for checkpointing.
- *
- * The transaction keeps track of all of the buffers modified by a
- * running transaction, and all of the buffers committed but not yet
- * flushed to home for finished transactions.
- */
-
-/*
- * Lock ranking:
- *
- * j_list_lock
- * ->jbd_lock_bh_journal_head() (This is "innermost")
- *
- * j_state_lock
- * ->jbd_lock_bh_state()
- *
- * jbd_lock_bh_state()
- * ->j_list_lock
- *
- * j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
- * ->j_list_lock (journal_unmap_buffer)
- *
- */
-
-struct transaction_s
-{
- /* Pointer to the journal for this transaction. [no locking] */
- journal_t *t_journal;
-
- /* Sequence number for this transaction [no locking] */
- tid_t t_tid;
-
- /*
- * Transaction's current state
- * [no locking - only kjournald alters this]
- * [j_list_lock] guards transition of a transaction into T_FINISHED
- * state and subsequent call of __journal_drop_transaction()
- * FIXME: needs barriers
- * KLUDGE: [use j_state_lock]
- */
- enum {
- T_RUNNING,
- T_LOCKED,
- T_FLUSH,
- T_COMMIT,
- T_COMMIT_RECORD,
- T_FINISHED
- } t_state;
-
- /*
- * Where in the log does this transaction's commit start? [no locking]
- */
- unsigned int t_log_start;
-
- /* Number of buffers on the t_buffers list [j_list_lock] */
- int t_nr_buffers;
-
- /*
- * Doubly-linked circular list of all buffers reserved but not yet
- * modified by this transaction [j_list_lock]
- */
- struct journal_head *t_reserved_list;
-
- /*
- * Doubly-linked circular list of all buffers under writeout during
- * commit [j_list_lock]
- */
- struct journal_head *t_locked_list;
-
- /*
- * Doubly-linked circular list of all metadata buffers owned by this
- * transaction [j_list_lock]
- */
- struct journal_head *t_buffers;
-
- /*
- * Doubly-linked circular list of all data buffers still to be
- * flushed before this transaction can be committed [j_list_lock]
- */
- struct journal_head *t_sync_datalist;
-
- /*
- * Doubly-linked circular list of all forget buffers (superseded
- * buffers which we can un-checkpoint once this transaction commits)
- * [j_list_lock]
- */
- struct journal_head *t_forget;
-
- /*
- * Doubly-linked circular list of all buffers still to be flushed before
- * this transaction can be checkpointed. [j_list_lock]
- */
- struct journal_head *t_checkpoint_list;
-
- /*
- * Doubly-linked circular list of all buffers submitted for IO while
- * checkpointing. [j_list_lock]
- */
- struct journal_head *t_checkpoint_io_list;
-
- /*
- * Doubly-linked circular list of temporary buffers currently undergoing
- * IO in the log [j_list_lock]
- */
- struct journal_head *t_iobuf_list;
-
- /*
- * Doubly-linked circular list of metadata buffers being shadowed by log
- * IO. The IO buffers on the iobuf list and the shadow buffers on this
- * list match each other one for one at all times. [j_list_lock]
- */
- struct journal_head *t_shadow_list;
-
- /*
- * Doubly-linked circular list of control buffers being written to the
- * log. [j_list_lock]
- */
- struct journal_head *t_log_list;
-
- /*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
- * Number of outstanding updates running on this transaction
- * [t_handle_lock]
- */
- int t_updates;
-
- /*
- * Number of buffers reserved for use by all handles in this transaction
-	 * but not yet modified. [t_handle_lock]
- */
- int t_outstanding_credits;
-
- /*
- * Forward and backward links for the circular list of all transactions
- * awaiting checkpoint. [j_list_lock]
- */
- transaction_t *t_cpnext, *t_cpprev;
-
- /*
- * When will the transaction expire (become due for commit), in jiffies?
- * [no locking]
- */
- unsigned long t_expires;
-
- /*
- * When this transaction started, in nanoseconds [no locking]
- */
- ktime_t t_start_time;
-
- /*
- * How many handles used this transaction? [t_handle_lock]
- */
- int t_handle_count;
-};
-
-/**
- * struct journal_s - this is the concrete type associated with journal_t.
- * @j_flags: General journaling state flags
- * @j_errno: Is there an outstanding uncleared error on the journal (from a
- * prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count: Number of processes waiting to create a barrier lock
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- * waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- * to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint: Wait queue to trigger checkpointing
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- * journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset into the device where we store the
- * journal
- * @j_fs_dev: Device which holds the client fs. For internal journal this will
- * be equal to j_dev
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal. If present, all journal
- * block numbers are mapped into this inode via bmap().
- * @j_tail_sequence: Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- * transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- * commit
- * @j_commit_waited: Sequence number of the most recent transaction someone
- * is waiting for to commit.
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
- * single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- * a commit?
- * @j_commit_timer: The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- * current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- * number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_average_commit_time: the average amount of time in nanoseconds it
- * takes to commit a transaction to the disk.
- * @j_private: An opaque pointer to fs-private information.
- */
-
-struct journal_s
-{
- /* General journaling state flags [j_state_lock] */
- unsigned long j_flags;
-
- /*
- * Is there an outstanding uncleared error on the journal (from a prior
- * abort)? [j_state_lock]
- */
- int j_errno;
-
- /* The superblock buffer */
- struct buffer_head *j_sb_buffer;
- journal_superblock_t *j_superblock;
-
- /* Version of the superblock format */
- int j_format_version;
-
- /*
- * Protect the various scalars in the journal
- */
- spinlock_t j_state_lock;
-
- /*
- * Number of processes waiting to create a barrier lock [j_state_lock]
- */
- int j_barrier_count;
-
- /*
- * Transactions: The current running transaction...
- * [j_state_lock] [caller holding open handle]
- */
- transaction_t *j_running_transaction;
-
- /*
- * the transaction we are pushing to disk
- * [j_state_lock] [caller holding open handle]
- */
- transaction_t *j_committing_transaction;
-
- /*
- * ... and a linked circular list of all transactions waiting for
- * checkpointing. [j_list_lock]
- */
- transaction_t *j_checkpoint_transactions;
-
- /*
- * Wait queue for waiting for a locked transaction to start committing,
- * or for a barrier lock to be released
- */
- wait_queue_head_t j_wait_transaction_locked;
-
- /* Wait queue for waiting for checkpointing to complete */
- wait_queue_head_t j_wait_logspace;
-
- /* Wait queue for waiting for commit to complete */
- wait_queue_head_t j_wait_done_commit;
-
- /* Wait queue to trigger checkpointing */
- wait_queue_head_t j_wait_checkpoint;
-
- /* Wait queue to trigger commit */
- wait_queue_head_t j_wait_commit;
-
- /* Wait queue to wait for updates to complete */
- wait_queue_head_t j_wait_updates;
-
- /* Semaphore for locking against concurrent checkpoints */
- struct mutex j_checkpoint_mutex;
-
- /*
- * Journal head: identifies the first unused block in the journal.
- * [j_state_lock]
- */
- unsigned int j_head;
-
- /*
- * Journal tail: identifies the oldest still-used block in the journal.
- * [j_state_lock]
- */
- unsigned int j_tail;
-
- /*
- * Journal free: how many free blocks are there in the journal?
- * [j_state_lock]
- */
- unsigned int j_free;
-
- /*
- * Journal start and end: the block numbers of the first usable block
- * and one beyond the last usable block in the journal. [j_state_lock]
- */
- unsigned int j_first;
- unsigned int j_last;
-
- /*
- * Device, blocksize and starting block offset for the location where we
- * store the journal.
- */
- struct block_device *j_dev;
- int j_blocksize;
- unsigned int j_blk_offset;
-
- /*
- * Device which holds the client fs. For internal journal this will be
- * equal to j_dev.
- */
- struct block_device *j_fs_dev;
-
- /* Total maximum capacity of the journal region on disk. */
- unsigned int j_maxlen;
-
- /*
- * Protects the buffer lists and internal buffer state.
- */
- spinlock_t j_list_lock;
-
- /* Optional inode where we store the journal. If present, all */
- /* journal block numbers are mapped into this inode via */
- /* bmap(). */
- struct inode *j_inode;
-
- /*
- * Sequence number of the oldest transaction in the log [j_state_lock]
- */
- tid_t j_tail_sequence;
-
- /*
- * Sequence number of the next transaction to grant [j_state_lock]
- */
- tid_t j_transaction_sequence;
-
- /*
- * Sequence number of the most recently committed transaction
- * [j_state_lock].
- */
- tid_t j_commit_sequence;
-
- /*
- * Sequence number of the most recent transaction wanting commit
- * [j_state_lock]
- */
- tid_t j_commit_request;
-
- /*
- * Sequence number of the most recent transaction someone is waiting
- * for to commit.
- * [j_state_lock]
- */
- tid_t j_commit_waited;
-
- /*
- * Journal uuid: identifies the object (filesystem, LVM volume etc)
- * backed by this journal. This will eventually be replaced by an array
- * of uuids, allowing us to index multiple devices within a single
- * journal and to perform atomic updates across them.
- */
- __u8 j_uuid[16];
-
- /* Pointer to the current commit thread for this journal */
- struct task_struct *j_task;
-
- /*
- * Maximum number of metadata buffers to allow in a single compound
- * commit transaction
- */
- int j_max_transaction_buffers;
-
- /*
- * What is the maximum transaction lifetime before we begin a commit?
- */
- unsigned long j_commit_interval;
-
- /* The timer used to wakeup the commit thread: */
- struct timer_list j_commit_timer;
-
- /*
- * The revoke table: maintains the list of revoked blocks in the
- * current transaction. [j_revoke_lock]
- */
- spinlock_t j_revoke_lock;
- struct jbd_revoke_table_s *j_revoke;
- struct jbd_revoke_table_s *j_revoke_table[2];
-
- /*
- * array of bhs for journal_commit_transaction
- */
- struct buffer_head **j_wbuf;
- int j_wbufsize;
-
- /*
- * this is the pid of the last person to run a synchronous operation
- * through the journal.
- */
- pid_t j_last_sync_writer;
-
- /*
- * the average amount of time in nanoseconds it takes to commit a
- * transaction to the disk. [j_state_lock]
- */
- u64 j_average_commit_time;
-
- /*
- * An opaque pointer to fs-private information. ext3 puts its
- * superblock pointer here
- */
- void *j_private;
-};
-
-/*
- * Journal flag definitions
- */
-#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
-#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
-#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
-#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
-#define JFS_BARRIER 0x020 /* Use IDE barriers */
-#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
- * data write error in ordered
- * mode */
-
-/*
- * Function declarations for the journaling transaction and buffer
- * management
- */
-
-/* Filing buffers */
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
-
-/* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned int *);
-
-/* Commit management */
-extern void journal_commit_transaction(journal_t *);
-
-/* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
-
-/* Buffer IO */
-extern int
-journal_write_metadata_buffer(transaction_t *transaction,
- struct journal_head *jh_in,
- struct journal_head **jh_out,
- unsigned int blocknr);
-
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
-/*
- * Journal locking.
- *
- * We need to lock the journal during transaction state changes so that nobody
- * ever tries to take a handle on the running transaction while we are in the
- * middle of moving it to the commit phase. j_state_lock does this.
- *
- * Note that the locking is completely interrupt unsafe. We never touch
- * journal structures from interrupts.
- */
-
-static inline handle_t *journal_current_handle(void)
-{
- return current->journal_info;
-}
-
-/* The journaling code user interface:
- *
- * Create and destroy handles
- * Register buffer modifications against the current transaction.
- */
-
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int journal_restart (handle_t *, int nblocks);
-extern int journal_extend (handle_t *, int nblocks);
-extern int journal_get_write_access(handle_t *, struct buffer_head *);
-extern int journal_get_create_access (handle_t *, struct buffer_head *);
-extern int journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int journal_dirty_data (handle_t *, struct buffer_head *);
-extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void journal_release_buffer (handle_t *, struct buffer_head *);
-extern int journal_forget (handle_t *, struct buffer_head *);
-extern void journal_sync_buffer (struct buffer_head *);
-extern void journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int journal_stop(handle_t *);
-extern int journal_flush (journal_t *);
-extern void journal_lock_updates (journal_t *);
-extern void journal_unlock_updates (journal_t *);
-
-extern journal_t * journal_init_dev(struct block_device *bdev,
- struct block_device *fs_dev,
- int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int journal_update_format (journal_t *);
-extern int journal_check_used_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_check_available_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_set_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_create (journal_t *);
-extern int journal_load (journal_t *journal);
-extern int journal_destroy (journal_t *);
-extern int journal_recover (journal_t *journal);
-extern int journal_wipe (journal_t *, int);
-extern int journal_skip_recovery (journal_t *);
-extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
- int);
-extern void journal_abort (journal_t *, int);
-extern int journal_errno (journal_t *);
-extern void journal_ack_err (journal_t *);
-extern int journal_clear_err (journal_t *);
-extern int journal_bmap(journal_t *, unsigned int, unsigned int *);
-extern int journal_force_commit(journal_t *);
-
-/*
- * journal_head management
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
-
-/*
- * handle management
- */
-extern struct kmem_cache *jbd_handle_cache;
-
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
-{
- return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
-}
-
-static inline void jbd_free_handle(handle_t *handle)
-{
- kmem_cache_free(jbd_handle_cache, handle);
-}
-
-/* Primary revoke support */
-#define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int journal_init_revoke(journal_t *, int);
-extern void journal_destroy_revoke_caches(void);
-extern int journal_init_revoke_caches(void);
-
-extern void journal_destroy_revoke(journal_t *);
-extern int journal_revoke (handle_t *,
- unsigned int, struct buffer_head *);
-extern int journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void journal_write_revoke_records(journal_t *,
- transaction_t *, int);
-
-/* Recovery revoke support */
-extern int journal_set_revoke(journal_t *, unsigned int, tid_t);
-extern int journal_test_revoke(journal_t *, unsigned int, tid_t);
-extern void journal_clear_revoke(journal_t *);
-extern void journal_switch_revoke_table(journal_t *journal);
-extern void journal_clear_buffer_revoked_flags(journal_t *journal);
-
-/*
- * The log thread user interface:
- *
- * Request space in the current transaction, and force transaction commit
- * transitions on demand.
- */
-
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
-
-void __log_wait_for_space(journal_t *journal);
-extern void __journal_drop_transaction(journal_t *, transaction_t *);
-extern int cleanup_journal_tail(journal_t *);
-
-/*
- * is_journal_abort
- *
- * Simple test wrapper function to test the JFS_ABORT state flag. This
- * bit, when set, indicates that we have had a fatal error somewhere,
- * either inside the journaling layer or indicated to us by the client
- * (eg. ext3), and that we should not commit any further
- * transactions.
- */
-
-static inline int is_journal_aborted(journal_t *journal)
-{
- return journal->j_flags & JFS_ABORT;
-}
-
-static inline int is_handle_aborted(handle_t *handle)
-{
- if (handle->h_aborted)
- return 1;
- return is_journal_aborted(handle->h_transaction->t_journal);
-}
-
-static inline void journal_abort_handle(handle_t *handle)
-{
- handle->h_aborted = 1;
-}
-
-#endif /* __KERNEL__ */
-
-/* Comparison functions for transaction IDs: perform comparisons using
- * modulo arithmetic so that they work over sequence number wraps. */
-
-static inline int tid_gt(tid_t x, tid_t y)
-{
- int difference = (x - y);
- return (difference > 0);
-}
-
-static inline int tid_geq(tid_t x, tid_t y)
-{
- int difference = (x - y);
- return (difference >= 0);
-}
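A brief, purely illustrative example of why the signed subtraction handles sequence-number wrap (assuming the usual 32-bit unsigned tid_t):

    /* Illustration only: comparison still works across a 32-bit sequence wrap. */
    static inline int tid_wrap_example(void)
    {
            tid_t old_tid = 0xfffffffeU;    /* committed just before the counter wrapped */
            tid_t new_tid = 3;              /* allocated just after the wrap */

            /* (new_tid - old_tid) is 5 as a signed int, so new_tid compares later. */
            return tid_gt(new_tid, old_tid);        /* evaluates to 1 */
    }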
-
-extern int journal_blocks_per_page(struct inode *inode);
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started. Must be called under j_state_lock.
- */
-static inline int jbd_space_needed(journal_t *journal)
-{
- int nblocks = journal->j_max_transaction_buffers;
- if (journal->j_committing_transaction)
- nblocks += journal->j_committing_transaction->
- t_outstanding_credits;
- return nblocks;
-}
-
-/*
- * Definitions which augment the buffer_head layer
- */
-
-/* journaling buffer types */
-#define BJ_None 0 /* Not journaled */
-#define BJ_SyncData 1 /* Normal data: flush before commit */
-#define BJ_Metadata 2 /* Normal journaled metadata */
-#define BJ_Forget 3 /* Buffer superseded by this transaction */
-#define BJ_IO 4 /* Buffer is for temporary IO use */
-#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
-#define BJ_LogCtl 6 /* Buffer contains log descriptors */
-#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
-#define BJ_Locked 8 /* Locked for I/O during commit */
-#define BJ_Types 9
-
-extern int jbd_blocks_per_page(struct inode *inode);
-
-#ifdef __KERNEL__
-
-#define buffer_trace_init(bh) do {} while (0)
-#define print_buffer_fields(bh) do {} while (0)
-#define print_buffer_trace(bh) do {} while (0)
-#define BUFFER_TRACE(bh, info) do {} while (0)
-#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
-#define JBUFFER_TRACE(jh, info) do {} while (0)
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_JBD_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index edb640ae9a94..df07e78487d5 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/slab.h>
+#include <linux/bit_spinlock.h>
#include <crypto/hash.h>
#endif
@@ -336,7 +337,45 @@ BUFFER_FNS(Freed, freed)
BUFFER_FNS(Shadow, shadow)
BUFFER_FNS(Verified, verified)
-#include <linux/jbd_common.h>
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+ return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+ return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+ bit_spin_lock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+ return bit_spin_trylock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+ return bit_spin_is_locked(BH_State, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+ bit_spin_unlock(BH_State, &bh->b_state);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+ bit_spin_lock(BH_JournalHead, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
+}
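A minimal sketch of the locking pattern these inlined helpers support (the helper name and body are hypothetical; the point is only that BH_State guards the journal_head state reached via bh2jh()):

    /* Illustrative pattern only: serialize journal_head state updates. */
    static void example_touch_jh_state(struct buffer_head *bh)
    {
            jbd_lock_bh_state(bh);
            if (bh2jh(bh)) {
                    /* ... inspect or update bh2jh(bh) fields guarded by BH_State ... */
            }
            jbd_unlock_bh_state(bh);
    }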
#define J_ASSERT(assert) BUG_ON(!(assert))
@@ -1042,8 +1081,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
deleted file mode 100644
index 3dc53432355f..000000000000
--- a/include/linux/jbd_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _LINUX_JBD_STATE_H
-#define _LINUX_JBD_STATE_H
-
-#include <linux/bit_spinlock.h>
-
-static inline struct buffer_head *jh2bh(struct journal_head *jh)
-{
- return jh->b_bh;
-}
-
-static inline struct journal_head *bh2jh(struct buffer_head *bh)
-{
- return bh->b_private;
-}
-
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
- bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
- return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
- return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_State, &bh->b_state);
-}
-
-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
-{
- bit_spin_lock(BH_JournalHead, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_JournalHead, &bh->b_state);
-}
-
-#endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 535fd3bb1ba8..5fdc55312334 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -351,7 +351,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
*/
-static inline unsigned long msecs_to_jiffies(const unsigned int m)
+static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
if (__builtin_constant_p(m)) {
if ((int)m < 0)
@@ -363,18 +363,11 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m)
}
extern unsigned long __usecs_to_jiffies(const unsigned int u);
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
}
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-static inline unsigned long _usecs_to_jiffies(const unsigned int u)
-{
- return u * (HZ / USEC_PER_SEC);
-}
-static inline unsigned long _usecs_to_jiffies(const unsigned int u)
-{
#else
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
@@ -405,7 +398,7 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
*/
-static inline unsigned long usecs_to_jiffies(const unsigned int u)
+static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
if (__builtin_constant_p(u)) {
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
@@ -416,9 +409,25 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u)
}
}
-extern unsigned long timespec_to_jiffies(const struct timespec *value);
-extern void jiffies_to_timespec(const unsigned long jiffies,
- struct timespec *value);
+extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
+extern void jiffies_to_timespec64(const unsigned long jiffies,
+ struct timespec64 *value);
+static inline unsigned long timespec_to_jiffies(const struct timespec *value)
+{
+ struct timespec64 ts = timespec_to_timespec64(*value);
+
+ return timespec64_to_jiffies(&ts);
+}
+
+static inline void jiffies_to_timespec(const unsigned long jiffies,
+ struct timespec *value)
+{
+ struct timespec64 ts;
+
+ jiffies_to_timespec64(jiffies, &ts);
+ *value = timespec64_to_timespec(ts);
+}
+
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f4de473f226b..7f653e8f6690 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -7,17 +7,52 @@
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
* Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
*
+ * DEPRECATED API:
+ *
+ * The use of 'struct static_key' directly is now DEPRECATED. In addition,
+ * static_key_{true,false}() is also DEPRECATED. I.e. DO NOT use the following:
+ *
+ * struct static_key false = STATIC_KEY_INIT_FALSE;
+ * struct static_key true = STATIC_KEY_INIT_TRUE;
+ * static_key_true()
+ * static_key_false()
+ *
+ * The updated API replacements are:
+ *
+ * DEFINE_STATIC_KEY_TRUE(key);
+ * DEFINE_STATIC_KEY_FALSE(key);
+ * static_branch_likely()
+ * static_branch_unlikely()
+ *
* Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support, the result
- * of a "if (static_key_false(&key))" statement is an unconditional branch (which
- * defaults to false - and the true block is placed out of line).
+ * self-modifying code. Assuming toolchain and architecture support, if we
+ * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
+ * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
+ * (which defaults to false - and the true block is placed out of line).
+ * Similarly, we can define an initially true key via
+ * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
+ * "if (static_branch_unlikely(&key))", in which case we will generate an
+ * unconditional branch to the out-of-line true branch. Keys that are
+ * initially true or false can be used in both static_branch_unlikely()
+ * and static_branch_likely() statements.
+ *
+ * At runtime we can change the branch target by setting the key
+ * to true via a call to static_branch_enable(), or false using
+ * static_branch_disable(). If the direction of the branch is switched by
+ * these calls then we run-time modify the branch target via a
+ * no-op -> jump or jump -> no-op conversion. For example, for an
+ * initially false key that is used in an "if (static_branch_unlikely(&key))"
+ * statement, setting the key to true requires us to patch in a jump
+ * to the out-of-line true branch.
*
- * However at runtime we can change the branch target using
- * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object, and for as long as there are references all branches referring to
- * that particular key will point to the (out of line) true block.
+ * In addition to static_branch_{enable,disable}, we can also reference count
+ * the key or branch direction via static_branch_{inc,dec}. Thus,
+ * static_branch_inc() can be thought of as a 'make more true' and
+ * static_branch_dec() as a 'make more false'. The inc()/dec() interface
+ * should be used exclusively for a given key; do not mix it with
+ * static_branch_{enable,disable} on the same key.
*
- * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * Since this relies on modifying code, the branch modifying functions
* must be considered absolute slow paths (machine wide synchronization etc.).
* OTOH, since the affected branches are unconditional, their runtime overhead
* will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +64,10 @@
* cause significant performance degradation. Struct static_key_deferred and
* static_key_slow_dec_deferred() provide for this.
*
- * Lacking toolchain and or architecture support, jump labels fall back to a simple
- * conditional branch.
- *
- * struct static_key my_key = STATIC_KEY_INIT_TRUE;
- *
- * if (static_key_true(&my_key)) {
- * }
+ * Lacking toolchain and/or architecture support, static keys fall back to a
+ * simple conditional branch.
*
- * will result in the true case being in-line and starts the key with a single
- * reference. Mixing static_key_true() and static_key_false() on the same key is not
- * allowed.
- *
- * Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE.
+ * Additional babbling in: Documentation/static-keys.txt
*/
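For illustration, a hedged sketch of the updated API described in the comment above; the key name and the surrounding functions are invented for the example:

    /* Hypothetical user of the new static key API. */
    DEFINE_STATIC_KEY_FALSE(example_tracing_key);

    static void example_fast_path(void)
    {
            if (static_branch_unlikely(&example_tracing_key)) {
                    /* out-of-line slow path, only reached once the key is enabled */
            }
            /* common case: a NOP sits where the branch would be */
    }

    static void example_enable_tracing(void)
    {
            static_branch_enable(&example_tracing_key);     /* patches NOP -> JMP */
    }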
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -86,8 +111,8 @@ struct static_key {
#ifndef __ASSEMBLY__
enum jump_label_type {
- JUMP_LABEL_DISABLE = 0,
- JUMP_LABEL_ENABLE,
+ JUMP_LABEL_NOP = 0,
+ JUMP_LABEL_JMP,
};
struct module;
@@ -101,33 +126,18 @@ static inline int static_key_count(struct static_key *key)
#ifdef HAVE_JUMP_LABEL
-#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
-#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL
-#define JUMP_LABEL_TYPE_MASK 1UL
-
-static
-inline struct jump_entry *jump_label_get_entries(struct static_key *key)
-{
- return (struct jump_entry *)((unsigned long)key->entries
- & ~JUMP_LABEL_TYPE_MASK);
-}
-
-static inline bool jump_label_get_branch_default(struct static_key *key)
-{
- if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
- JUMP_LABEL_TYPE_TRUE_BRANCH)
- return true;
- return false;
-}
+#define JUMP_TYPE_FALSE 0UL
+#define JUMP_TYPE_TRUE 1UL
+#define JUMP_TYPE_MASK 1UL
static __always_inline bool static_key_false(struct static_key *key)
{
- return arch_static_branch(key);
+ return arch_static_branch(key, false);
}
static __always_inline bool static_key_true(struct static_key *key)
{
- return !static_key_false(key);
+ return !arch_static_branch(key, true);
}
extern struct jump_entry __start___jump_table[];
@@ -145,12 +155,12 @@ extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+#define STATIC_KEY_INIT_TRUE \
{ .enabled = ATOMIC_INIT(1), \
- .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ .entries = (void *)JUMP_TYPE_TRUE }
+#define STATIC_KEY_INIT_FALSE \
{ .enabled = ATOMIC_INIT(0), \
- .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+ .entries = (void *)JUMP_TYPE_FALSE }
#else /* !HAVE_JUMP_LABEL */
@@ -198,10 +208,8 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
- { .enabled = ATOMIC_INIT(1) })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
- { .enabled = ATOMIC_INIT(0) })
+#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
#endif /* HAVE_JUMP_LABEL */
@@ -213,6 +221,157 @@ static inline bool static_key_enabled(struct static_key *key)
return static_key_count(key) > 0;
}
+static inline void static_key_enable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (!count)
+ static_key_slow_inc(key);
+}
+
+static inline void static_key_disable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (count)
+ static_key_slow_dec(key);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Two type wrappers around static_key, such that we can use compile time
+ * type differentiation to emit the right code.
+ *
+ * All the below code is macros in order to play type games.
+ */
+
+struct static_key_true {
+ struct static_key key;
+};
+
+struct static_key_false {
+ struct static_key key;
+};
+
+#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
+#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
+
+#define DEFINE_STATIC_KEY_TRUE(name) \
+ struct static_key_true name = STATIC_KEY_TRUE_INIT
+
+#define DEFINE_STATIC_KEY_FALSE(name) \
+ struct static_key_false name = STATIC_KEY_FALSE_INIT
+
+#ifdef HAVE_JUMP_LABEL
+
+/*
+ * Combine the right initial value (type) with the right branch order
+ * to generate the desired result.
+ *
+ *
+ * type\branch| likely (1) | unlikely (0)
+ * -----------+-----------------------+------------------
+ * | |
+ * true (1) | ... | ...
+ * | NOP | JMP L
+ * | <br-stmts> | 1: ...
+ * | L: ... |
+ * | |
+ * | | L: <br-stmts>
+ * | | jmp 1b
+ * | |
+ * -----------+-----------------------+------------------
+ * | |
+ * false (0) | ... | ...
+ * | JMP L | NOP
+ * | <br-stmts> | 1: ...
+ * | L: ... |
+ * | |
+ * | | L: <br-stmts>
+ * | | jmp 1b
+ * | |
+ * -----------+-----------------------+------------------
+ *
+ * The initial value is encoded in the LSB of static_key::entries,
+ * type: 0 = false, 1 = true.
+ *
+ * The branch type is encoded in the LSB of jump_entry::key,
+ * branch: 0 = unlikely, 1 = likely.
+ *
+ * This gives the following logic table:
+ *
+ *	enabled	type	branch	  instruction
+ * -----------------------------+-----------
+ * 0 0 0 | NOP
+ * 0 0 1 | JMP
+ * 0 1 0 | NOP
+ * 0 1 1 | JMP
+ *
+ * 1 0 0 | JMP
+ * 1 0 1 | NOP
+ * 1 1 0 | JMP
+ * 1 1 1 | NOP
+ *
+ * Which gives the following functions:
+ *
+ * dynamic: instruction = enabled ^ branch
+ * static: instruction = type ^ branch
+ *
+ * See jump_label_type() / jump_label_init_type().
+ */
+
+extern bool ____wrong_branch_error(void);
+
+#define static_branch_likely(x) \
+({ \
+ bool branch; \
+ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
+ branch = !arch_static_branch(&(x)->key, true); \
+ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ branch = !arch_static_branch_jump(&(x)->key, true); \
+ else \
+ branch = ____wrong_branch_error(); \
+ branch; \
+})
+
+#define static_branch_unlikely(x) \
+({ \
+ bool branch; \
+ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
+ branch = arch_static_branch_jump(&(x)->key, false); \
+ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ branch = arch_static_branch(&(x)->key, false); \
+ else \
+ branch = ____wrong_branch_error(); \
+ branch; \
+})
+
+#else /* !HAVE_JUMP_LABEL */
+
+#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+
+#endif /* HAVE_JUMP_LABEL */
+
+/*
+ * Advanced usage; refcount, branch is enabled when: count != 0
+ */
+
+#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
+#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
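A short, hypothetical example of the refcounted form: each active user "votes" for the branch, and it stays enabled while the count is non-zero.

    /* Hypothetical refcounted key: branch is taken while any user is active. */
    DEFINE_STATIC_KEY_FALSE(example_users_key);

    static void example_user_register(void)
    {
            static_branch_inc(&example_users_key);  /* first inc enables the branch */
    }

    static void example_user_unregister(void)
    {
            static_branch_dec(&example_users_key);  /* last dec disables it again */
    }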
+
+/*
+ * Normal usage; boolean enable/disable.
+ */
+
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+
#endif /* _LINUX_JUMP_LABEL_H */
#endif /* __ASSEMBLY__ */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5486d777b706..4b9f85c963d0 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -10,11 +10,19 @@ struct vm_struct;
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
-#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#include <asm/kasan.h>
+#include <asm/pgtable.h>
#include <linux/sched.h>
+extern unsigned char kasan_zero_page[PAGE_SIZE];
+extern pte_t kasan_zero_pte[PTRS_PER_PTE];
+extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
+extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+
+void kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end);
+
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e804306ef5e8..b63218f68c4b 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -323,6 +323,7 @@ struct pt_regs;
struct task_struct;
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+#define kexec_in_progress false
#endif /* CONFIG_KEXEC */
#endif /* !defined(__ASSEMBLY__) */
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 61e5b723ae73..953f283f8451 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n);
extern void klist_iter_exit(struct klist_iter *i);
+extern struct klist_node *klist_prev(struct klist_iter *i);
extern struct klist_node *klist_next(struct klist_iter *i);
#endif
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1ab54754a86d..8f6849084248 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -267,6 +267,8 @@ extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern bool within_kprobe_blacklist(unsigned long addr);
+
struct kprobe_insn_cache {
struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 13d55206ccf6..869b21dcf503 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -38,6 +38,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
})
void kthread_bind(struct task_struct *k, unsigned int cpu);
+void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 05e99b8ef465..81089cf1f0c1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -139,6 +139,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_DISABLE_IBS 24
#define KVM_REQ_APIC_PAGE_RELOAD 25
#define KVM_REQ_SMI 26
+#define KVM_REQ_HV_CRASH 27
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -363,9 +364,6 @@ struct kvm {
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- u32 bsp_vcpu_id;
-#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
atomic_t online_vcpus;
int last_boosted_vcpu;
@@ -424,8 +422,15 @@ struct kvm {
#define vcpu_unimpl(vcpu, fmt, ...) \
kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+#define vcpu_debug(vcpu, fmt, ...) \
+ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
+ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
+ * the caller has read kvm->online_vcpus before (as is the case
+ * for kvm_for_each_vcpu, for example).
+ */
smp_rmb();
return kvm->vcpus[i];
}
@@ -1055,22 +1060,9 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
-{
- return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
-}
-
-static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
-}
-
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
-
#else
-
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
-
#endif
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fbf10a0bc095..fd4ca0b4fe0f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -55,8 +55,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
-#include <asm/cmpxchg.h>
struct llist_head {
struct llist_node *first;
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index a16b1f9c1aca..0962b2ca628a 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -6,6 +6,7 @@
#include <linux/mod_devicetable.h>
struct mei_cl_device;
+struct mei_device;
typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
u32 events, void *context);
@@ -17,6 +18,8 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
* Drivers for MEI devices will get an mei_cl_device pointer
* when being probed and shall use it for doing ME bus I/O.
*
+ * @bus_list: device on the bus list
+ * @bus: parent mei device
* @dev: linux driver model device pointer
* @me_cl: me client
* @cl: mei client
@@ -25,10 +28,16 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
* @event_cb: Drivers register this callback to get asynchronous ME
* events (e.g. Rx buffer pending) notifications.
* @event_context: event callback run context
+ * @events_mask: Events bit mask requested by driver.
* @events: Events bitmask sent to the driver.
+ *
+ * @do_match: whether the device can be matched with a driver
+ * @is_added: device is already scanned
* @priv_data: client private data
*/
struct mei_cl_device {
+ struct list_head bus_list;
+ struct mei_device *bus;
struct device dev;
struct mei_me_client *me_cl;
@@ -38,8 +47,12 @@ struct mei_cl_device {
struct work_struct event_work;
mei_cl_event_cb_t event_cb;
void *event_context;
+ unsigned long events_mask;
unsigned long events;
+ unsigned int do_match:1;
+ unsigned int is_added:1;
+
void *priv_data;
};
@@ -65,10 +78,12 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
int mei_cl_register_event_cb(struct mei_cl_device *device,
+ unsigned long event_mask,
mei_cl_event_cb_t read_cb, void *context);
#define MEI_CL_EVENT_RX 0
#define MEI_CL_EVENT_TX 1
+#define MEI_CL_EVENT_NOTIF 2
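A hedged sketch of how a client driver might use the new event_mask parameter; the driver functions are hypothetical, and passing BIT(MEI_CL_EVENT_RX) as the mask is an assumption based on events being delivered as a bitmask of the MEI_CL_EVENT_* numbers:

    /* Hypothetical driver: ask only for Rx notifications. */
    static void example_event_cb(struct mei_cl_device *device, u32 events,
                                 void *context)
    {
            if (events & BIT(MEI_CL_EVENT_RX)) {
                    /* ... read the pending data with mei_cl_recv() ... */
            }
    }

    static int example_setup(struct mei_cl_device *device)
    {
            return mei_cl_register_event_cb(device, BIT(MEI_CL_EVENT_RX),
                                            example_event_cb, NULL);
    }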
void *mei_cl_get_drvdata(const struct mei_cl_device *device);
void mei_cl_set_drvdata(struct mei_cl_device *device, void *data);
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
new file mode 100644
index 000000000000..095b121aa725
--- /dev/null
+++ b/include/linux/mfd/max77693-common.h
@@ -0,0 +1,49 @@
+/*
+ * Common data shared between Maxim 77693 and 77843 drivers
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_COMMON_H
+#define __LINUX_MFD_MAX77693_COMMON_H
+
+enum max77693_types {
+ TYPE_MAX77693_UNKNOWN,
+ TYPE_MAX77693,
+ TYPE_MAX77843,
+
+ TYPE_MAX77693_NUM,
+};
+
+/*
+ * Shared also with max77843.
+ */
+struct max77693_dev {
+ struct device *dev;
+ struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
+ struct i2c_client *i2c_muic; /* 0x4A , MUIC */
+ struct i2c_client *i2c_haptic; /* MAX77693: 0x90 , Haptic */
+ struct i2c_client *i2c_chg; /* MAX77843: 0xD2, Charger */
+
+ enum max77693_types type;
+
+ struct regmap *regmap;
+ struct regmap *regmap_muic;
+ struct regmap *regmap_haptic; /* Only MAX77693 */
+ struct regmap *regmap_chg; /* Only MAX77843 */
+
+ struct regmap_irq_chip_data *irq_data_led;
+ struct regmap_irq_chip_data *irq_data_topsys;
+ struct regmap_irq_chip_data *irq_data_chg; /* Only MAX77693 */
+ struct regmap_irq_chip_data *irq_data_muic;
+
+ int irq;
+};
+
+
+#endif /* __LINUX_MFD_MAX77693_COMMON_H */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 51633ea6f910..3c7a63b98ad6 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -310,30 +310,30 @@ enum max77693_muic_reg {
#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT)
/* MAX77693 MUIC - STATUS1~3 Register */
-#define STATUS1_ADC_SHIFT (0)
-#define STATUS1_ADCLOW_SHIFT (5)
-#define STATUS1_ADCERR_SHIFT (6)
-#define STATUS1_ADC1K_SHIFT (7)
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
-
-#define STATUS2_CHGTYP_SHIFT (0)
-#define STATUS2_CHGDETRUN_SHIFT (3)
-#define STATUS2_DCDTMR_SHIFT (4)
-#define STATUS2_DXOVP_SHIFT (5)
-#define STATUS2_VBVOLT_SHIFT (6)
-#define STATUS2_VIDRM_SHIFT (7)
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
-
-#define STATUS3_OVP_SHIFT (2)
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+#define MAX77693_STATUS1_ADC_SHIFT 0
+#define MAX77693_STATUS1_ADCLOW_SHIFT 5
+#define MAX77693_STATUS1_ADCERR_SHIFT 6
+#define MAX77693_STATUS1_ADC1K_SHIFT 7
+#define MAX77693_STATUS1_ADC_MASK (0x1f << MAX77693_STATUS1_ADC_SHIFT)
+#define MAX77693_STATUS1_ADCLOW_MASK BIT(MAX77693_STATUS1_ADCLOW_SHIFT)
+#define MAX77693_STATUS1_ADCERR_MASK BIT(MAX77693_STATUS1_ADCERR_SHIFT)
+#define MAX77693_STATUS1_ADC1K_MASK BIT(MAX77693_STATUS1_ADC1K_SHIFT)
+
+#define MAX77693_STATUS2_CHGTYP_SHIFT 0
+#define MAX77693_STATUS2_CHGDETRUN_SHIFT 3
+#define MAX77693_STATUS2_DCDTMR_SHIFT 4
+#define MAX77693_STATUS2_DXOVP_SHIFT 5
+#define MAX77693_STATUS2_VBVOLT_SHIFT 6
+#define MAX77693_STATUS2_VIDRM_SHIFT 7
+#define MAX77693_STATUS2_CHGTYP_MASK (0x7 << MAX77693_STATUS2_CHGTYP_SHIFT)
+#define MAX77693_STATUS2_CHGDETRUN_MASK BIT(MAX77693_STATUS2_CHGDETRUN_SHIFT)
+#define MAX77693_STATUS2_DCDTMR_MASK BIT(MAX77693_STATUS2_DCDTMR_SHIFT)
+#define MAX77693_STATUS2_DXOVP_MASK BIT(MAX77693_STATUS2_DXOVP_SHIFT)
+#define MAX77693_STATUS2_VBVOLT_MASK BIT(MAX77693_STATUS2_VBVOLT_SHIFT)
+#define MAX77693_STATUS2_VIDRM_MASK BIT(MAX77693_STATUS2_VIDRM_SHIFT)
+
+#define MAX77693_STATUS3_OVP_SHIFT 2
+#define MAX77693_STATUS3_OVP_MASK BIT(MAX77693_STATUS3_OVP_SHIFT)
/* MAX77693 CDETCTRL1~2 register */
#define CDETCTRL1_CHGDETEN_SHIFT (0)
@@ -362,38 +362,38 @@ enum max77693_muic_reg {
#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
| (1 << COMN1SW_SHIFT))
-#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
| (2 << COMN1SW_SHIFT))
-#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
| (3 << COMN1SW_SHIFT))
-#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
| (0 << COMN1SW_SHIFT))
-#define CONTROL2_LOWPWR_SHIFT (0)
-#define CONTROL2_ADCEN_SHIFT (1)
-#define CONTROL2_CPEN_SHIFT (2)
-#define CONTROL2_SFOUTASRT_SHIFT (3)
-#define CONTROL2_SFOUTORD_SHIFT (4)
-#define CONTROL2_ACCDET_SHIFT (5)
-#define CONTROL2_USBCPINT_SHIFT (6)
-#define CONTROL2_RCPS_SHIFT (7)
-#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
-#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
-#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
-#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
-#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
-#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
-#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
-#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
-
-#define CONTROL3_JIGSET_SHIFT (0)
-#define CONTROL3_BTLDSET_SHIFT (2)
-#define CONTROL3_ADCDBSET_SHIFT (4)
-#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
-#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
+#define MAX77693_CONTROL2_LOWPWR_SHIFT 0
+#define MAX77693_CONTROL2_ADCEN_SHIFT 1
+#define MAX77693_CONTROL2_CPEN_SHIFT 2
+#define MAX77693_CONTROL2_SFOUTASRT_SHIFT 3
+#define MAX77693_CONTROL2_SFOUTORD_SHIFT 4
+#define MAX77693_CONTROL2_ACCDET_SHIFT 5
+#define MAX77693_CONTROL2_USBCPINT_SHIFT 6
+#define MAX77693_CONTROL2_RCPS_SHIFT 7
+#define MAX77693_CONTROL2_LOWPWR_MASK BIT(MAX77693_CONTROL2_LOWPWR_SHIFT)
+#define MAX77693_CONTROL2_ADCEN_MASK BIT(MAX77693_CONTROL2_ADCEN_SHIFT)
+#define MAX77693_CONTROL2_CPEN_MASK BIT(MAX77693_CONTROL2_CPEN_SHIFT)
+#define MAX77693_CONTROL2_SFOUTASRT_MASK BIT(MAX77693_CONTROL2_SFOUTASRT_SHIFT)
+#define MAX77693_CONTROL2_SFOUTORD_MASK BIT(MAX77693_CONTROL2_SFOUTORD_SHIFT)
+#define MAX77693_CONTROL2_ACCDET_MASK BIT(MAX77693_CONTROL2_ACCDET_SHIFT)
+#define MAX77693_CONTROL2_USBCPINT_MASK BIT(MAX77693_CONTROL2_USBCPINT_SHIFT)
+#define MAX77693_CONTROL2_RCPS_MASK BIT(MAX77693_CONTROL2_RCPS_SHIFT)
+
+#define MAX77693_CONTROL3_JIGSET_SHIFT 0
+#define MAX77693_CONTROL3_BTLDSET_SHIFT 2
+#define MAX77693_CONTROL3_ADCDBSET_SHIFT 4
+#define MAX77693_CONTROL3_JIGSET_MASK (0x3 << MAX77693_CONTROL3_JIGSET_SHIFT)
+#define MAX77693_CONTROL3_BTLDSET_MASK (0x3 << MAX77693_CONTROL3_BTLDSET_SHIFT)
+#define MAX77693_CONTROL3_ADCDBSET_MASK (0x3 << MAX77693_CONTROL3_ADCDBSET_SHIFT)
/* Slave addr = 0x90: Haptic */
enum max77693_haptic_reg {
@@ -529,36 +529,4 @@ enum max77693_irq_muic {
MAX77693_MUIC_IRQ_NR,
};
-struct max77693_dev {
- struct device *dev;
- struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
- struct i2c_client *muic; /* 0x4A , MUIC */
- struct i2c_client *haptic; /* 0x90 , Haptic */
-
- int type;
-
- struct regmap *regmap;
- struct regmap *regmap_muic;
- struct regmap *regmap_haptic;
-
- struct regmap_irq_chip_data *irq_data_led;
- struct regmap_irq_chip_data *irq_data_topsys;
- struct regmap_irq_chip_data *irq_data_charger;
- struct regmap_irq_chip_data *irq_data_muic;
-
- int irq;
- int irq_gpio;
- struct mutex irqlock;
- int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
- int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
-};
-
-enum max77693_types {
- TYPE_MAX77693,
-};
-
-extern int max77693_irq_init(struct max77693_dev *max77686);
-extern void max77693_irq_exit(struct max77693_dev *max77686);
-extern int max77693_irq_resume(struct max77693_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77693_PRIV_H */
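
The MUIC status and control bit definitions above are now namespaced with a MAX77693_ prefix and single-bit masks use BIT(). A minimal sketch of how a MUIC driver could decode the accessory ADC code under these definitions; the regmap handle comes from the shared max77693_dev, the MAX77693_MUIC_REG_STATUS1 index is from enum max77693_muic_reg earlier in this header, and the function name is illustrative:

        #include <linux/regmap.h>
        #include <linux/mfd/max77693-private.h>

        static int max77693_muic_read_adc(struct regmap *muic_regmap, u8 *adc)
        {
                unsigned int status;
                int ret;

                ret = regmap_read(muic_regmap, MAX77693_MUIC_REG_STATUS1, &status);
                if (ret < 0)
                        return ret;

                /* extract the 5-bit ADC field describing the attached cable */
                *adc = (status & MAX77693_STATUS1_ADC_MASK) >>
                        MAX77693_STATUS1_ADC_SHIFT;
                return 0;
        }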
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 7178ace8379e..c19303b0ccfd 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -318,62 +318,62 @@ enum max77843_irq_muic {
MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
/* MAX77843 STATUS register*/
-#define STATUS1_ADC_SHIFT 0
-#define STATUS1_ADCERROR_SHIFT 6
-#define STATUS1_ADC1K_SHIFT 7
-#define STATUS2_CHGTYP_SHIFT 0
-#define STATUS2_CHGDETRUN_SHIFT 3
-#define STATUS2_DCDTMR_SHIFT 4
-#define STATUS2_DXOVP_SHIFT 5
-#define STATUS2_VBVOLT_SHIFT 6
-#define STATUS3_VBADC_SHIFT 0
-#define STATUS3_VDNMON_SHIFT 4
-#define STATUS3_DNRES_SHIFT 5
-#define STATUS3_MPNACK_SHIFT 6
-
-#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT)
-#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT)
-#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
-#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
-#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT)
-#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
-#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT)
-#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT)
-#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT)
-#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC_SHIFT 0
+#define MAX77843_MUIC_STATUS1_ADCERROR_SHIFT 6
+#define MAX77843_MUIC_STATUS1_ADC1K_SHIFT 7
+#define MAX77843_MUIC_STATUS2_CHGTYP_SHIFT 0
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT 3
+#define MAX77843_MUIC_STATUS2_DCDTMR_SHIFT 4
+#define MAX77843_MUIC_STATUS2_DXOVP_SHIFT 5
+#define MAX77843_MUIC_STATUS2_VBVOLT_SHIFT 6
+#define MAX77843_MUIC_STATUS3_VBADC_SHIFT 0
+#define MAX77843_MUIC_STATUS3_VDNMON_SHIFT 4
+#define MAX77843_MUIC_STATUS3_DNRES_SHIFT 5
+#define MAX77843_MUIC_STATUS3_MPNACK_SHIFT 6
+
+#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << MAX77843_MUIC_STATUS1_ADC_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(MAX77843_MUIC_STATUS1_ADCERROR_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(MAX77843_MUIC_STATUS1_ADC1K_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << MAX77843_MUIC_STATUS2_CHGTYP_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT)
+#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(MAX77843_MUIC_STATUS2_DCDTMR_SHIFT)
+#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(MAX77843_MUIC_STATUS2_DXOVP_SHIFT)
+#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(MAX77843_MUIC_STATUS2_VBVOLT_SHIFT)
+#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << MAX77843_MUIC_STATUS3_VBADC_SHIFT)
+#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(MAX77843_MUIC_STATUS3_VDNMON_SHIFT)
+#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(MAX77843_MUIC_STATUS3_DNRES_SHIFT)
+#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(MAX77843_MUIC_STATUS3_MPNACK_SHIFT)
/* MAX77843 CONTROL register */
-#define CONTROL1_COMP1SW_SHIFT 0
-#define CONTROL1_COMP2SW_SHIFT 3
-#define CONTROL1_IDBEN_SHIFT 7
-#define CONTROL2_LOWPWR_SHIFT 0
-#define CONTROL2_ADCEN_SHIFT 1
-#define CONTROL2_CPEN_SHIFT 2
-#define CONTROL2_ACC_DET_SHIFT 5
-#define CONTROL2_USBCPINT_SHIFT 6
-#define CONTROL2_RCPS_SHIFT 7
-#define CONTROL3_JIGSET_SHIFT 0
-#define CONTROL4_ADCDBSET_SHIFT 0
-#define CONTROL4_USBAUTO_SHIFT 4
-#define CONTROL4_FCTAUTO_SHIFT 5
-#define CONTROL4_ADCMODE_SHIFT 6
-
-#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT)
-#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT)
-#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT)
-#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT)
-#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT)
-#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT)
-#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT)
-#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT)
-#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT)
-#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
+#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
+#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
+#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
+#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
+#define MAX77843_MUIC_CONTROL2_CPEN_SHIFT 2
+#define MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT 5
+#define MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT 6
+#define MAX77843_MUIC_CONTROL2_RCPS_SHIFT 7
+#define MAX77843_MUIC_CONTROL3_JIGSET_SHIFT 0
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT 0
+#define MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT 4
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT 5
+#define MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT 6
+
+#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT)
+#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT)
+#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(MAX77843_MUIC_CONTROL2_RCPS_SHIFT)
+#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << MAX77843_MUIC_CONTROL3_JIGSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT)
/* MAX77843 switch port */
#define COM_OPEN 0
@@ -383,38 +383,38 @@ enum max77843_irq_muic {
#define COM_AUX_USB 4
#define COM_AUX_UART 5
-#define CONTROL1_COM_SW \
+#define MAX77843_MUIC_CONTROL1_COM_SW \
((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
-#define CONTROL1_SW_OPEN \
- ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \
- COM_OPEN << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_USB \
- ((COM_USB << CONTROL1_COMP1SW_SHIFT | \
- COM_USB << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUDIO \
- ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \
- COM_AUDIO << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_UART \
- ((COM_UART << CONTROL1_COMP1SW_SHIFT | \
- COM_UART << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUX_USB \
- ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \
- COM_AUX_USB << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUX_UART \
- ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \
- COM_AUX_UART << CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_OPEN \
+ ((COM_OPEN << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_OPEN << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_USB \
+ ((COM_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUDIO \
+ ((COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_UART \
+ ((COM_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUX_USB \
+ ((COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUX_UART \
+ ((COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
#define MAX77843_DISABLE 0
#define MAX77843_ENABLE 1
#define CONTROL4_AUTO_DISABLE \
- ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \
- (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT))
+ ((MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
#define CONTROL4_AUTO_ENABLE \
- ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \
- (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT))
+ ((MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
/* MAX77843 SAFEOUT LDO Control register */
#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
@@ -431,24 +431,4 @@ enum max77843_irq_muic {
#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
(0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
-struct max77843 {
- struct device *dev;
-
- struct i2c_client *i2c;
- struct i2c_client *i2c_chg;
- struct i2c_client *i2c_fuel;
- struct i2c_client *i2c_muic;
-
- struct regmap *regmap;
- struct regmap *regmap_chg;
- struct regmap *regmap_fuel;
- struct regmap *regmap_muic;
-
- struct regmap_irq_chip_data *irq_data;
- struct regmap_irq_chip_data *irq_data_chg;
- struct regmap_irq_chip_data *irq_data_fuel;
- struct regmap_irq_chip_data *irq_data_muic;
-
- int irq;
-};
#endif /* __MAX77843_H__ */
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index bb270bd03eed..13e1d96935ed 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/extcon.h>
+#include <linux/of_gpio.h>
#include <linux/usb/phy_companion.h>
#define PALMAS_NUM_CLIENTS 3
@@ -551,10 +552,16 @@ struct palmas_usb {
int vbus_otg_irq;
int vbus_irq;
+ int gpio_id_irq;
+ struct gpio_desc *id_gpiod;
+ unsigned long sw_debounce_jiffies;
+ struct delayed_work wq_detectid;
+
enum palmas_usb_state linkstat;
int wakeup;
bool enable_vbus_detection;
bool enable_id_detection;
+ bool enable_gpio_id_detection;
};
#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d16f4c82c568..558a485d03ab 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -435,4 +435,12 @@
#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
+/* For imx6ul iomux gpr register field define */
+#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
+#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
+#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
+#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
+#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
+#define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
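
The new IMX6UL_GPR1_* bits select the ENET reference-clock direction. A hedged sketch of driving ENET1's clock as an output through the GPR syscon; the compatible string and the 0x4 offset of GPR1 are assumptions about the i.MX6UL syscon layout, not something this header defines:

        #include <linux/mfd/syscon.h>
        #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
        #include <linux/regmap.h>
        #include <linux/err.h>

        static void imx6ul_enet1_clk_output(void)
        {
                struct regmap *gpr;

                gpr = syscon_regmap_lookup_by_compatible("fsl,imx6ul-iomuxc-gpr");
                if (IS_ERR(gpr))
                        return;

                regmap_update_bits(gpr, 0x4,        /* IOMUXC_GPR1, assumed offset */
                                   IMX6UL_GPR1_ENET1_CLK_DIR,
                                   IMX6UL_GPR1_ENET1_CLK_OUTPUT);
        }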
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 819077c32690..81f6e427ba6b 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -67,7 +67,7 @@ struct miscdevice {
};
extern int misc_register(struct miscdevice *misc);
-extern int misc_deregister(struct miscdevice *misc);
+extern void misc_deregister(struct miscdevice *misc);
#define MODULE_ALIAS_MISCDEV(minor) \
MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
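
misc_deregister() no longer returns an error, so remove and unwind paths simply call it. A minimal sketch of the resulting cleanup path; the device and function names are illustrative:

        #include <linux/fs.h>
        #include <linux/miscdevice.h>

        static const struct file_operations my_fops;    /* illustrative, normally populated */

        static struct miscdevice my_misc = {
                .minor = MISC_DYNAMIC_MINOR,
                .name  = "mydev",
                .fops  = &my_fops,
        };

        static void my_cleanup(void)
        {
                /* previously returned int; now there is nothing to check */
                misc_deregister(&my_misc);
        }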
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12a1163..09cebe528488 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
enum {
MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
- MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30,
MLX4_CQE_L2_TUNNEL = 1 << 27,
MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1ce3b4a..bcbf8c72a77b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -211,6 +211,8 @@ enum {
MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
+ MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
};
enum {
@@ -581,6 +583,7 @@ struct mlx4_caps {
u64 phys_port_id[MLX4_MAX_PORTS + 1];
int tunnel_offload_mode;
u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+ u8 phv_bit[MLX4_MAX_PORTS + 1];
u8 alloc_res_qp_mask;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
@@ -1332,6 +1335,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539e5456..de45a51b3f04 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@ enum {
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
- MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_CVLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_SVLAN = 1 << 7,
MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9e2097..250b1ff8b48d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1182,6 +1182,16 @@ enum {
MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};
+enum {
+ MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
+ MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
+ MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
+ MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
+ MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+ MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
+ MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+};
+
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88c2429..8b6d6f2154a4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,8 @@ enum {
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
+ MLX5_REG_PFCC = 0x5007,
+ MLX5_REG_PPCNT = 0x5008,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
@@ -151,8 +153,8 @@ enum mlx5_dev_event {
};
enum mlx5_port_status {
- MLX5_PORT_UP = 1 << 1,
- MLX5_PORT_DOWN = 1 << 2,
+ MLX5_PORT_UP = 1,
+ MLX5_PORT_DOWN = 2,
};
struct mlx5_uuar_info {
@@ -380,7 +382,7 @@ struct mlx5_uar {
u32 index;
struct list_head bf_list;
unsigned free_bf_bmap;
- void __iomem *wc_map;
+ void __iomem *bf_map;
void __iomem *map;
};
@@ -435,6 +437,8 @@ struct mlx5_priv {
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
+ struct io_mapping *bf_mapping;
+
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@@ -463,6 +467,10 @@ struct mlx5_priv {
/* end: mr staff */
/* start: alloc staff */
+ /* protect buffer allocation according to numa node */
+ struct mutex alloc_mutex;
+ int numa_node;
+
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc staff */
@@ -672,6 +680,8 @@ void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -752,9 +762,10 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
u8 local_port);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -764,6 +775,10 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -773,6 +788,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+ int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
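
Among the additions above are per-port pause-frame and administrative-status helpers. A sketch of how a driver's ethtool path might use the pause query; the wrapper name is illustrative and the helper signature is the one declared above:

        static int my_get_pauseparam(struct mlx5_core_dev *mdev,
                                     struct ethtool_pauseparam *pause)
        {
                u32 rx_pause = 0, tx_pause = 0;
                int err;

                err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
                if (err)
                        return err;

                pause->rx_pause = !!rx_pause;
                pause->tx_pause = !!tx_pause;
                return 0;
        }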
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fee041c..dd2097455a2e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@ enum {
};
enum {
- MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
- MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
- MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+ MLX5_RX_HASH_FN_NONE = 0x0,
+ MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+ MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
};
enum {
@@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits {
struct mlx5_ifc_tisc_bits ctx;
};
+struct mlx5_ifc_modify_tir_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 lro[0x1];
+};
+
struct mlx5_ifc_modify_tir_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
u8 reserved_4[0x40];
@@ -4116,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_rqt_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 rqn_list[0x1];
+};
+
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -4128,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_rqt_bitmask_bits bitmask;
u8 reserved_4[0x40];
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 34f25b7bf642..688997a24aad 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -253,7 +253,7 @@ struct pcmcia_device_id {
__u32 prod_id_hash[4];
- /* not matched against in kernelspace*/
+ /* not matched against in kernelspace */
const char * prod_id[4];
/* not matched against */
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644
index 000000000000..ef29eb2d6dfd
--- /dev/null
+++ b/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ac4a68ffae2..ad939d0ba816 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -14,38 +14,85 @@ extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
+struct pci_dev;
+struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
+ struct msi_msg *msg);
+
+/**
+ * platform_msi_desc - Platform device specific msi descriptor data
+ * @msi_priv_data: Pointer to platform private data
+ * @msi_index: The index of the MSI descriptor for multi MSI
+ */
+struct platform_msi_desc {
+ struct platform_msi_priv_data *msi_priv_data;
+ u16 msi_index;
+};
+
+/**
+ * struct msi_desc - Descriptor structure for MSI based interrupts
+ * @list: List head for management
+ * @irq: The base interrupt number
+ * @nvec_used: The number of vectors used
+ * @dev: Pointer to the device which uses this descriptor
+ * @msg: The last set MSI message cached for reuse
+ *
+ * @masked: [PCI MSI/X] Mask bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
+ * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
+ * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
+ * @platform: [platform] Platform device specific msi descriptor data
+ */
struct msi_desc {
- struct {
- __u8 is_msix : 1;
- __u8 multiple: 3; /* log2 num of messages allocated */
- __u8 multi_cap : 3; /* log2 num of messages supported */
- __u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u16 entry_nr; /* specific enabled entry */
- unsigned default_irq; /* default pre-assigned irq */
- } msi_attrib;
-
- u32 masked; /* mask bits */
- unsigned int irq;
- unsigned int nvec_used; /* number of messages */
- struct list_head list;
+ /* Shared device/bus type independent data */
+ struct list_head list;
+ unsigned int irq;
+ unsigned int nvec_used;
+ struct device *dev;
+ struct msi_msg msg;
union {
- void __iomem *mask_base;
- u8 mask_pos;
- };
- struct pci_dev *dev;
+ /* PCI MSI/X specific data */
+ struct {
+ u32 masked;
+ struct {
+ __u8 is_msix : 1;
+ __u8 multiple : 3;
+ __u8 multi_cap : 3;
+ __u8 maskbit : 1;
+ __u8 is_64 : 1;
+ __u16 entry_nr;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
+ };
- /* Last set MSI message */
- struct msi_msg msg;
+ /*
+ * Non PCI variants add their data structure here. New
+ * entries need to use a named structure. We want
+ * proper name spaces for this. The PCI part is
+ * anonymous for now as it would require an immediate
+ * tree wide cleanup.
+ */
+ struct platform_msi_desc platform;
+ };
};
/* Helpers to hide struct msi_desc implementation details */
-#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
-#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
+#define msi_desc_to_dev(desc) ((desc)->dev)
+#define dev_to_msi_list(dev) (&(dev)->msi_list)
#define first_msi_entry(dev) \
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
@@ -56,12 +103,17 @@ struct msi_desc {
#define for_each_pci_msi_entry(desc, pdev) \
for_each_msi_entry((desc), &(pdev)->dev)
-static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+#else /* CONFIG_PCI_MSI */
+static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
- return desc->dev;
+ return NULL;
}
#endif /* CONFIG_PCI_MSI */
+struct msi_desc *alloc_msi_entry(struct device *dev);
+void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
@@ -108,9 +160,6 @@ struct msi_controller {
struct device *dev;
struct device_node *of_node;
struct list_head list;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *domain;
-#endif
int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
struct msi_desc *desc);
@@ -221,6 +270,12 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
+struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg);
+void platform_msi_domain_free_irqs(struct device *dev);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
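
The reworked msi_desc and the platform MSI entry points above let non-PCI devices allocate MSIs through an irq domain. A hedged sketch of the consumer side; the probe wiring, vector count and doorbell programming are illustrative, only the two platform_msi_* calls and the callback type come from this header:

        static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
        {
                /* program the device doorbell from msg->address_hi/lo and msg->data */
        }

        static int my_probe(struct platform_device *pdev)
        {
                int err;

                err = platform_msi_domain_alloc_irqs(&pdev->dev, 4, my_write_msi_msg);
                if (err)
                        return err;

                /* request_irq() on each allocated descriptor via for_each_msi_entry() */
                return 0;
        }

        static void my_teardown(struct platform_device *pdev)
        {
                platform_msi_domain_free_irqs(&pdev->dev);
        }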
diff --git a/include/linux/net.h b/include/linux/net.h
index 04aa06852771..049d4b03c4c4 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -239,8 +239,16 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#if defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+#else
+#define net_dbg_ratelimited(fmt, ...) \
+ do { \
+ if (0) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
+#endif
bool __net_get_random_once(void *buf, int nbytes, bool *done,
struct static_key *done_key);
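
With this change net_dbg_ratelimited() compiles away entirely unless DEBUG is defined, while the if (0) no_printk() arm keeps the format string and arguments type-checked. Call sites stay as they are; a small illustrative example:

        static void my_rx_drop(struct net_device *dev, unsigned int len)
        {
                /* emits nothing without DEBUG, but "%u"/len is still type-checked */
                net_dbg_ratelimited("%s: dropping malformed frame, len=%u\n",
                                    dev->name, len);
        }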
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979dfd6a9..88a00694eda5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -766,6 +766,13 @@ struct netdev_phys_item_id {
unsigned char id_len;
};
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+ struct netdev_phys_item_id *b)
+{
+ return a->id_len == b->id_len &&
+ memcmp(a->id, b->id, a->id_len) == 0;
+}
+
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
@@ -1041,6 +1048,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
+ * int (*ndo_change_proto_down)(struct net_device *dev,
+ * bool proto_down);
+ * This function is used to pass protocol port error state information
+ * to the switch driver. The switch driver can react to the proto_down
+ * by doing a phys down on the associated switch port.
+ *
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
+ int (*ndo_change_proto_down)(struct net_device *dev,
+ bool proto_down);
};
/**
@@ -1225,13 +1240,8 @@ struct net_device_ops {
*
* @IFF_802_1Q_VLAN: 802.1Q VLAN device
* @IFF_EBRIDGE: Ethernet bridging device
- * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
- * @IFF_MASTER_8023AD: bonding master, 802.3ad
- * @IFF_MASTER_ALB: bonding master, balance-alb
* @IFF_BONDING: bonding master or slave
- * @IFF_SLAVE_NEEDARP: need ARPs for validation
* @IFF_ISATAP: ISATAP interface (RFC4214)
- * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
* @IFF_WAN_HDLC: WAN HDLC device
* @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
* release skb->dst
@@ -1247,44 +1257,40 @@ struct net_device_ops {
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_NO_QUEUE: device can run without qdisc attached
+ * @IFF_OPENVSWITCH: device is an Open vSwitch master
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
IFF_EBRIDGE = 1<<1,
- IFF_SLAVE_INACTIVE = 1<<2,
- IFF_MASTER_8023AD = 1<<3,
- IFF_MASTER_ALB = 1<<4,
- IFF_BONDING = 1<<5,
- IFF_SLAVE_NEEDARP = 1<<6,
- IFF_ISATAP = 1<<7,
- IFF_MASTER_ARPMON = 1<<8,
- IFF_WAN_HDLC = 1<<9,
- IFF_XMIT_DST_RELEASE = 1<<10,
- IFF_DONT_BRIDGE = 1<<11,
- IFF_DISABLE_NETPOLL = 1<<12,
- IFF_MACVLAN_PORT = 1<<13,
- IFF_BRIDGE_PORT = 1<<14,
- IFF_OVS_DATAPATH = 1<<15,
- IFF_TX_SKB_SHARING = 1<<16,
- IFF_UNICAST_FLT = 1<<17,
- IFF_TEAM_PORT = 1<<18,
- IFF_SUPP_NOFCS = 1<<19,
- IFF_LIVE_ADDR_CHANGE = 1<<20,
- IFF_MACVLAN = 1<<21,
- IFF_XMIT_DST_RELEASE_PERM = 1<<22,
- IFF_IPVLAN_MASTER = 1<<23,
- IFF_IPVLAN_SLAVE = 1<<24,
+ IFF_BONDING = 1<<2,
+ IFF_ISATAP = 1<<3,
+ IFF_WAN_HDLC = 1<<4,
+ IFF_XMIT_DST_RELEASE = 1<<5,
+ IFF_DONT_BRIDGE = 1<<6,
+ IFF_DISABLE_NETPOLL = 1<<7,
+ IFF_MACVLAN_PORT = 1<<8,
+ IFF_BRIDGE_PORT = 1<<9,
+ IFF_OVS_DATAPATH = 1<<10,
+ IFF_TX_SKB_SHARING = 1<<11,
+ IFF_UNICAST_FLT = 1<<12,
+ IFF_TEAM_PORT = 1<<13,
+ IFF_SUPP_NOFCS = 1<<14,
+ IFF_LIVE_ADDR_CHANGE = 1<<15,
+ IFF_MACVLAN = 1<<16,
+ IFF_XMIT_DST_RELEASE_PERM = 1<<17,
+ IFF_IPVLAN_MASTER = 1<<18,
+ IFF_IPVLAN_SLAVE = 1<<19,
+ IFF_VRF_MASTER = 1<<20,
+ IFF_NO_QUEUE = 1<<21,
+ IFF_OPENVSWITCH = 1<<22,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_EBRIDGE IFF_EBRIDGE
-#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
-#define IFF_MASTER_8023AD IFF_MASTER_8023AD
-#define IFF_MASTER_ALB IFF_MASTER_ALB
#define IFF_BONDING IFF_BONDING
-#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
#define IFF_ISATAP IFF_ISATAP
-#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
#define IFF_WAN_HDLC IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
@@ -1301,6 +1307,9 @@ enum netdev_priv_flags {
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
+#define IFF_VRF_MASTER IFF_VRF_MASTER
+#define IFF_NO_QUEUE IFF_NO_QUEUE
+#define IFF_OPENVSWITCH IFF_OPENVSWITCH
/**
* struct net_device - The DEVICE structure.
@@ -1417,6 +1426,7 @@ enum netdev_priv_flags {
* @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
+ * @vrf_ptr: VRF specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
*
* @last_rx: Time of last Rx
@@ -1448,6 +1458,8 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
+ * @offload_fwd_mark: Offload device fwding mark
+ *
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog ( see dev_watchdog() )
@@ -1502,6 +1514,10 @@ enum netdev_priv_flags {
*
* @qdisc_tx_busylock: XXX: need comments on this one
*
+ * @proto_down: protocol port state information can be sent to the
+ * switch driver and used to set the phys state of the
+ * switch port.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1629,6 +1645,7 @@ struct net_device {
struct dn_dev __rcu *dn_ptr;
struct inet6_dev __rcu *ip6_ptr;
void *ax25_ptr;
+ struct net_vrf_dev __rcu *vrf_ptr;
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1685,6 +1702,10 @@ struct net_device {
struct xps_dev_maps __rcu *xps_maps;
#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ u32 offload_fwd_mark;
+#endif
+
/* These may be needed for future network-power-down code. */
/*
@@ -1762,6 +1783,7 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2093,6 +2115,13 @@ struct netdev_notifier_change_info {
unsigned int flags_changed;
};
+struct netdev_notifier_changeupper_info {
+ struct netdev_notifier_info info; /* must be first */
+ struct net_device *upper_dev; /* new upper dev */
+ bool master; /* is upper dev master */
+ bool linking; /* is the notification for link or unlink */
+};
+
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2277,8 +2306,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
- skb_gro_offset(skb));
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2374,37 +2402,58 @@ static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
grc->delta = 0;
}
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
{
__wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start =
- ((unsigned char *)ptr + start) - skb->head;
- return;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, off + plen)) {
+ ptr = skb_gro_header_slow(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
}
- delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
/* Adjust skb->csum since we changed the packet */
NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
- grc->offset = (ptr + offset) - (void *)skb->head;
+ grc->offset = off + hdrlen + offset;
grc->delta = delta;
+
+ return ptr;
}
static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
struct gro_remcsum *grc)
{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
if (!grc->delta)
return;
- remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+ ptr = skb_gro_header_fast(skb, grc->offset);
+ if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+ ptr = skb_gro_header_slow(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+ }
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -2982,6 +3031,7 @@ int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3781,6 +3831,42 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}
+static inline bool netif_is_vrf(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_VRF_MASTER;
+}
+
+static inline bool netif_is_bridge_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_EBRIDGE;
+}
+
+static inline bool netif_is_ovs_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_OPENVSWITCH;
+}
+
+static inline bool netif_index_is_vrf(struct net *net, int ifindex)
+{
+ bool rc = false;
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+ struct net_device *dev;
+
+ if (ifindex == 0)
+ return false;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ rc = netif_is_vrf(dev);
+
+ rcu_read_unlock();
+#endif
+ return rc;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
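
The proto_down plumbing added above (ndo_change_proto_down, dev_change_proto_down and the netdev proto_down flag) lets protocol error state be pushed down to a switch port. A hedged sketch of a switch-port driver backing the hook; struct my_port and my_port_set_link() are hypothetical stand-ins for the device-specific phys-down operation:

        struct my_port {
                int hw_index;
        };

        static int my_port_set_link(struct my_port *port, bool up)
        {
                /* hypothetical: program the switch ASIC link state for this port */
                return 0;
        }

        static int my_ndo_change_proto_down(struct net_device *dev, bool proto_down)
        {
                struct my_port *port = netdev_priv(dev);

                /* react to protocol error state by taking the phys link down */
                return my_port_set_link(port, !proto_down);
        }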
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 00050dfd9f23..36a652531791 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,8 @@
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@ struct nf_sockopt_ops {
};
/* Function to register/unregister hook points. */
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+
int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -128,33 +137,26 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
-extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
return static_key_false(&nf_hooks_needed[pf][hook]);
- return !list_empty(nf_hook_list);
+ return !list_empty(hook_list);
}
#else
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
u_int8_t pf, unsigned int hook)
{
- return !list_empty(nf_hook_list);
+ return !list_empty(hook_list);
}
#endif
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
-{
- return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
-}
-
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
@@ -172,10 +174,13 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct sock *, struct sk_buff *),
int thresh)
{
- if (nf_hooks_active(pf, hook)) {
+ struct net *net = dev_net(indev ? indev : outdev);
+ struct list_head *hook_list = &net->nf.hooks[pf][hook];
+
+ if (nf_hook_list_active(hook_list, pf, hook)) {
struct nf_hook_state state;
- nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+ nf_hook_state_init(&state, hook_list, hook, thresh,
pf, indev, outdev, sk, okfn);
return nf_hook_slow(skb, &state);
}
@@ -363,6 +368,8 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <linux/netfilter/nf_conntrack_zones_common.h>
+
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
@@ -385,4 +392,15 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook;
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When an xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by xtables TEE target to prevent the duplicated skb from
+ * being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
+
#endif /*__LINUX_NETFILTER_H*/
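
The new per-netns registration helpers above replace direct use of the global nf_hooks[] table. A sketch of how a module could move its hooks to the per-net API from a pernet subsystem; the ops array my_nf_ops and its hook functions are assumed to be defined elsewhere in the module:

        static const struct nf_hook_ops my_nf_ops[2];   /* assumed populated elsewhere */

        static int __net_init my_nf_net_init(struct net *net)
        {
                return nf_register_net_hooks(net, my_nf_ops, ARRAY_SIZE(my_nf_ops));
        }

        static void __net_exit my_nf_net_exit(struct net *net)
        {
                nf_unregister_net_hooks(net, my_nf_ops, ARRAY_SIZE(my_nf_ops));
        }

        static struct pernet_operations my_nf_net_ops = {
                .init = my_nf_net_init,
                .exit = my_nf_net_exit,
        };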
diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h
new file mode 100644
index 000000000000..5d7cf36d4766
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -0,0 +1,23 @@
+#ifndef _NF_CONNTRACK_ZONES_COMMON_H
+#define _NF_CONNTRACK_ZONES_COMMON_H
+
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_CT_DEFAULT_ZONE_ID 0
+
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK 1
+
+struct nf_conntrack_zone {
+ u16 id;
+ u8 flags;
+ u8 dir;
+};
+
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#endif /* _NF_CONNTRACK_ZONES_COMMON_H */
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 6ec975748742..80ca889b164e 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -2,6 +2,7 @@
#define _NFNL_ACCT_H_
#include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
enum {
NFACCT_NO_QUOTA = -1,
@@ -11,7 +12,7 @@ enum {
struct nf_acct;
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
extern int nfnl_acct_overquota(const struct sk_buff *skb,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 286098a5667f..b006b719183f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
#include <linux/netdevice.h>
+#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
@@ -222,7 +223,6 @@ struct xt_table_info {
* @stacksize jumps (number of user chains) can possibly be made.
*/
unsigned int stacksize;
- unsigned int __percpu *stackptr;
void ***jumpstack;
unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@ void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
/**
* xt_write_recseq_begin - start of a write section
*
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 6d80fc686323..2437b8a5d7a9 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,9 +17,6 @@ enum nf_br_hook_priorities {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_NF_BRIDGE_PREROUTING 0x08
-
int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@ nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
}
+
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return false;
+}
#endif /* CONFIG_BRIDGE_NETFILTER */
#endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 8b7d28f3aada..771574677e83 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,15 +9,6 @@
#include <uapi/linux/netfilter_ipv6.h>
-
-#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct sk_buff *skb);
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
-
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
* if IPv6 is a module.
@@ -30,6 +21,14 @@ struct nf_ipv6_ops {
int (*output)(struct sock *, struct sk_buff *));
};
+#ifdef CONFIG_NETFILTER
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
{
@@ -39,6 +38,7 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
#else /* CONFIG_NETFILTER */
static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
#endif /* CONFIG_NETFILTER */
#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c0d94ed8ce9a..b5812c395351 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -28,18 +28,32 @@ struct nvme_bar {
__u32 cc; /* Controller Configuration */
__u32 rsvd1; /* Reserved */
__u32 csts; /* Controller Status */
- __u32 rsvd2; /* Reserved */
+ __u32 nssr; /* Subsystem Reset */
__u32 aqa; /* Admin Queue Attributes */
__u64 asq; /* Admin SQ Base Address */
__u64 acq; /* Admin CQ Base Address */
+ __u32 cmbloc; /* Controller Memory Buffer Location */
+ __u32 cmbsz; /* Controller Memory Buffer Size */
};
#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
+#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
+#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
+#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
+
+#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
+#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
+#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
+#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
+#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+
enum {
NVME_CC_ENABLE = 1 << 0,
NVME_CC_CSS_NVM = 0 << 4,
@@ -55,6 +69,7 @@ enum {
NVME_CC_IOCQES = 4 << 20,
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
+ NVME_CSTS_NSSRO = 1 << 4,
NVME_CSTS_SHST_NORMAL = 0 << 2,
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
@@ -97,9 +112,14 @@ struct nvme_dev {
char serial[20];
char model[40];
char firmware_rev[8];
+ bool subsystem;
u32 max_hw_sectors;
u32 stripe_size;
u32 page_size;
+ void __iomem *cmb;
+ dma_addr_t cmb_dma_addr;
+ u64 cmb_size;
+ u32 cmbsz;
u16 oncs;
u16 abort_limit;
u8 event_limit;
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
new file mode 100644
index 000000000000..9bb77d3ed6e0
--- /dev/null
+++ b/include/linux/nvmem-consumer.h
@@ -0,0 +1,157 @@
+/*
+ * nvmem framework consumer.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_NVMEM_CONSUMER_H
+#define _LINUX_NVMEM_CONSUMER_H
+
+struct device;
+struct device_node;
+/* consumer cookie */
+struct nvmem_cell;
+struct nvmem_device;
+
+struct nvmem_cell_info {
+ const char *name;
+ unsigned int offset;
+ unsigned int bytes;
+ unsigned int bit_offset;
+ unsigned int nbits;
+};
+
+#if IS_ENABLED(CONFIG_NVMEM)
+
+/* Cell based interface */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+void nvmem_cell_put(struct nvmem_cell *cell);
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+
+/* direct nvmem device read/write interface */
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
+struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+ const char *name);
+void nvmem_device_put(struct nvmem_device *nvmem);
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);
+int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
+ size_t bytes, void *buf);
+int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
+ size_t bytes, void *buf);
+ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf);
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf);
+
+#else
+
+static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_nvmem_cell_put(struct device *dev,
+ struct nvmem_cell *cell)
+{
+
+}
+static inline void nvmem_cell_put(struct nvmem_cell *cell)
+{
+}
+
+static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int nvmem_cell_write(struct nvmem_cell *cell,
+ void *buf, size_t len)
+{
+ return -ENOSYS;
+}
+
+static inline struct nvmem_device *nvmem_device_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void nvmem_device_put(struct nvmem_device *nvmem)
+{
+}
+
+static inline void devm_nvmem_device_put(struct device *dev,
+ struct nvmem_device *nvmem)
+{
+}
+
+static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_read(struct nvmem_device *nvmem,
+ unsigned int offset, size_t bytes,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_write(struct nvmem_device *nvmem,
+ unsigned int offset, size_t bytes,
+ void *buf)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+ const char *name);
+struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+ const char *name);
+#else
+static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_NVMEM && CONFIG_OF */
+
+#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */
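
A consumer-side sketch of the new nvmem cell API; the cell name "calibration" and the caller are assumptions, and the buffer returned by nvmem_cell_read() is heap-allocated and must be freed by the caller:

        static int my_read_calibration(struct device *dev, u32 *cal)
        {
                struct nvmem_cell *cell;
                void *buf;
                size_t len;

                cell = nvmem_cell_get(dev, "calibration");
                if (IS_ERR(cell))
                        return PTR_ERR(cell);

                buf = nvmem_cell_read(cell, &len);
                nvmem_cell_put(cell);
                if (IS_ERR(buf))
                        return PTR_ERR(buf);

                if (len >= sizeof(*cal))
                        memcpy(cal, buf, sizeof(*cal));
                kfree(buf);
                return 0;
        }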
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
new file mode 100644
index 000000000000..0b68caff1b3c
--- /dev/null
+++ b/include/linux/nvmem-provider.h
@@ -0,0 +1,47 @@
+/*
+ * nvmem framework provider.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_NVMEM_PROVIDER_H
+#define _LINUX_NVMEM_PROVIDER_H
+
+struct nvmem_device;
+struct nvmem_cell_info;
+
+struct nvmem_config {
+ struct device *dev;
+ const char *name;
+ int id;
+ struct module *owner;
+ const struct nvmem_cell_info *cells;
+ int ncells;
+ bool read_only;
+};
+
+#if IS_ENABLED(CONFIG_NVMEM)
+
+struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
+int nvmem_unregister(struct nvmem_device *nvmem);
+
+#else
+
+static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int nvmem_unregister(struct nvmem_device *nvmem)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_NVMEM */
+
+#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
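
The matching provider side: a minimal, hedged registration sketch. The device name, read_only choice and probe wiring are illustrative, and the framework is assumed to find the backing storage (for example a regmap) from the registering device:

        static struct nvmem_config my_nvmem_config = {
                .name      = "my-eeprom",
                .id        = -1,
                .owner     = THIS_MODULE,
                .read_only = true,
        };

        static int my_probe(struct device *dev)
        {
                struct nvmem_device *nvmem;

                my_nvmem_config.dev = dev;
                nvmem = nvmem_register(&my_nvmem_config);

                return PTR_ERR_OR_ZERO(nvmem);
        }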
diff --git a/include/linux/of.h b/include/linux/of.h
index edc068d19c79..2194b8ca41f9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,8 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
{
- return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
+ return is_of_node(fwnode) ?
+ container_of(fwnode, struct device_node, fwnode) : NULL;
}
static inline bool of_have_populated_dt(void)
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d884929a7747..4bcbd586a672 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -74,6 +74,7 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
*/
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern void of_msi_configure(struct device *dev, struct device_node *np);
#else /* !CONFIG_OF */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 611a691145c4..956a1006aefc 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -72,6 +72,9 @@ extern int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
struct device *parent);
+extern int of_platform_default_populate(struct device_node *root,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent);
extern void of_platform_depopulate(struct device *parent);
#else
static inline int of_platform_populate(struct device_node *root,
@@ -81,6 +84,12 @@ static inline int of_platform_populate(struct device_node *root,
{
return -ENODEV;
}
+static inline int of_platform_default_populate(struct device_node *root,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent)
+{
+ return -ENODEV;
+}
static inline void of_platform_depopulate(struct device *parent) { }
#endif
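
of_platform_default_populate() is a convenience wrapper over of_platform_populate() using the default bus match table. A sketch of board code creating platform devices for the whole tree; the initcall name is illustrative:

#include <linux/init.h>
#include <linux/of_platform.h>

/* Passing NULL for root and lookup means "start at the device tree root,
 * with no auxdata overrides". */
static int __init example_board_populate(void)
{
        return of_platform_default_populate(NULL, NULL, NULL);
}
arch_initcall(example_board_populate);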
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 72031785fe1d..57e0b8250947 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -3,55 +3,6 @@
#include <linux/pci.h>
-/* Address Translation Service */
-struct pci_ats {
- int pos; /* capability position */
- int stu; /* Smallest Translation Unit */
- int qdep; /* Invalidate Queue Depth */
- int ref_cnt; /* Physical Function reference count */
- unsigned int is_enabled:1; /* Enable bit is set */
-};
-
-#ifdef CONFIG_PCI_ATS
-
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-
-/**
- * pci_ats_enabled - query the ATS status
- * @dev: the PCI device
- *
- * Returns 1 if ATS capability is enabled, or 0 if not.
- */
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
- return dev->ats && dev->ats->is_enabled;
-}
-
-#else /* CONFIG_PCI_ATS */
-
-static inline int pci_enable_ats(struct pci_dev *dev, int ps)
-{
- return -ENODEV;
-}
-
-static inline void pci_disable_ats(struct pci_dev *dev)
-{
-}
-
-static inline int pci_ats_queue_depth(struct pci_dev *dev)
-{
- return -ENODEV;
-}
-
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
- return 0;
-}
-
-#endif /* CONFIG_PCI_ATS */
-
#ifdef CONFIG_PCI_PRI
int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8a0321a8fb59..1a64733c48c7 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -180,6 +180,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
/* Do not use PM reset even if device advertises NoSoftRst- */
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
+ /* Get VPD from function 0 VPD */
+ PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
};
enum pci_irq_reroute_variant {
@@ -343,6 +345,7 @@ struct pci_dev {
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
+ unsigned int ats_enabled:1; /* Address Translation Service */
unsigned int is_managed:1;
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
@@ -366,7 +369,6 @@ struct pci_dev {
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
#ifdef CONFIG_PCI_MSI
- struct list_head msi_list;
const struct attribute_group **msi_irq_groups;
#endif
struct pci_vpd *vpd;
@@ -375,7 +377,9 @@ struct pci_dev {
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
};
- struct pci_ats *ats; /* Address Translation Service */
+ u16 ats_cap; /* ATS Capability offset */
+ u8 ats_stu; /* ATS Smallest Translation Unit */
+ atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
#endif
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
@@ -446,7 +450,8 @@ struct pci_bus {
struct list_head children; /* list of child buses */
struct list_head devices; /* list of devices on this bus */
struct pci_dev *self; /* bridge device as seen by parent */
- struct list_head slots; /* list of slots on this bus */
+ struct list_head slots; /* list of slots on this bus;
+ protected by pci_slot_mutex */
struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
struct list_head resources; /* address space routed to this bus */
struct resource busn_res; /* bus numbers routed to this bus */
@@ -738,10 +743,11 @@ struct pci_driver {
void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
- PCIE_BUS_TUNE_OFF,
- PCIE_BUS_SAFE,
- PCIE_BUS_PERFORMANCE,
- PCIE_BUS_PEER2PEER,
+ PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
+ PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
+ PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
+ PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
+ PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
};
extern enum pcie_bus_config_types pcie_bus_config;
@@ -787,6 +793,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
+struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources,
+ struct msi_controller *msi);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
@@ -797,6 +807,11 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name,
struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
+#ifdef CONFIG_SYSFS
+void pci_dev_assign_slot(struct pci_dev *dev);
+#else
+static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
+#endif
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -963,6 +978,23 @@ static inline int pci_is_managed(struct pci_dev *pdev)
return pdev->is_managed;
}
+static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
+{
+ pdev->irq = irq;
+ pdev->irq_managed = 1;
+}
+
+static inline void pci_reset_managed_irq(struct pci_dev *pdev)
+{
+ pdev->irq = 0;
+ pdev->irq_managed = 0;
+}
+
+static inline bool pci_has_managed_irq(struct pci_dev *pdev)
+{
+ return pdev->irq_managed && pdev->irq > 0;
+}
+
void pci_disable_device(struct pci_dev *dev);
extern unsigned int pcibios_max_latency;
@@ -1202,6 +1234,7 @@ struct msix_entry {
u16 entry; /* driver uses to specify entry, OS writes */
};
+void pci_msi_setup_pci_dev(struct pci_dev *dev);
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
@@ -1294,6 +1327,19 @@ int ht_create_irq(struct pci_dev *dev, int idx);
void ht_destroy_irq(unsigned int irq);
#endif /* CONFIG_HT_IRQ */
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+void pci_ats_init(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+#else
+static inline void pci_ats_init(struct pci_dev *d) { }
+static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+#endif
+
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1645,6 +1691,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
+int pcibios_alloc_irq(struct pci_dev *dev);
+void pcibios_free_irq(struct pci_dev *dev);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
@@ -1661,6 +1709,7 @@ static inline void pci_mmcfg_late_init(void) { }
int pci_ext_cfg_avail(void);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
+void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
@@ -1842,10 +1891,12 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
+struct irq_domain;
void pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -1868,6 +1919,8 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
+static inline struct irq_domain *
+pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
#endif /* CONFIG_OF */
#ifdef CONFIG_EEH
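
With struct pci_ats gone, callers see only the ats_enabled bit plus the pci_enable_ats()/pci_disable_ats()/pci_ats_queue_depth() entry points now declared in pci.h. A sketch of how an IOMMU-style user might drive them; the page-shift choice and function names are assumptions:

#include <linux/pci.h>

static int example_attach_ats(struct pci_dev *pdev, int *qdep)
{
        int ret;

        /* Request ATS with a smallest translation unit of one page. */
        ret = pci_enable_ats(pdev, PAGE_SHIFT);
        if (ret)
                return ret;

        /* Remember the invalidate queue depth for later flushes. */
        *qdep = pci_ats_queue_depth(pdev);
        return 0;
}

static void example_detach_ats(struct pci_dev *pdev)
{
        pci_disable_ats(pdev);
}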
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57f3a1c550dc..8f16299ca068 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -488,10 +488,8 @@ do { \
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
/*
- * Operations with implied preemption protection. These operations can be
- * used without worrying about preemption. Note that interrupts may still
- * occur while an operation is in progress and if the interrupt modifies
- * the variable too then RMW actions may not be reliable.
+ * Operations with implied preemption/interrupt protection. These
+ * operations can be used without worrying about preemption or interrupt.
*/
#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
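
The reworded comment reflects that the this_cpu_*() accessors are now documented as safe against both preemption and interrupts. A minimal sketch of a per-CPU counter bumped from any context:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_events);

/* Safe from process, softirq and hardirq context alike: the read-modify-
 * write happens as one protected per-CPU operation. */
static void example_count_event(void)
{
        this_cpu_inc(example_events);
}

static unsigned long example_events_on_this_cpu(void)
{
        return this_cpu_read(example_events);
}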
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 000000000000..bfa673bb822d
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/arm/include/asm/pmu.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+
+#include <asm/cputype.h>
+
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the
+ * interrupt and passed the address of the low level handler,
+ * and can be used to implement any platform specific handling
+ * before or after calling it.
+ */
+struct arm_pmu_platdata {
+ irqreturn_t (*handle_irq)(int irq, void *dev,
+ irq_handler_t pmu_handler);
+};
+
+#ifdef CONFIG_ARM_PMU
+
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED \
+ [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+ [0 ... C(OP_MAX) - 1] = { \
+ [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
+ }, \
+}
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+ /*
+ * The events that are active on the PMU for the given index.
+ */
+ struct perf_event *events[ARMPMU_MAX_HWEVENTS];
+
+ /*
+ * A 1 bit for an index indicates that the counter is being used for
+ * an event. A 0 means that the counter can be used.
+ */
+ DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
+
+ /*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+ raw_spinlock_t pmu_lock;
+
+ /*
+ * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+ * already have to allocate this struct per cpu.
+ */
+ struct arm_pmu *percpu_pmu;
+};
+
+struct arm_pmu {
+ struct pmu pmu;
+ cpumask_t active_irqs;
+ cpumask_t supported_cpus;
+ int *irq_affinity;
+ char *name;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct perf_event *event);
+ void (*disable)(struct perf_event *event);
+ int (*get_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
+ void (*clear_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
+ int (*set_event_filter)(struct hw_perf_event *evt,
+ struct perf_event_attr *attr);
+ u32 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u32 val);
+ void (*start)(struct arm_pmu *);
+ void (*stop)(struct arm_pmu *);
+ void (*reset)(void *);
+ int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+ void (*free_irq)(struct arm_pmu *);
+ int (*map_event)(struct perf_event *event);
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+ u64 max_period;
+ struct platform_device *plat_device;
+ struct pmu_hw_events __percpu *hw_events;
+ struct notifier_block hotplug_nb;
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int armpmu_register(struct arm_pmu *armpmu, int type);
+
+u64 armpmu_event_update(struct perf_event *event);
+
+int armpmu_event_set_period(struct perf_event *event);
+
+int armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask);
+
+struct pmu_probe_info {
+ unsigned int cpuid;
+ unsigned int mask;
+ int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn) \
+{ \
+ .cpuid = (_cpuid), \
+ .mask = (_mask), \
+ .init = (_fn), \
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+ PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+ PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table);
+
+#endif /* CONFIG_ARM_PMU */
+
+#endif /* __ARM_PMU_H__ */
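
A platform driver typically feeds arm_pmu_device_probe() an of_device_id table for DT boots and a pmu_probe_info table keyed on CPUID for legacy boots. A sketch assuming a Cortex-A9 target; the init callback and table names are hypothetical:

#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

static int example_a9_pmu_init(struct arm_pmu *pmu)
{
        pmu->name = "example_armv7_a9";
        /* fill in handle_irq, enable, disable, map_event, ... here */
        return 0;
}

static const struct of_device_id example_pmu_of_ids[] = {
        { .compatible = "arm,cortex-a9-pmu", .data = example_a9_pmu_init },
        { /* sentinel */ }
};

/* CPUID-based fallback used when there is no device tree match. */
static const struct pmu_probe_info example_pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, example_a9_pmu_init),
        { /* sentinel */ }
};

static int example_pmu_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, example_pmu_of_ids,
                                    example_pmu_probe_table);
}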
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809433b3..092a0e8a479a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,8 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
+extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
+extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -979,6 +982,12 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
+static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -1011,6 +1020,7 @@ static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
+static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
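
perf_event_get() resolves a perf fd into a kernel-held event, perf_event_attrs() exposes its attributes, and perf_event_read_local() samples the count on the current CPU without an IPI. A sketch of how a kernel user might combine them; error handling is trimmed and the function name is hypothetical:

#include <linux/err.h>
#include <linux/perf_event.h>

static int example_sample_event(unsigned int fd, u64 *count)
{
        struct perf_event *event;
        const struct perf_event_attr *attr;

        event = perf_event_get(fd);
        if (IS_ERR(event))
                return PTR_ERR(event);

        attr = perf_event_attrs(event);
        if (IS_ERR(attr)) {
                perf_event_release_kernel(event);
                return PTR_ERR(attr);
        }

        /* Only meaningful for events bound to this CPU or the current task. */
        *count = perf_event_read_local(event);
        perf_event_release_kernel(event);
        return 0;
}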
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a26c3f84b8dd..962387a192f1 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -330,6 +330,7 @@ struct phy_c45_device_ids {
* c45_ids: 802.3-c45 Device Identifers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
* is_internal: Set to true if this phy is internal to a MAC.
+ * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
@@ -368,6 +369,7 @@ struct phy_device {
struct phy_c45_device_ids c45_ids;
bool is_c45;
bool is_internal;
+ bool is_pseudo_fixed_link;
bool has_fixups;
bool suspended;
@@ -424,6 +426,8 @@ struct phy_device {
struct net_device *attached_dev;
+ u8 mdix;
+
void (*adjust_link)(struct net_device *dev);
};
#define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -686,6 +690,16 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
{
return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
+/*
+ * phy_is_pseudo_fixed_link - Convenience function for testing if this
+ * PHY is the CPU port facing side of an Ethernet switch, or similar.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
+{
+ return phydev->is_pseudo_fixed_link;
}
/**
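
phy_is_pseudo_fixed_link() lets MAC drivers skip real-PHY configuration when the attached phy_device only models the fixed link to a switch. A hedged sketch; the surrounding driver logic is assumed:

#include <linux/phy.h>

static void example_configure_link(struct phy_device *phydev)
{
        if (phy_is_pseudo_fixed_link(phydev)) {
                /* CPU-facing switch port: nothing to negotiate. */
                return;
        }

        /* A real PHY: restart autonegotiation with the generic helper. */
        genphy_config_aneg(phydev);
}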
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index fe5732d53eda..2400d2ea4f34 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,9 +13,11 @@ struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status);
+ struct fixed_phy_status *status,
+ int link_gpio);
extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int link_gpio,
struct device_node *np);
extern void fixed_phy_del(int phy_addr);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -26,12 +28,14 @@ extern int fixed_phy_update_state(struct phy_device *phydev,
const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status)
+ struct fixed_phy_status *status,
+ int link_gpio)
{
return -ENODEV;
}
static inline struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int gpio_link,
struct device_node *np)
{
return ERR_PTR(-ENODEV);
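
The extra link_gpio argument lets fixed_phy_register() poll a GPIO for link state; passing -1 keeps the previous behaviour. A sketch registering a gigabit, full-duplex fixed PHY with no GPIO and no DT node; the function name is illustrative:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

static struct phy_device *example_register_fixed_phy(void)
{
        struct fixed_phy_status status = {
                .link = 1,
                .speed = 1000,
                .duplex = 1,
        };

        /* PHY_POLL: no interrupt; -1: no link GPIO; NULL: no OF node. */
        return fixed_phy_register(PHY_POLL, &status, -1, NULL);
}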
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 4b452c6a2f7b..527a85c61924 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -46,18 +46,6 @@ struct at91_cf_data {
#define AT91_IDE_SWAP_A0_A2 0x02
};
- /* USB Host */
-#define AT91_MAX_USBH_PORTS 3
-struct at91_usbh_data {
- int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
- int overcurrent_pin[AT91_MAX_USBH_PORTS];
- u8 ports; /* number of ports on root hub */
- u8 overcurrent_supported;
- u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
- u8 overcurrent_status[AT91_MAX_USBH_PORTS];
- u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
-};
-
/* NAND / SmartMedia */
struct atmel_nand_data {
int enable_pin; /* chip enable */
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
index 97baf831e071..3af0da1f3be5 100644
--- a/include/linux/platform_data/clk-ux500.h
+++ b/include/linux/platform_data/clk-ux500.h
@@ -10,14 +10,8 @@
#ifndef __CLK_UX500_H
#define __CLK_UX500_H
-void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-
-void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
+void u8500_clk_init(void);
+void u9540_clk_init(void);
+void u8540_clk_init(void);
#endif /* __CLK_UX500_H */
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h
index 6a9fed57f346..eb8a6860e816 100644
--- a/include/linux/platform_data/leds-kirkwood-ns2.h
+++ b/include/linux/platform_data/leds-kirkwood-ns2.h
@@ -9,11 +9,25 @@
#ifndef __LEDS_KIRKWOOD_NS2_H
#define __LEDS_KIRKWOOD_NS2_H
+enum ns2_led_modes {
+ NS_V2_LED_OFF,
+ NS_V2_LED_ON,
+ NS_V2_LED_SATA,
+};
+
+struct ns2_led_modval {
+ enum ns2_led_modes mode;
+ int cmd_level;
+ int slow_level;
+};
+
struct ns2_led {
const char *name;
const char *default_trigger;
unsigned cmd;
unsigned slow;
+ int num_modes;
+ struct ns2_led_modval *modval;
};
struct ns2_led_platform_data {
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index 8dc2fa47a2aa..f4edcb03c40c 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -49,6 +49,7 @@ struct davinci_spi_platform_data {
u8 num_chipselect;
u8 intr_line;
u8 *chip_sel;
+ u8 prescaler_limit;
bool cshold_bug;
enum dma_event_q dma_event_q;
};
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
new file mode 100644
index 000000000000..54b04483976c
--- /dev/null
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -0,0 +1,20 @@
+/*
+ * MTK SPI bus driver definitions
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Leilk Liu <leilk.liu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H
+#define ____LINUX_PLATFORM_DATA_SPI_MTK_H
+
+/* Board specific platform_data */
+struct mtk_chip_config {
+ u32 tx_mlsb;
+ u32 rx_mlsb;
+};
+#endif
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
deleted file mode 100644
index d9d400a297bd..000000000000
--- a/include/linux/platform_data/st_nci.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Driver include for ST NCI NFC chip family.
- *
- * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ST_NCI_H_
-#define _ST_NCI_H_
-
-#define ST_NCI_DRIVER_NAME "st_nci"
-
-struct st_nci_nfc_platform_data {
- unsigned int gpio_reset;
- unsigned int irq_polarity;
-};
-
-#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h
index 92fc2b2232e7..699ac4109366 100644
--- a/include/linux/platform_data/video-ep93xx.h
+++ b/include/linux/platform_data/video-ep93xx.h
@@ -2,11 +2,8 @@
#define __VIDEO_EP93XX_H
struct platform_device;
-struct fb_videomode;
struct fb_info;
-#define EP93XXFB_USE_MODEDB 0
-
/* VideoAttributes flags */
#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0)
#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1)
@@ -38,12 +35,7 @@ struct fb_info;
EP93XXFB_PIXEL_DATA_ENABLE)
struct ep93xxfb_mach_info {
- unsigned int num_modes;
- const struct fb_videomode *modes;
- const struct fb_videomode *default_mode;
- int bpp;
unsigned int flags;
-
int (*setup)(struct platform_device *pdev);
void (*teardown)(struct platform_device *pdev);
void (*blank)(int blank_mode, struct fb_info *info);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 681ccb053f72..b1cf7e797892 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -22,9 +22,6 @@
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
- GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
- GPD_STATE_BUSY, /* Something is happening to the PM domain */
- GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
GPD_STATE_POWER_OFF, /* PM domain is off */
};
@@ -59,9 +56,6 @@ struct generic_pm_domain {
unsigned int in_progress; /* Number of devices being suspended now */
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
- wait_queue_head_t status_wait_queue;
- struct task_struct *poweroff_task; /* Powering off task */
- unsigned int resume_count; /* Number of devices being resumed */
unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
@@ -113,7 +107,6 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
- int need_restore;
};
#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -228,8 +221,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
return -ENOSYS;
}
static inline void pm_genpd_poweroff_unused(void) {}
-#define simple_qos_governor NULL
-#define pm_domain_always_on_gov NULL
#endif
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cec2d4540914..cab7ba55bedb 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -30,7 +30,10 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
+
int dev_pm_opp_get_opp_count(struct device *dev);
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
@@ -62,11 +65,21 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
+static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+ return false;
+}
+
static inline int dev_pm_opp_get_opp_count(struct device *dev)
{
return 0;
}
+static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+ return 0;
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
@@ -115,6 +128,10 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int of_init_opp_table(struct device *dev);
void of_free_opp_table(struct device *dev);
+int of_cpumask_init_opp_table(cpumask_var_t cpumask);
+void of_cpumask_free_opp_table(cpumask_var_t cpumask);
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
#else
static inline int of_init_opp_table(struct device *dev)
{
@@ -124,6 +141,25 @@ static inline int of_init_opp_table(struct device *dev)
static inline void of_free_opp_table(struct device *dev)
{
}
+
+static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
+
+static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask)
+{
+}
+
+static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
+
+static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
#endif
#endif /* __LINUX_OPP_H__ */
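
The cpumask helpers let a cpufreq driver build one OPP table for a set of CPUs and record that they share it. A sketch of an init path with error unwinding trimmed; the function name is hypothetical:

#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int example_init_shared_opps(struct cpufreq_policy *policy,
                                    struct device *cpu_dev)
{
        int ret;

        ret = of_cpumask_init_opp_table(policy->cpus);
        if (ret)
                return ret;

        ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
        if (ret)
                of_cpumask_free_opp_table(policy->cpus);

        return ret;
}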
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 7b3ae0cffc05..0f65d36c2a75 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
+int dev_pm_qos_expose_latency_tolerance(struct device *dev);
+void dev_pm_qos_hide_latency_tolerance(struct device *dev);
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
@@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{ return 0; }
+static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+ { return 0; }
+static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
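
dev_pm_qos_expose_latency_tolerance() and its hide counterpart let a driver decide whether the latency-tolerance attribute is visible to user space. A minimal sketch pairing them across probe and remove; the function names are illustrative:

#include <linux/pm_qos.h>

static int example_probe_qos(struct device *dev)
{
        /* Exposes the latency tolerance attribute under the device's
         * power directory in sysfs. */
        return dev_pm_qos_expose_latency_tolerance(dev);
}

static void example_remove_qos(struct device *dev)
{
        dev_pm_qos_hide_latency_tolerance(dev);
}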
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 30e84d48bfea..3bdbb4189780 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev)
return dev->power.runtime_status == RPM_SUSPENDED;
}
-static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
-{
- return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
-}
-
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
@@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
-static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 84991f185173..bea8dd8ff5e0 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -84,13 +84,21 @@
*/
#define in_nmi() (preempt_count() & NMI_MASK)
+/*
+ * The preempt_count offset after preempt_disable();
+ */
#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_DISABLE_OFFSET 1
+# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
-# define PREEMPT_DISABLE_OFFSET 0
+# define PREEMPT_DISABLE_OFFSET 0
#endif
/*
+ * The preempt_count offset after spin_lock()
+ */
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+
+/*
* The preempt_count offset needed for things like:
*
* spin_lock_bh()
@@ -103,7 +111,7 @@
*
* Work as expected.
*/
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
/*
* Are we running in atomic context? WARNING: this macro cannot
@@ -124,7 +132,8 @@
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+ ({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
@@ -184,7 +193,7 @@ do { \
#define preempt_check_resched() \
do { \
- if (should_resched()) \
+ if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
diff --git a/include/linux/property.h b/include/linux/property.h
index 76ebde9c11d4..a59c6ee566c2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -166,4 +166,8 @@ void device_add_property_set(struct device *dev, struct property_set *pset);
bool device_dma_is_coherent(struct device *dev);
+int device_get_phy_mode(struct device *dev);
+
+void *device_get_mac_address(struct device *dev, char *addr, int alen);
+
#endif /* _LINUX_PROPERTY_H_ */
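
device_get_phy_mode() and device_get_mac_address() give network drivers one call that works for both DT and ACPI firmware nodes. A sketch of a probe helper; the fallback to a random MAC is an assumption about driver policy:

#include <linux/etherdevice.h>
#include <linux/property.h>

static int example_get_net_properties(struct device *dev, char *mac)
{
        int phy_mode;

        phy_mode = device_get_phy_mode(dev); /* PHY_INTERFACE_MODE_* or -errno */
        if (phy_mode < 0)
                return phy_mode;

        if (!device_get_mac_address(dev, mac, ETH_ALEN))
                eth_random_addr((u8 *)mac);     /* no address in firmware */

        return phy_mode;
}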
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 00e8e8fa7358..5440f64d2942 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -33,7 +33,7 @@ struct prop_global {
/*
* global proportion descriptor
*
- * this is needed to consitently flip prop_global structures.
+ * this is needed to consistently flip prop_global structures.
*/
struct prop_descriptor {
int index;
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 000000000000..a682fcc91c33
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef __LINUX_PSCI_H
+#define __LINUX_PSCI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+bool psci_tos_resident_on(int cpu);
+
+struct psci_operations {
+ int (*cpu_suspend)(u32 state, unsigned long entry_point);
+ int (*cpu_off)(u32 state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+
+#if defined(CONFIG_ARM_PSCI_FW)
+int __init psci_dt_init(void);
+#else
+static inline int psci_dt_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
+
+#endif /* __LINUX_PSCI_H */
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 0485bab061fd..92273776bce6 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
QUARK_X1000_SSP,
LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
LPSS_BYT_SSP,
+ LPSS_SPT_SSP,
};
struct ssp_device {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 77ca6601ff25..7a57c28eb5e7 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -43,7 +43,7 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number);
void inode_sub_rsv_space(struct inode *inode, qsize_t number);
void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
-void dquot_initialize(struct inode *inode);
+int dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@ static inline int sb_has_quota_active(struct super_block *sb, int type)
return 0;
}
-static inline void dquot_initialize(struct inode *inode)
+static inline int dquot_initialize(struct inode *inode)
{
+ return 0;
}
static inline void dquot_drop(struct inode *inode)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4cf5f51b4c9c..ff476515f716 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -226,6 +226,37 @@ struct rcu_synchronize {
};
void wakeme_after_rcu(struct rcu_head *head);
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+ struct rcu_synchronize *rs_array);
+
+#define _wait_rcu_gp(checktiny, ...) \
+do { \
+ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+ const int __n = ARRAY_SIZE(__crcu_array); \
+ struct rcu_synchronize __rs_array[__n]; \
+ \
+ __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
+} while (0)
+
+#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+
+/**
+ * synchronize_rcu_mult - Wait concurrently for multiple grace periods
+ * @...: List of call_rcu() functions for the flavors to wait on.
+ *
+ * This macro waits concurrently for multiple flavors of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
+ * on concurrent RCU and RCU-bh grace periods. Waiting on a given SRCU
+ * domain requires you to write a wrapper function for that SRCU domain's
+ * call_srcu() function, supplying the corresponding srcu_struct.
+ *
+ * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
+ * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
+ * is automatically a grace period.
+ */
+#define synchronize_rcu_mult(...) \
+ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+
/**
* call_rcu_tasks() - Queue an RCU for invocation task-based grace period
* @head: structure to be used for queueing the RCU updates.
@@ -309,7 +340,7 @@ static inline void rcu_sysrq_end(void)
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
@@ -317,7 +348,7 @@ static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
@@ -392,10 +423,6 @@ bool __rcu_is_watching(void);
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
-typedef void call_rcu_func_t(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-void wait_rcu_gp(call_rcu_func_t crf);
-
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
@@ -469,46 +496,10 @@ int rcu_read_lock_bh_held(void);
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
* RCU-sched read-side critical section. In absence of
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise. Note that disabling
- * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section. This is useful for debug checks in functions
- * that required that they be called within an RCU-sched read-side
- * critical section.
- *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that if the CPU is in the idle loop from an RCU point of
- * view (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
- * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
- * that are in such a section, considering these as in extended quiescent
- * state, so such a CPU is effectively never in an RCU read-side critical
- * section regardless of what RCU primitives it invokes. This state of
- * affairs is required --- we need to keep an RCU-free window in idle
- * where the CPU may possibly enter into low power mode. This way we can
- * notice an extended quiescent state to other CPUs that started a grace
- * period. Otherwise we would delay any grace period as long as we run in
- * the idle task.
- *
- * Similarly, we avoid claiming an SRCU read lock held if the current
- * CPU is offline.
+ * critical section unless it can prove otherwise.
*/
#ifdef CONFIG_PREEMPT_COUNT
-static inline int rcu_read_lock_sched_held(void)
-{
- int lockdep_opinion = 0;
-
- if (!debug_lockdep_rcu_enabled())
- return 1;
- if (!rcu_is_watching())
- return 0;
- if (!rcu_lockdep_current_cpu_online())
- return 0;
- if (debug_locks)
- lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
- return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
-}
+int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
@@ -545,6 +536,11 @@ static inline int rcu_read_lock_sched_held(void)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */
+static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
+{
+}
+
#ifdef CONFIG_PROVE_RCU
/**
@@ -555,17 +551,32 @@ static inline int rcu_read_lock_sched_held(void)
#define rcu_lockdep_assert(c, s) \
do { \
static bool __section(.data.unlikely) __warned; \
+ deprecate_rcu_lockdep_assert(); \
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
} while (0)
+/**
+ * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
+ * @c: condition to check
+ * @s: informative message
+ */
+#define RCU_LOCKDEP_WARN(c, s) \
+ do { \
+ static bool __section(.data.unlikely) __warned; \
+ if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+ __warned = true; \
+ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
+ } \
+ } while (0)
+
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
- "Illegal context switch in RCU read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+ "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
@@ -576,15 +587,16 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
- "Illegal context switch in RCU-bh read-side critical section"); \
- rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
- "Illegal context switch in RCU-sched read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
#else /* #ifdef CONFIG_PROVE_RCU */
-#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -615,13 +627,13 @@ static inline void rcu_preempt_sleep_check(void)
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
@@ -845,8 +857,8 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock() used illegally while idle");
}
/*
@@ -896,8 +908,8 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock() used illegally while idle");
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -925,8 +937,8 @@ static inline void rcu_read_lock_bh(void)
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
}
/*
@@ -936,8 +948,8 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
local_bh_enable();
@@ -961,8 +973,8 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_sched() used illegally while idle");
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -979,8 +991,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
@@ -1031,7 +1043,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define RCU_INIT_POINTER(p, v) \
do { \
rcu_dereference_sparse(p, __rcu); \
- p = RCU_INITIALIZER(v); \
+ WRITE_ONCE(p, RCU_INITIALIZER(v)); \
} while (0)
/**
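
Two of the additions above pair naturally: synchronize_rcu_mult() waits on several grace-period flavors at once, and RCU_LOCKDEP_WARN() replaces rcu_lockdep_assert() with the condition stated positively. A short sketch of both; the surrounding teardown logic is assumed:

#include <linux/rcupdate.h>

static void example_teardown(void)
{
        /* Wait for normal RCU and RCU-sched grace periods concurrently. */
        synchronize_rcu_mult(call_rcu, call_rcu_sched);
}

static void example_check_context(void)
{
        /* Splat when the caller is NOT in an RCU read-side critical section. */
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "example_check_context() called outside rcu_read_lock()");
}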
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 3df6c1ec4e25..ff968b7af3a4 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -37,6 +37,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
+static inline unsigned long get_state_synchronize_sched(void)
+{
+ return 0;
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+ might_sleep();
+}
+
static inline void rcu_barrier_bh(void)
{
wait_rcu_gp(call_rcu_bh);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 456879143f89..5abec82f325e 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -76,6 +76,8 @@ void rcu_barrier_bh(void);
void rcu_barrier_sched(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
+unsigned long get_state_synchronize_sched(void);
+void cond_synchronize_sched(unsigned long oldstate);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a689ed62a5..9e0e76992be0 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -550,8 +550,24 @@ static inline int regulator_count_voltages(struct regulator *regulator)
{
return 0;
}
+
+static inline int regulator_list_voltage(struct regulator *regulator, unsigned selector)
+{
+ return -EINVAL;
+}
+
#endif
+static inline int regulator_set_voltage_triplet(struct regulator *regulator,
+ int min_uV, int target_uV,
+ int max_uV)
+{
+ if (regulator_set_voltage(regulator, target_uV, max_uV) == 0)
+ return 0;
+
+ return regulator_set_voltage(regulator, min_uV, max_uV);
+}
+
static inline int regulator_set_voltage_tol(struct regulator *regulator,
int new_uV, int tol_uV)
{
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5dd65acc2a69..a43a5ca1167b 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,16 +1,16 @@
/*
- * da9211.h - Regulator device driver for DA9211/DA9213
- * Copyright (C) 2014 Dialog Semiconductor Ltd.
+ * da9211.h - Regulator device driver for DA9211/DA9213/DA9215
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __LINUX_REGULATOR_DA9211_H
@@ -23,6 +23,7 @@
enum da9211_chip_id {
DA9211,
DA9213,
+ DA9215,
};
struct da9211_pdata {
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4db9fbe4889d..45932228cbf5 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -148,6 +148,7 @@ struct regulator_ops {
int (*get_current_limit) (struct regulator_dev *);
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
+ int (*set_over_current_protection) (struct regulator_dev *);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b11be1260129..a1067d0b3991 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -147,6 +147,7 @@ struct regulation_constraints {
unsigned ramp_disable:1; /* disable ramp delay */
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned over_current_protection:1; /* auto disable on over current */
};
/**
diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h
new file mode 100644
index 000000000000..8473259395b6
--- /dev/null
+++ b/include/linux/regulator/mt6311.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Henry Chen <henryc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6311_H
+#define __LINUX_REGULATOR_MT6311_H
+
+#define MT6311_MAX_REGULATORS 2
+
+enum {
+ MT6311_ID_VDVFS = 0,
+ MT6311_ID_VBIASN,
+};
+
+#define MT6311_E1_CID_CODE 0x10
+#define MT6311_E2_CID_CODE 0x20
+#define MT6311_E3_CID_CODE 0x30
+
+#endif /* __LINUX_REGULATOR_MT6311_H */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c820a7..556ec1ea2574 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
- BUG();
-#endif
-
/*
* offset and length are unused for chain entry. Clear them.
*/
@@ -251,6 +247,11 @@ struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+ const off_t skip, const int nb_splits,
+ const size_t *split_sizes,
+ struct scatterlist **out, int *out_mapped_nents,
+ gfp_t gfp_mask);
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
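
sg_split() carves one mapped scatterlist into several output lists allocated with gfp_mask. A sketch splitting a list into a 4 KiB and an 8 KiB piece; the sizes are illustrative and the outputs are assumed to be released with kfree() when no longer needed:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_split_sgl(struct scatterlist *in, int in_mapped_nents)
{
        const size_t split_sizes[2] = { 4096, 8192 };
        struct scatterlist *out[2];
        int out_mapped_nents[2];
        int ret;

        ret = sg_split(in, in_mapped_nents, 0 /* skip */, 2, split_sizes,
                       out, out_mapped_nents, GFP_KERNEL);
        if (ret)
                return ret;

        /* ... use out[0] and out[1] ... */
        kfree(out[0]);
        kfree(out[1]);
        return 0;
}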
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04b5ada460b4..119823decc46 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -530,39 +530,49 @@ struct cpu_itimer {
};
/**
- * struct cputime - snaphsot of system and user cputime
+ * struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
* @stime: time spent in system mode
+ * @lock: protects the above two fields
*
- * Gathers a generic snapshot of user and system time.
+ * Stores previous user/system time values such that we can guarantee
+ * monotonicity.
*/
-struct cputime {
+struct prev_cputime {
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
cputime_t utime;
cputime_t stime;
+ raw_spinlock_t lock;
+#endif
};
+static inline void prev_cputime_init(struct prev_cputime *prev)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ prev->utime = prev->stime = 0;
+ raw_spin_lock_init(&prev->lock);
+#endif
+}
+
/**
* struct task_cputime - collected CPU time counts
* @utime: time spent in user mode, in &cputime_t units
* @stime: time spent in kernel mode, in &cputime_t units
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
- * This is an extension of struct cputime that includes the total runtime
- * spent by the task from the scheduler point of view.
- *
- * As a result, this structure groups together three kinds of CPU time
- * that are tracked for threads and thread groups. Most things considering
- * CPU time want to group these counts together and treat all three
- * of them in parallel.
+ * This structure groups together three kinds of CPU time that are tracked for
+ * threads and thread groups. Most things considering CPU time want to group
+ * these counts together and treat all three of them in parallel.
*/
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
+
/* Alternate field names when used to cache expirations. */
-#define prof_exp stime
#define virt_exp utime
+#define prof_exp stime
#define sched_exp sum_exec_runtime
#define INIT_CPUTIME \
@@ -715,9 +725,7 @@ struct signal_struct {
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- struct cputime prev_cputime;
-#endif
+ struct prev_cputime prev_cputime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
@@ -1167,29 +1175,24 @@ struct load_weight {
u32 inv_weight;
};
+/*
+ * The load_avg/util_avg accumulates an infinite geometric series.
+ * 1) load_avg factors the amount of time that a sched_entity is
+ * runnable on a rq into its weight. For cfs_rq, it is the aggregated
+ * such weights of all runnable and blocked sched_entities.
+ * 2) util_avg factors frequency scaling into the amount of time
+ * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
+ * For cfs_rq, it is the aggregated such times of all runnable and
+ * blocked sched_entities.
+ * The 64 bit load_sum can:
+ * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
+ * the highest weight (=88761) always runnable, we should not overflow
+ * 2) for entity, support any load.weight always runnable
+ */
struct sched_avg {
- u64 last_runnable_update;
- s64 decay_count;
- /*
- * utilization_avg_contrib describes the amount of time that a
- * sched_entity is running on a CPU. It is based on running_avg_sum
- * and is scaled in the range [0..SCHED_LOAD_SCALE].
- * load_avg_contrib described the amount of time that a sched_entity
- * is runnable on a rq. It is based on both runnable_avg_sum and the
- * weight of the task.
- */
- unsigned long load_avg_contrib, utilization_avg_contrib;
- /*
- * These sums represent an infinite geometric series and so are bound
- * above by 1024/(1-y). Thus we only need a u32 to store them for all
- * choices of y < 1-2^(-32)*1024.
- * running_avg_sum reflects the time that the sched_entity is
- * effectively running on the CPU.
- * runnable_avg_sum represents the amount of time a sched_entity is on
- * a runqueue which includes the running time that is monitored by
- * running_avg_sum.
- */
- u32 runnable_avg_sum, avg_period, running_avg_sum;
+ u64 last_update_time, load_sum;
+ u32 util_sum, period_contrib;
+ unsigned long load_avg, util_avg;
};
#ifdef CONFIG_SCHEDSTATS
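To make the overflow bound in the new comment concrete (this note is not part of the patch): 47742 here is the upper bound of the decayed geometric series (LOAD_AVG_MAX in the scheduler) and 88761 is the largest task load weight (nice -20), so

        2^64 / 47742 / 88761 = 18446744073709551616 / 4237627662 ~= 4353082796

which is the figure quoted above: a 64-bit load_sum only overflows once a cfs_rq carries more than roughly 4.35 billion always-runnable entities at the maximum weight.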
@@ -1255,7 +1258,7 @@ struct sched_entity {
#endif
#ifdef CONFIG_SMP
- /* Per-entity load-tracking */
+ /* Per entity load average tracking */
struct sched_avg avg;
#endif
};
@@ -1351,9 +1354,9 @@ struct task_struct {
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
- struct task_struct *last_wakee;
- unsigned long wakee_flips;
+ unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
+ struct task_struct *last_wakee;
int wake_cpu;
#endif
@@ -1481,9 +1484,7 @@ struct task_struct {
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- struct cputime prev_cputime;
-#endif
+ struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqlock_t vtime_seqlock;
unsigned long long vtime_snap;
@@ -2214,13 +2215,6 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
-#ifndef CONFIG_CPUMASK_OFFSTACK
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
-{
- return set_cpus_allowed_ptr(p, &new_mask);
-}
-#endif
-
/*
* Do not use outside of architecture code which knows its limitations.
*
@@ -2897,12 +2891,6 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET 0
-#endif
-
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index ba82c07feb95..faa0e0370ce7 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -136,8 +136,6 @@ void serial8250_resume_port(int line);
extern int early_serial_setup(struct uart_port *port);
-extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
-extern void serial8250_early_out(struct uart_port *port, int offset, int value);
extern int early_serial8250_setup(struct earlycon_device *device,
const char *options);
extern void serial8250_do_set_termios(struct uart_port *port,
@@ -152,6 +150,11 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
+void serial8250_init_port(struct uart_8250_port *up);
+void serial8250_set_defaults(struct uart_8250_port *up);
+void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ unsigned int count);
+int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b88536487e6..2738d355cdf9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
+#include <net/flow.h>
/* A. Checksumming of received packets by device.
*
@@ -173,17 +174,24 @@ struct nf_bridge_info {
BRNF_PROTO_8021Q,
BRNF_PROTO_PPPOE
} orig_proto:8;
- bool pkt_otherhost;
+ u8 pkt_otherhost:1;
+ u8 in_prerouting:1;
+ u8 bridged_dnat:1;
__u16 frag_max_size;
- unsigned int mask;
struct net_device *physindev;
union {
- struct net_device *physoutdev;
- char neigh_header[8];
- };
- union {
+ /* prerouting: detect dnat in orig/reply direction */
__be32 ipv4_daddr;
struct in6_addr ipv6_daddr;
+
+ /* after prerouting + nat detected: store original source
+ * mac since neigh resolution overwrites it, only used while
+ * skb is out in neigh layer.
+ */
+ char neigh_header[8];
+
+ /* always valid & non-NULL from FORWARD on, for physdev match */
+ struct net_device *physoutdev;
};
};
#endif
@@ -506,6 +514,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
+ * @offload_fwd_mark: forwarding offload mark
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
+ union {
#ifdef CONFIG_NETWORK_SECMARK
- __u32 secmark;
+ __u32 secmark;
+#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ __u32 offload_fwd_mark;
#endif
+ };
+
union {
__u32 mark;
__u32 reserved_tailroom;
@@ -922,14 +937,90 @@ enum pkt_hash_types {
PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
};
-static inline void
-skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+static inline void skb_clear_hash(struct sk_buff *skb)
{
- skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+ skb->hash = 0;
skb->sw_hash = 0;
+ skb->l4_hash = 0;
+}
+
+static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+{
+ if (!skb->l4_hash)
+ skb_clear_hash(skb);
+}
+
+static inline void
+__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
+{
+ skb->l4_hash = is_l4;
+ skb->sw_hash = is_sw;
skb->hash = hash;
}
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+ /* Used by drivers to set hash from HW */
+ __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
+}
+
+static inline void
+__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
+{
+ __skb_set_hash(skb, hash, true, is_l4);
+}
+
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+ int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+ const struct flow_dissector_key *key,
+ unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
+ void *data, __be16 proto, int nhoff, int hlen,
+ unsigned int flags);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, unsigned int flags)
+{
+ return __skb_flow_dissect(skb, flow_dissector, target_container,
+ NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+ struct flow_keys *flow,
+ unsigned int flags)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+ NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+ void *data, __be16 proto,
+ int nhoff, int hlen,
+ unsigned int flags)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+ data, proto, nhoff, hlen, flags);
+}
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -938,24 +1029,39 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
-static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
+ if (!skb->l4_hash && !skb->sw_hash) {
+ struct flow_keys keys;
+ __u32 hash = __get_hash_from_flowi6(fl6, &keys);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+
return skb->hash;
}
-static inline void skb_clear_hash(struct sk_buff *skb)
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
- skb->hash = 0;
- skb->sw_hash = 0;
- skb->l4_hash = 0;
+ if (!skb->l4_hash && !skb->sw_hash) {
+ struct flow_keys keys;
+ __u32 hash = __get_hash_from_flowi4(fl4, &keys);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+
+ return skb->hash;
}
-static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
- if (!skb->l4_hash)
- skb_clear_hash(skb);
+ return skb->hash;
}
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
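A quick sketch (not from the patch) of how the reshuffled hash helpers fit together: a driver reports a hardware RSS hash with skb_set_hash(), and later consumers simply call skb_get_hash(), which falls back to software flow dissection via __skb_get_hash() only when nothing was set. The function below and its parameters are hypothetical.

#include <linux/skbuff.h>

/* Hypothetical receive-path helper, for illustration only. */
static void example_rx_set_hash(struct sk_buff *skb, u32 hw_hash, bool hw_l4)
{
        /* Hardware-provided hash: is_sw stays false inside __skb_set_hash(). */
        skb_set_hash(skb, hw_hash,
                     hw_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);

        /* Consumers just ask for the hash; a software hash is computed only
         * if neither l4_hash nor sw_hash has been set.
         */
        (void)skb_get_hash(skb);
}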
@@ -1943,7 +2049,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- else if (skb_flow_dissect_flow_keys(skb, &keys))
+ else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
@@ -2667,12 +2773,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
skb_shinfo(skb)->frag_list = NULL;
}
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
- frag->next = skb_shinfo(skb)->frag_list;
- skb_shinfo(skb)->frag_list = frag;
-}
-
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
@@ -3464,5 +3564,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
skb_network_header(skb);
return hdr_len + skb_gso_transport_seglen(skb);
}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h
new file mode 100644
index 000000000000..9c99f84bcc0e
--- /dev/null
+++ b/include/linux/soc/dove/pmu.h
@@ -0,0 +1,6 @@
+#ifndef LINUX_SOC_DOVE_PMU_H
+#define LINUX_SOC_DOVE_PMU_H
+
+int dove_init_pmu(void);
+
+#endif
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
new file mode 100644
index 000000000000..a5714e93fb34
--- /dev/null
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -0,0 +1,26 @@
+#ifndef __SOC_MEDIATEK_INFRACFG_H
+#define __SOC_MEDIATEK_INFRACFG_H
+
+#define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0)
+#define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1)
+#define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2)
+#define MT8173_TOP_AXI_PROT_EN_MMAPB_S BIT(6)
+#define MT8173_TOP_AXI_PROT_EN_L2C_M2 BIT(9)
+#define MT8173_TOP_AXI_PROT_EN_L2SS_SMI BIT(11)
+#define MT8173_TOP_AXI_PROT_EN_L2SS_ADD BIT(12)
+#define MT8173_TOP_AXI_PROT_EN_CCI_M2 BIT(13)
+#define MT8173_TOP_AXI_PROT_EN_MFG_S BIT(14)
+#define MT8173_TOP_AXI_PROT_EN_PERI_M0 BIT(15)
+#define MT8173_TOP_AXI_PROT_EN_PERI_M1 BIT(16)
+#define MT8173_TOP_AXI_PROT_EN_DEBUGSYS BIT(17)
+#define MT8173_TOP_AXI_PROT_EN_CQ_DMA BIT(18)
+#define MT8173_TOP_AXI_PROT_EN_GCPU BIT(19)
+#define MT8173_TOP_AXI_PROT_EN_IOMMU BIT(20)
+#define MT8173_TOP_AXI_PROT_EN_MFG_M0 BIT(21)
+#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
+#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask);
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask);
+
+#endif /* __SOC_MEDIATEK_INFRACFG_H */
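A possible caller of the two helpers above, asserting MT8173 bus protection around a power-domain transition; this sketch is not from the patch, and the "infracfg" syscon phandle name and the chosen mask bits are assumptions made for illustration.

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/soc/mediatek/infracfg.h>

static int example_mfg_protect(struct device_node *np)
{
        u32 mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
                   MT8173_TOP_AXI_PROT_EN_MFG_M0 |
                   MT8173_TOP_AXI_PROT_EN_MFG_M1;
        struct regmap *infracfg;
        int ret;

        /* The "infracfg" phandle name is an assumption for this example. */
        infracfg = syscon_regmap_lookup_by_phandle(np, "infracfg");
        if (IS_ERR(infracfg))
                return PTR_ERR(infracfg);

        ret = mtk_infracfg_set_bus_protection(infracfg, mask);
        if (ret)
                return ret;

        /* ... power the MFG domain down or back up ... */

        return mtk_infracfg_clear_bus_protection(infracfg, mask);
}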
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
new file mode 100644
index 000000000000..2a53dcaeeeed
--- /dev/null
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -0,0 +1,35 @@
+#ifndef __QCOM_SMD_RPM_H__
+#define __QCOM_SMD_RPM_H__
+
+struct qcom_smd_rpm;
+
+#define QCOM_SMD_RPM_ACTIVE_STATE 0
+#define QCOM_SMD_RPM_SLEEP_STATE 1
+
+/*
+ * Constants used for addressing resources in the RPM.
+ */
+#define QCOM_SMD_RPM_BOOST 0x61747362
+#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
+#define QCOM_SMD_RPM_BUS_MASTER 0x73616d62
+#define QCOM_SMD_RPM_BUS_SLAVE 0x766c7362
+#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
+#define QCOM_SMD_RPM_LDOA 0x616f646c
+#define QCOM_SMD_RPM_LDOB 0x626F646C
+#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63
+#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63
+#define QCOM_SMD_RPM_NCPA 0x6170636E
+#define QCOM_SMD_RPM_NCPB 0x6270636E
+#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
+#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
+#define QCOM_SMD_RPM_SMPA 0x61706d73
+#define QCOM_SMD_RPM_SMPB 0x62706d73
+#define QCOM_SMD_RPM_SPDM 0x63707362
+#define QCOM_SMD_RPM_VSA 0x00617376
+
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 resource_type, u32 resource_id,
+ void *buf, size_t count);
+
+#endif
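One way a regulator-style client could use qcom_rpm_smd_write(); this sketch is not from the patch. The key/length/value request layout and the "uv" key are assumptions modelled on typical RPM users; only the function prototype and the resource constants come from this header.

#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/soc/qcom/smd-rpm.h>

/* Hypothetical request layout, for illustration only. */
struct example_rpm_req {
        __le32 key;
        __le32 nbytes;
        __le32 value;
};

static int example_set_ldo3_uv(struct qcom_smd_rpm *rpm, u32 microvolts)
{
        struct example_rpm_req req = {
                .key    = cpu_to_le32(0x7675),          /* "uv", assumed key */
                .nbytes = cpu_to_le32(sizeof(u32)),
                .value  = cpu_to_le32(microvolts),
        };

        return qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
                                  QCOM_SMD_RPM_LDOA, 3, &req, sizeof(req));
}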
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
new file mode 100644
index 000000000000..d7e50aa6a4ac
--- /dev/null
+++ b/include/linux/soc/qcom/smd.h
@@ -0,0 +1,46 @@
+#ifndef __QCOM_SMD_H__
+#define __QCOM_SMD_H__
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct qcom_smd;
+struct qcom_smd_channel;
+struct qcom_smd_lookup;
+
+/**
+ * struct qcom_smd_device - smd device struct
+ * @dev: the device struct
+ * @channel: handle to the smd channel for this device
+ */
+struct qcom_smd_device {
+ struct device dev;
+ struct qcom_smd_channel *channel;
+};
+
+/**
+ * struct qcom_smd_driver - smd driver struct
+ * @driver: underlying device driver
+ * @probe: invoked when the smd channel is found
+ * @remove: invoked when the smd channel is closed
+ * @callback: invoked when an inbound message is received on the channel,
+ * should return 0 on success or -EBUSY if the data cannot be
+ * consumed at this time
+ */
+struct qcom_smd_driver {
+ struct device_driver driver;
+ int (*probe)(struct qcom_smd_device *dev);
+ void (*remove)(struct qcom_smd_device *dev);
+ int (*callback)(struct qcom_smd_device *, const void *, size_t);
+};
+
+int qcom_smd_driver_register(struct qcom_smd_driver *drv);
+void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
+
+#define module_qcom_smd_driver(__smd_driver) \
+ module_driver(__smd_driver, qcom_smd_driver_register, \
+ qcom_smd_driver_unregister)
+
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+#endif
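A minimal SMD client built on the interface above, registering with module_qcom_smd_driver(); this sketch is not from the patch, and the driver name and payload are made up for the example.

#include <linux/module.h>
#include <linux/soc/qcom/smd.h>

static int example_smd_probe(struct qcom_smd_device *sdev)
{
        static const char ping[] = "hello";

        return qcom_smd_send(sdev->channel, ping, sizeof(ping));
}

static int example_smd_callback(struct qcom_smd_device *sdev,
                                const void *data, size_t count)
{
        dev_info(&sdev->dev, "received %zu bytes\n", count);
        return 0;       /* consumed; -EBUSY would mean "retry later" */
}

static struct qcom_smd_driver example_smd_driver = {
        .probe          = example_smd_probe,
        .callback       = example_smd_callback,
        .driver = {
                .name   = "example_smd_client",
        },
};
module_qcom_smd_driver(example_smd_driver);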
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
new file mode 100644
index 000000000000..bc9630d3aced
--- /dev/null
+++ b/include/linux/soc/qcom/smem.h
@@ -0,0 +1,11 @@
+#ifndef __QCOM_SMEM_H__
+#define __QCOM_SMEM_H__
+
+#define QCOM_SMEM_HOST_ANY -1
+
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);
+
+int qcom_smem_get_free_space(unsigned host);
+
+#endif
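A sketch (not from the patch) of allocating and then mapping a shared-memory item with the two calls above; the host/item numbers, the size and the treatment of -EEXIST are arbitrary example assumptions.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/soc/qcom/smem.h>

static int example_smem_use(void)
{
        void *ptr;
        size_t size;
        int ret;

        ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, 42, 64);
        if (ret && ret != -EEXIST)      /* already allocated is fine here */
                return ret;

        ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, 42, &ptr, &size);
        if (ret)
                return ret;

        memset(ptr, 0, size);
        return 0;
}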
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index d673072346f2..269e8afd3e2a 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,8 @@
#include <linux/scatterlist.h>
struct dma_chan;
+struct spi_master;
+struct spi_transfer;
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -31,6 +33,59 @@ struct dma_chan;
extern struct bus_type spi_bus_type;
/**
+ * struct spi_statistics - statistics for spi transfers
+ * @lock: lock protecting this structure
+ *
+ * @messages: number of spi-messages handled
+ * @transfers: number of spi_transfers handled
+ * @errors: number of errors during spi_transfer
+ * @timedout: number of timeouts during spi_transfer
+ *
+ * @spi_sync: number of times spi_sync is used
+ * @spi_sync_immediate:
+ * number of times spi_sync is executed immediately
+ * in calling context without queuing and scheduling
+ * @spi_async: number of times spi_async is used
+ *
+ * @bytes: number of bytes transferred to/from device
+ * @bytes_tx: number of bytes sent to device
+ * @bytes_rx: number of bytes received from device
+ *
+ */
+struct spi_statistics {
+ spinlock_t lock; /* lock for the whole structure */
+
+ unsigned long messages;
+ unsigned long transfers;
+ unsigned long errors;
+ unsigned long timedout;
+
+ unsigned long spi_sync;
+ unsigned long spi_sync_immediate;
+ unsigned long spi_async;
+
+ unsigned long long bytes;
+ unsigned long long bytes_rx;
+ unsigned long long bytes_tx;
+
+};
+
+void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+ struct spi_transfer *xfer,
+ struct spi_master *master);
+
+#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
+ do { \
+ unsigned long flags; \
+ spin_lock_irqsave(&(stats)->lock, flags); \
+ (stats)->field += count; \
+ spin_unlock_irqrestore(&(stats)->lock, flags); \
+ } while (0)
+
+#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
+ SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+
+/**
* struct spi_device - Master side proxy for an SPI slave device
* @dev: Driver model representation of the device.
* @master: SPI controller used with the device.
@@ -60,6 +115,8 @@ extern struct bus_type spi_bus_type;
* @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
* when not using a GPIO line)
*
+ * @statistics: statistics for the spi_device
+ *
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
*
@@ -98,6 +155,9 @@ struct spi_device {
char modalias[SPI_NAME_SIZE];
int cs_gpio; /* chip select gpio */
+ /* the statistics */
+ struct spi_statistics statistics;
+
/*
* likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -296,6 +356,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
+ * @statistics: statistics for the spi_master
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
@@ -452,6 +513,9 @@ struct spi_master {
/* gpio chip select */
int *cs_gpios;
+ /* statistics */
+ struct spi_statistics statistics;
+
/* DMA channels for use with core dmaengine helpers */
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
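A sketch (not from the patch) of how a transfer path might feed the new statistics: bump the message counter with SPI_STATISTICS_INCREMENT_FIELD() and account each transfer for both the controller and the device. The function is hypothetical; the helpers and both statistics fields are declared in this diff.

#include <linux/spi/spi.h>

static void example_account_message(struct spi_master *master,
                                    struct spi_device *spi,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;

        SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, messages);
        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                spi_statistics_add_transfer_stats(&master->statistics,
                                                  xfer, master);
                spi_statistics_add_transfer_stats(&spi->statistics,
                                                  xfer, master);
        }
}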
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0063b24b4f36..47dd0cebd204 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,16 +130,6 @@ do { \
#define smp_mb__before_spinlock() smp_wmb()
#endif
-/*
- * Place this after a lock-acquisition primitive to guarantee that
- * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
- * if the UNLOCK and LOCK are executed by the same CPU or if the
- * UNLOCK and LOCK operate on the same lock variable.
- */
-#ifndef smp_mb__after_unlock_lock
-#define smp_mb__after_unlock_lock() do { } while (0)
-#endif
-
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
@@ -296,7 +286,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
@@ -307,17 +297,17 @@ do { \
raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
@@ -337,7 +327,7 @@ do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
@@ -352,32 +342,32 @@ do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
@@ -387,22 +377,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
raw_spin_unlock_wait(&lock->rlock);
}
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
{
return raw_spin_can_lock(&lock->rlock);
}
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c735f5c91eea..eead8ab93c0a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,30 +119,8 @@ struct plat_stmmacenet_data {
int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
- void *custom_cfg;
- void *custom_data;
void *bsp_priv;
};
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
- int has_gmac;
- int enh_desc;
- int tx_coe;
- int rx_coe;
- int bugged_jumbo;
- int pmt;
- int riwt_off;
- void (*fix_mac_speed)(void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
- int (*init)(struct platform_device *pdev, void *priv);
- void (*exit)(struct platform_device *pdev, void *priv);
-};
#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index d2abbdb8c6aa..414d924318ce 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -112,25 +112,13 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
*
* This can be thought of as a very heavy write lock, equivalent to
* grabbing every spinlock in the kernel. */
-int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
-/**
- * __stop_machine: freeze the machine on all CPUs and run this function
- * @fn: the function to run
- * @data: the data ptr for the @fn
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
- *
- * Description: This is a special version of the above, which assumes cpus
- * won't come or go while it's being called. Used by hotplug cpu.
- */
-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-
-int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
-
#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
-static inline int __stop_machine(int (*fn)(void *), void *data,
+static inline int stop_machine(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
unsigned long flags;
@@ -141,16 +129,10 @@ static inline int __stop_machine(int (*fn)(void *), void *data,
return ret;
}
-static inline int stop_machine(int (*fn)(void *), void *data,
- const struct cpumask *cpus)
-{
- return __stop_machine(fn, data, cpus);
-}
-
-static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
- return __stop_machine(fn, data, cpus);
+ return stop_machine(fn, data, cpus);
}
#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
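A sketch (not from the patch) of a callback with the cpu_stop_fn_t signature that stop_machine() now takes directly. While it runs, every other online CPU spins with interrupts disabled, so the update below is atomic with respect to everything else; the value being patched is a hypothetical example.

#include <linux/stop_machine.h>

static unsigned long example_value;

static int example_patch_step(void *data)
{
        unsigned long *value = data;

        *value ^= 1UL;
        return 0;
}

static int example_run(void)
{
        /* NULL cpumask: the callback runs on any one online CPU. */
        return stop_machine(example_patch_step, &example_value, NULL);
}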
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 38874729dc5f..31496d201fdc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -373,9 +373,9 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern void end_swap_bio_write(struct bio *bio, int err);
+extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
- void (*end_write_func)(struct bio *, int));
+ bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index c78dcfeaf25f..d4217eff489f 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,7 +86,6 @@ struct st_proto_s {
extern long st_register(struct st_proto_s *);
extern long st_unregister(struct st_proto_s *);
-extern struct ti_st_plat_data *dt_pdata;
/*
* header information used by st_core.c
diff --git a/include/linux/tick.h b/include/linux/tick.h
index edbfc9a5293e..48d901f83f92 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,22 +147,29 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
cpumask_or(mask, mask, tick_nohz_full_mask);
}
-extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
-extern void __tick_nohz_task_switch(struct task_struct *tsk);
+extern void __tick_nohz_task_switch(void);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
-static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
-static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
+static inline void __tick_nohz_task_switch(void) { }
#endif
+static inline const struct cpumask *housekeeping_cpumask(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+ if (tick_nohz_full_enabled())
+ return housekeeping_mask;
+#endif
+ return cpu_possible_mask;
+}
+
static inline bool is_housekeeping_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
@@ -181,16 +188,10 @@ static inline void housekeeping_affine(struct task_struct *t)
#endif
}
-static inline void tick_nohz_full_check(void)
-{
- if (tick_nohz_full_enabled())
- __tick_nohz_full_check();
-}
-
-static inline void tick_nohz_task_switch(struct task_struct *tsk)
+static inline void tick_nohz_task_switch(void)
{
if (tick_nohz_full_enabled())
- __tick_nohz_task_switch(tsk);
+ __tick_nohz_task_switch();
}
#endif
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 77b5df2acd2a..367d5af899e8 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,11 +12,18 @@ typedef __s64 time64_t;
*/
#if __BITS_PER_LONG == 64
# define timespec64 timespec
+#define itimerspec64 itimerspec
#else
struct timespec64 {
time64_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
+
+struct itimerspec64 {
+ struct timespec64 it_interval;
+ struct timespec64 it_value;
+};
+
#endif
/* Parameters used to convert the timespec values: */
@@ -45,6 +52,16 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
return ts;
}
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+ return *its64;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+ return *its;
+}
+
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
@@ -77,6 +94,24 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
return ret;
}
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+ struct itimerspec ret;
+
+ ret.it_interval = timespec64_to_timespec(its64->it_interval);
+ ret.it_value = timespec64_to_timespec(its64->it_value);
+ return ret;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+ struct itimerspec64 ret;
+
+ ret.it_interval = timespec_to_timespec64(its->it_interval);
+ ret.it_value = timespec_to_timespec64(its->it_value);
+ return ret;
+}
+
static inline int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
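A sketch (not from the patch) of a round trip through the new conversion helpers, as a timer syscall path might do while keeping its internal arithmetic in the y2038-safe itimerspec64; the function name is hypothetical.

#include <linux/time64.h>

static void example_itimerspec_roundtrip(struct itimerspec *user_its)
{
        struct itimerspec64 its64 = itimerspec_to_itimerspec64(user_its);

        /* ... operate on its64 with 64-bit time arithmetic ... */

        *user_its = itimerspec64_to_itimerspec(&its64);
}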
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 6e191e4e6ab6..ba0ae09cbb21 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -18,10 +18,17 @@ extern int do_sys_settimeofday(const struct timespec *tv,
* Kernel time accessors
*/
unsigned long get_seconds(void);
-struct timespec current_kernel_time(void);
+struct timespec64 current_kernel_time64(void);
/* does not take xtime_lock */
struct timespec __current_kernel_time(void);
+static inline struct timespec current_kernel_time(void)
+{
+ struct timespec64 now = current_kernel_time64();
+
+ return timespec64_to_timespec(now);
+}
+
/*
* timespec based interfaces
*/
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 1063c850dbab..ed27917cabc9 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -243,6 +243,7 @@ enum {
TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
+ TRACE_EVENT_FL_UPROBE_BIT,
};
/*
@@ -257,6 +258,7 @@ enum {
* USE_CALL_FILTER - For trace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
+ * UPROBE - Event is a uprobe
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -267,8 +269,11 @@ enum {
TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
+ TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
};
+#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
+
struct trace_event_call {
struct list_head list;
struct trace_event_class *class;
@@ -542,7 +547,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
event_triggers_post_call(file, tt);
}
-#ifdef CONFIG_BPF_SYSCALL
+#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index ad6c8913aa3e..d072ded41678 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -709,4 +709,10 @@ static inline void proc_tty_register_driver(struct tty_driver *d) {}
static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
#endif
+#define tty_debug(tty, f, args...) \
+ do { \
+ printk(KERN_DEBUG "%s: %s: " f, __func__, \
+ tty_name(tty), ##args); \
+ } while (0)
+
#endif
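A sketch (not from the patch) of the new tty_debug() macro in use: the calling function and the tty name are prefixed automatically, so the call below would print something like "example_report_open: ttyS0: opened, count=1". The function is hypothetical.

#include <linux/tty.h>

static void example_report_open(struct tty_struct *tty)
{
        tty_debug(tty, "opened, count=%d\n", tty->count);
}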
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 92e337c18839..161052477f77 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -296,7 +296,7 @@ struct tty_operations {
struct tty_driver {
int magic; /* magic number for this structure */
struct kref kref; /* Reference management */
- struct cdev *cdevs;
+ struct cdev **cdevs;
struct module *owner;
const char *driver_name;
const char *name;
diff --git a/include/linux/types.h b/include/linux/types.h
index 8715287c3b1f..c314989d9158 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,6 +212,9 @@ struct callback_head {
};
#define rcu_head callback_head
+typedef void (*rcu_callback_t)(struct rcu_head *head);
+typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+
/* clocksource cycle base type */
typedef u64 cycle_t;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ae572c138607..d6f2c2c5b043 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -129,4 +129,6 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+
#endif /* __LINUX_UACCESS_H__ */
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 60beb5dc7977..0bdc72f36905 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -92,6 +92,22 @@ struct uprobe_task {
unsigned int depth;
};
+struct return_instance {
+ struct uprobe *uprobe;
+ unsigned long func;
+ unsigned long stack; /* stack pointer */
+ unsigned long orig_ret_vaddr; /* original return address */
+ bool chained; /* true, if instance is nested */
+
+ struct return_instance *next; /* keep as stack */
+};
+
+enum rp_check {
+ RP_CHECK_CALL,
+ RP_CHECK_CHAIN_CALL,
+ RP_CHECK_RET,
+};
+
struct xol_area;
struct uprobes_state {
@@ -128,6 +144,7 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
+extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index ab94f78c4dd1..a41833cd184c 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -19,8 +19,11 @@ struct ci_hdrc_platform_data {
enum usb_phy_interface phy_mode;
unsigned long flags;
#define CI_HDRC_REGS_SHARED BIT(0)
+#define CI_HDRC_DISABLE_DEVICE_STREAMING BIT(1)
#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
-#define CI_HDRC_DISABLE_STREAMING BIT(3)
+#define CI_HDRC_DISABLE_HOST_STREAMING BIT(3)
+#define CI_HDRC_DISABLE_STREAMING (CI_HDRC_DISABLE_DEVICE_STREAMING | \
+ CI_HDRC_DISABLE_HOST_STREAMING)
/*
* Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
* but otg is not supported (no register otgsc).
@@ -29,12 +32,22 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_IMX28_WRITE_FIX BIT(5)
#define CI_HDRC_FORCE_FULLSPEED BIT(6)
#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7)
+#define CI_HDRC_SET_NON_ZERO_TTHA BIT(8)
+#define CI_HDRC_OVERRIDE_AHB_BURST BIT(9)
+#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
+#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
void (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
+ struct usb_otg_caps ci_otg_caps;
bool tpl_support;
+ /* interrupt threshold setting */
+ u32 itc_setting;
+ u32 ahb_burst_config;
+ u32 tx_burst_size;
+ u32 rx_burst_size;
};
/* Default offset of capability registers */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 2511469a9904..1074b8921a5d 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -228,6 +228,8 @@ struct usb_function {
struct list_head list;
DECLARE_BITMAP(endpoints, 32);
const struct usb_function_instance *fi;
+
+ unsigned int bind_deactivated:1;
};
int usb_add_function(struct usb_configuration *, struct usb_function *);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 4f3dfb7d0654..c14a69b36d27 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -141,10 +141,49 @@ struct usb_ep_ops {
};
/**
+ * struct usb_ep_caps - endpoint capabilities description
+ * @type_control:Endpoint supports control type (reserved for ep0).
+ * @type_iso:Endpoint supports isochronous transfers.
+ * @type_bulk:Endpoint supports bulk transfers.
+ * @type_int:Endpoint supports interrupt transfers.
+ * @dir_in:Endpoint supports IN direction.
+ * @dir_out:Endpoint supports OUT direction.
+ */
+struct usb_ep_caps {
+ unsigned type_control:1;
+ unsigned type_iso:1;
+ unsigned type_bulk:1;
+ unsigned type_int:1;
+ unsigned dir_in:1;
+ unsigned dir_out:1;
+};
+
+#define USB_EP_CAPS_TYPE_CONTROL 0x01
+#define USB_EP_CAPS_TYPE_ISO 0x02
+#define USB_EP_CAPS_TYPE_BULK 0x04
+#define USB_EP_CAPS_TYPE_INT 0x08
+#define USB_EP_CAPS_TYPE_ALL \
+ (USB_EP_CAPS_TYPE_ISO | USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
+#define USB_EP_CAPS_DIR_IN 0x01
+#define USB_EP_CAPS_DIR_OUT 0x02
+#define USB_EP_CAPS_DIR_ALL (USB_EP_CAPS_DIR_IN | USB_EP_CAPS_DIR_OUT)
+
+#define USB_EP_CAPS(_type, _dir) \
+ { \
+ .type_control = !!(_type & USB_EP_CAPS_TYPE_CONTROL), \
+ .type_iso = !!(_type & USB_EP_CAPS_TYPE_ISO), \
+ .type_bulk = !!(_type & USB_EP_CAPS_TYPE_BULK), \
+ .type_int = !!(_type & USB_EP_CAPS_TYPE_INT), \
+ .dir_in = !!(_dir & USB_EP_CAPS_DIR_IN), \
+ .dir_out = !!(_dir & USB_EP_CAPS_DIR_OUT), \
+ }
+
+/**
* struct usb_ep - device side representation of USB endpoint
* @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
* @ops: Function pointers used to access hardware-specific operations.
* @ep_list:the gadget's ep_list holds all of its endpoints
+ * @caps:The structure describing types and directions supported by endpoint.
* @maxpacket:The maximum packet size used on this endpoint. The initial
* value can sometimes be reduced (hardware allowing), according to
* the endpoint descriptor used to configure the endpoint.
@@ -167,12 +206,15 @@ struct usb_ep_ops {
* gadget->ep_list. the control endpoint (gadget->ep0) is not in that list,
* and is accessed only in response to a driver setup() callback.
*/
+
struct usb_ep {
void *driver_data;
const char *name;
const struct usb_ep_ops *ops;
struct list_head ep_list;
+ struct usb_ep_caps caps;
+ bool claimed;
unsigned maxpacket:16;
unsigned maxpacket_limit:16;
unsigned max_streams:16;
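A sketch (not from the patch) of how a UDC driver can describe one of its endpoints with the new caps field and the USB_EP_CAPS() initializer; the endpoint name and capability choice are illustrative. The UDC core compares these bits against a function's descriptors when matching endpoints.

#include <linux/usb/gadget.h>

static struct usb_ep example_ep = {
        .name = "ep1-bulk",
        .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT,
                            USB_EP_CAPS_DIR_ALL),
};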
@@ -492,6 +534,9 @@ struct usb_gadget_ops {
int (*udc_start)(struct usb_gadget *,
struct usb_gadget_driver *);
int (*udc_stop)(struct usb_gadget *);
+ struct usb_ep *(*match_ep)(struct usb_gadget *,
+ struct usb_endpoint_descriptor *,
+ struct usb_ss_ep_comp_descriptor *);
};
/**
@@ -511,6 +556,7 @@ struct usb_gadget_ops {
* @dev: Driver model state for this abstract device.
* @out_epnum: last used out ep number
* @in_epnum: last used in ep number
+ * @otg_caps: OTG capabilities of this gadget.
* @sg_supported: true if we can handle scatter-gather
* @is_otg: True if the USB device port uses a Mini-AB jack, so that the
* gadget driver must provide a USB OTG descriptor.
@@ -526,6 +572,9 @@ struct usb_gadget_ops {
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
* @is_selfpowered: if the gadget is self-powered.
+ * @deactivated: True if gadget is deactivated - in deactivated state it cannot
+ * be connected.
+ * @connected: True if gadget is connected.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -559,6 +608,7 @@ struct usb_gadget {
struct device dev;
unsigned out_epnum;
unsigned in_epnum;
+ struct usb_otg_caps *otg_caps;
unsigned sg_supported:1;
unsigned is_otg:1;
@@ -567,7 +617,12 @@ struct usb_gadget {
unsigned a_hnp_support:1;
unsigned a_alt_hnp_support:1;
unsigned quirk_ep_out_aligned_size:1;
+ unsigned quirk_altset_not_supp:1;
+ unsigned quirk_stall_not_supp:1;
+ unsigned quirk_zlp_not_supp:1;
unsigned is_selfpowered:1;
+ unsigned deactivated:1;
+ unsigned connected:1;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
@@ -584,7 +639,6 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
#define gadget_for_each_ep(tmp, gadget) \
list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
-
/**
* usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
* requires quirk_ep_out_aligned_size, otherwise returns len.
@@ -603,6 +657,34 @@ usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
}
/**
+ * gadget_is_altset_supported - return true iff the hardware supports
+ * altsettings
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_altset_supported(struct usb_gadget *g)
+{
+ return !g->quirk_altset_not_supp;
+}
+
+/**
+ * gadget_is_stall_supported - return true iff the hardware supports stalling
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_stall_supported(struct usb_gadget *g)
+{
+ return !g->quirk_stall_not_supp;
+}
+
+/**
+ * gadget_is_zlp_supported - return true iff the hardware supports zlp
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_zlp_supported(struct usb_gadget *g)
+{
+ return !g->quirk_zlp_not_supp;
+}
+
+/**
* gadget_is_dualspeed - return true iff the hardware handles high speed
* @g: controller that might support both high and full speeds
*/
@@ -771,9 +853,24 @@ static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
*/
static inline int usb_gadget_connect(struct usb_gadget *gadget)
{
+ int ret;
+
if (!gadget->ops->pullup)
return -EOPNOTSUPP;
- return gadget->ops->pullup(gadget, 1);
+
+ if (gadget->deactivated) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will be connected automatically after activation.
+ */
+ gadget->connected = true;
+ return 0;
+ }
+
+ ret = gadget->ops->pullup(gadget, 1);
+ if (!ret)
+ gadget->connected = 1;
+ return ret;
}
/**
@@ -784,20 +881,88 @@ static inline int usb_gadget_connect(struct usb_gadget *gadget)
* as a disconnect (when a VBUS session is active). Not all systems
* support software pullup controls.
*
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+ int ret;
+
+ if (!gadget->ops->pullup)
+ return -EOPNOTSUPP;
+
+ if (gadget->deactivated) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will stay disconnected after activation.
+ */
+ gadget->connected = false;
+ return 0;
+ }
+
+ ret = gadget->ops->pullup(gadget, 0);
+ if (!ret)
+ gadget->connected = 0;
+ return ret;
+}
+
+/**
+ * usb_gadget_deactivate - deactivate function which is not ready to work
+ * @gadget: the peripheral being deactivated
+ *
* This routine may be used during the gadget driver bind() call to prevent
* the peripheral from ever being visible to the USB host, unless later
- * usb_gadget_connect() is called. For example, user mode components may
+ * usb_gadget_activate() is called. For example, user mode components may
* need to be activated before the system can talk to hosts.
*
* Returns zero on success, else negative errno.
*/
-static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
{
- if (!gadget->ops->pullup)
- return -EOPNOTSUPP;
- return gadget->ops->pullup(gadget, 0);
+ int ret;
+
+ if (gadget->deactivated)
+ return 0;
+
+ if (gadget->connected) {
+ ret = usb_gadget_disconnect(gadget);
+ if (ret)
+ return ret;
+ /*
+ * If gadget was being connected before deactivation, we want
+ * to reconnect it in usb_gadget_activate().
+ */
+ gadget->connected = true;
+ }
+ gadget->deactivated = true;
+
+ return 0;
}
+/**
+ * usb_gadget_activate - activate function which is not ready to work
+ * @gadget: the peripheral being activated
+ *
+ * This routine activates gadget which was previously deactivated with
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_activate(struct usb_gadget *gadget)
+{
+ if (!gadget->deactivated)
+ return 0;
+
+ gadget->deactivated = false;
+
+ /*
+ * If gadget has been connected before deactivation, or became connected
+ * while it was being deactivated, we call usb_gadget_connect().
+ */
+ if (gadget->connected)
+ return usb_gadget_connect(gadget);
+
+ return 0;
+}
/*-------------------------------------------------------------------------*/
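A sketch (not from the patch) of the intended deactivate/activate flow for a function driver that must wait for userspace before it can answer the host; the function names are hypothetical.

#include <linux/usb/gadget.h>

static int example_bind(struct usb_gadget *gadget)
{
        /* Keep the pullup off until userspace opens our control node. */
        return usb_gadget_deactivate(gadget);
}

static void example_userspace_ready(struct usb_gadget *gadget)
{
        /* Reconnects automatically if the gadget was logically connected. */
        usb_gadget_activate(gadget);
}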
@@ -1002,6 +1167,10 @@ int usb_assign_descriptors(struct usb_function *f,
struct usb_descriptor_header **ss);
void usb_free_all_descriptors(struct usb_function *f);
+struct usb_descriptor_header *usb_otg_descriptor_alloc(
+ struct usb_gadget *gadget);
+int usb_otg_descriptor_init(struct usb_gadget *gadget,
+ struct usb_descriptor_header *otg_desc);
/*-------------------------------------------------------------------------*/
/* utility to simplify map/unmap of usb_requests to/from DMA */
@@ -1034,6 +1203,21 @@ extern void usb_gadget_giveback_request(struct usb_ep *ep,
/*-------------------------------------------------------------------------*/
+/* utility to find endpoint by name */
+
+extern struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g,
+ const char *name);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to check if endpoint caps match descriptor needs */
+
+extern int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp);
+
+/*-------------------------------------------------------------------------*/
+
/* utility to update vbus status for udc core, it may be scheduled */
extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index c9aa7792de10..d2784c10bfe2 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -564,9 +564,9 @@ extern void usb_ep0_reinit(struct usb_device *);
/*-------------------------------------------------------------------------*/
-/* class requests from USB 3.0 hub spec, table 10-5 */
-#define SetHubDepth (0x3000 | HUB_SET_DEPTH)
-#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT)
+/* class requests from USB 3.1 hub spec, table 10-7 */
+#define SetHubDepth (0x2000 | HUB_SET_DEPTH)
+#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT)
/*
* Generic bandwidth allocation constants/support
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index e55a1504266e..8c8f6854c993 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -128,7 +128,7 @@ struct msm_otg_platform_data {
*/
struct msm_usb_cable {
struct notifier_block nb;
- struct extcon_specific_cable_nb conn;
+ struct extcon_dev *extcon;
};
/**
@@ -155,6 +155,10 @@ struct msm_usb_cable {
* starting controller using usbcmd run/stop bit.
* @vbus: VBUS signal state tracking, using extcon framework
* @id: ID signal state tracking, using extcon framework
+ * @switch_gpio: Descriptor for GPIO used to control external Dual
+ * SPDT USB Switch.
+ * @reboot: Used to inform the driver to route USB D+/D- line to Device
+ * connector
*/
struct msm_otg {
struct usb_phy phy;
@@ -188,6 +192,9 @@ struct msm_otg {
struct msm_usb_cable vbus;
struct msm_usb_cable id;
+
+ struct gpio_desc *switch_gpio;
+ struct notifier_block reboot;
};
#endif
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index cfe0528cdbb1..8c5a818ec244 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -15,6 +15,8 @@
enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
bool of_usb_host_tpl_support(struct device_node *np);
+int of_usb_update_otg_caps(struct device_node *np,
+ struct usb_otg_caps *otg_caps);
#else
static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
{
@@ -30,6 +32,11 @@ static inline bool of_usb_host_tpl_support(struct device_node *np)
{
return false;
}
+static inline int of_usb_update_otg_caps(struct device_node *np,
+ struct usb_otg_caps *otg_caps)
+{
+ return 0;
+}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 52661c5da690..bd1dcf816100 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -41,6 +41,21 @@ struct usb_otg {
};
+/**
+ * struct usb_otg_caps - describes the otg capabilities of the device
+ * @otg_rev: The OTG revision number the device is compliant with,
+ * expressed in binary-coded decimal (e.g. 2.0 is 0200H).
+ * @hnp_support: Indicates if the device supports HNP.
+ * @srp_support: Indicates if the device supports SRP.
+ * @adp_support: Indicates if the device supports ADP.
+ */
+struct usb_otg_caps {
+ u16 otg_rev;
+ bool hnp_support;
+ bool srp_support;
+ bool adp_support;
+};
+
extern const char *usb_otg_state_string(enum usb_otg_state state);
/* Context: can sleep */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 738b30b39b68..0197358f1e81 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -265,7 +265,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
/**
* delayed_work_pending - Find out whether a delayable work item is currently
* pending
- * @work: The work item in question
+ * @w: The work item in question
*/
#define delayed_work_pending(w) \
work_pending(&(w)->work)
@@ -366,7 +366,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
* @max_active: max in-flight work items, 0 for default
- * @args: args for @fmt
+ * @args...: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
* information on WQ_* flags, please refer to Documentation/workqueue.txt.
@@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args: args for @fmt
+ * @args...: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are