summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/cppc_acpi.c24
-rw-r--r--drivers/acpi/ec.c41
-rw-r--r--drivers/acpi/nfit.c3
-rw-r--r--drivers/acpi/numa.c16
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/acpi/sysfs.c7
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/bcma/bcma_private.h2
-rw-r--r--drivers/bluetooth/btusb.c11
-rw-r--r--drivers/bluetooth/hci_intel.c6
-rw-r--r--drivers/char/Kconfig21
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/hw_random/exynos-rng.c9
-rw-r--r--drivers/char/random.c42
-rw-r--r--drivers/char/rdbg.c1165
-rw-r--r--drivers/clk/clk-xgene.c3
-rw-r--r--drivers/clk/clk.c6
-rw-r--r--drivers/clk/clk.h2
-rw-r--r--drivers/clk/msm/clock-gpu-8998.c9
-rw-r--r--drivers/clk/msm/clock-osm.c4
-rw-r--r--drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c8
-rw-r--r--drivers/clk/qcom/Kconfig2
-rw-r--r--drivers/clk/qcom/Makefile2
-rw-r--r--drivers/clk/qcom/clk-branch.c67
-rw-r--r--drivers/clk/qcom/clk-branch.h1
-rw-r--r--drivers/clk/qcom/clk-rcg.h4
-rw-r--r--drivers/clk/qcom/clk-rcg2.c188
-rw-r--r--drivers/clk/qcom/common.c218
-rw-r--r--drivers/clk/qcom/common.h91
-rw-r--r--drivers/clk/qcom/gcc-msmfalcon.c16
-rw-r--r--drivers/clk/qcom/mdss/Kconfig3
-rw-r--r--drivers/clk/qcom/mdss/Makefile9
-rw-r--r--drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c (renamed from drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c)344
-rw-r--r--drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c614
-rw-r--r--drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h (renamed from drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h)54
-rw-r--r--drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c566
-rw-r--r--drivers/clk/qcom/mdss/mdss-dsi-pll.h65
-rw-r--r--drivers/clk/qcom/mdss/mdss-pll-util.c1
-rw-r--r--drivers/clk/qcom/mdss/mdss-pll.c25
-rw-r--r--drivers/clk/qcom/mdss/mdss-pll.h27
-rw-r--r--drivers/clk/qcom/mmcc-msmfalcon.c19
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c1
-rw-r--r--drivers/cpufreq/Kconfig21
-rw-r--r--drivers/cpufreq/cpufreq.c64
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c43
-rw-r--r--drivers/cpufreq/intel_pstate.c2
-rw-r--r--drivers/cpuidle/cpuidle.c4
-rw-r--r--drivers/cpuidle/lpm-levels.c4
-rw-r--r--drivers/crypto/caam/caamalg.c90
-rw-r--r--drivers/crypto/caam/caamhash.c1
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c12
-rw-r--r--drivers/crypto/nx/nx.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c4
-rw-r--r--drivers/crypto/vmx/aes_cbc.c2
-rw-r--r--drivers/crypto/vmx/aes_ctr.c2
-rw-r--r--drivers/crypto/vmx/ppc-xlate.pl20
-rw-r--r--drivers/dma/at_xdmac.c82
-rw-r--r--drivers/dma/sh/usb-dmac.c19
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/edac_mc_sysfs.c20
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-intel-mid.c19
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c1
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c4
-rw-r--r--drivers/gpu/drm/drm_cache.c1
-rw-r--r--drivers/gpu/drm/drm_crtc.c3
-rw-r--r--drivers/gpu/drm/drm_edid.c8
-rw-r--r--drivers/gpu/drm/drm_gem.c29
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h15
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c39
-rw-r--r--drivers/gpu/drm/i915/intel_display.c60
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c26
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c108
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/msm/adreno_dispatch.c127
-rw-r--r--drivers/gpu/msm/adreno_drawctxt.c3
-rw-r--r--drivers/gpu/msm/kgsl.c152
-rw-r--r--drivers/gpu/msm/kgsl.h3
-rw-r--r--drivers/gpu/msm/kgsl_compat.c2
-rw-r--r--drivers/gpu/msm/kgsl_device.h15
-rw-r--r--drivers/gpu/msm/kgsl_drawobj.c187
-rw-r--r--drivers/gpu/msm/kgsl_drawobj.h33
-rw-r--r--drivers/gpu/msm/kgsl_ioctl.c2
-rw-r--r--drivers/hid/hid-sony.c6
-rw-r--r--drivers/hid/uhid.c33
-rw-r--r--drivers/hv/channel.c27
-rw-r--r--drivers/hv/channel_mgmt.c61
-rw-r--r--drivers/hv/hv.c10
-rw-r--r--drivers/hv/hv_fcopy.c37
-rw-r--r--drivers/hv/hv_kvp.c31
-rw-r--r--drivers/hv/hv_snapshot.c34
-rw-r--r--drivers/hv/hv_utils_transport.c9
-rw-r--r--drivers/hv/hyperv_vmbus.h11
-rw-r--r--drivers/hv/vmbus_drv.c38
-rw-r--r--drivers/hwmon/iio_hwmon.c24
-rw-r--r--drivers/hwtracing/coresight/Kconfig3
-rw-r--r--drivers/hwtracing/intel_th/core.c35
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h3
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c2
-rw-r--r--drivers/i2c/busses/i2c-efm32.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c103
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c2
-rw-r--r--drivers/idle/intel_idle.c25
-rw-r--r--drivers/iio/adc/qcom-rradc.c230
-rw-r--r--drivers/iio/industrialio-buffer.c23
-rw-r--r--drivers/infiniband/core/iwpm_util.c1
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c24
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c12
-rw-r--r--drivers/infiniband/hw/mlx5/main.c5
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/input/joystick/xpad.c348
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c79
-rw-r--r--drivers/input/mouse/elantech.c8
-rw-r--r--drivers/input/mouse/vmmouse.c22
-rw-r--r--drivers/input/serio/i8042.c17
-rw-r--r--drivers/input/serio/libps2.c10
-rw-r--r--drivers/input/touchscreen/sur40.c5
-rw-r--r--drivers/input/touchscreen/tsc2004.c7
-rw-r--r--drivers/input/touchscreen/tsc2005.c7
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c15
-rw-r--r--drivers/input/touchscreen/tsc200x-core.h2
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c2
-rw-r--r--drivers/iommu/amd_iommu.c40
-rw-r--r--drivers/iommu/arm-smmu-v3.c7
-rw-r--r--drivers/iommu/dma-iommu.c3
-rw-r--r--drivers/iommu/exynos-iommu.c1
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c49
-rw-r--r--drivers/leds/leds-qpnp-wled.c48
-rw-r--r--drivers/lightnvm/gennvm.c3
-rw-r--r--drivers/lightnvm/rrpc.c24
-rw-r--r--drivers/md/Kconfig3
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/dm-android-verity.c4
-rw-r--r--drivers/md/dm-android-verity.h5
-rw-r--r--drivers/md/dm-flakey.c23
-rw-r--r--drivers/md/dm-linear.c6
-rw-r--r--drivers/md/dm-verity-target.c7
-rw-r--r--drivers/md/dm.c14
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c83
-rw-r--r--drivers/media/dvb-frontends/Kconfig2
-rw-r--r--drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c5
-rw-r--r--drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c81
-rw-r--r--drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h6
-rw-r--r--drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c56
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c157
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h10
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.h6
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp32.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp48.c21
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c25
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c47
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h1
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c19
-rw-r--r--drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c14
-rw-r--r--drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h1
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c137
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h21
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c16
-rw-r--r--drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c4
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.c4
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c55
-rw-r--r--drivers/media/platform/msm/vidc/hfi_response_handler.c16
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c7
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c50
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c5
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_api.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c11
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c2
-rw-r--r--drivers/media/tuners/tuner-xc2028.c7
-rw-r--r--drivers/media/usb/airspy/airspy.c3
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c5
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c20
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c6
-rw-r--r--drivers/mfd/qcom_rpm.c55
-rw-r--r--drivers/misc/Kconfig5
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/api.c6
-rw-r--r--drivers/misc/cxl/context.c15
-rw-r--r--drivers/misc/cxl/cxl.h15
-rw-r--r--drivers/misc/cxl/fault.c129
-rw-r--r--drivers/misc/cxl/file.c25
-rw-r--r--drivers/misc/cxl/pci.c1
-rw-r--r--drivers/misc/memory_state_time.c454
-rw-r--r--drivers/misc/qseecom.c31
-rw-r--r--drivers/mmc/card/block.c12
-rw-r--r--drivers/mmc/core/core.c66
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/host.h5
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c81
-rw-r--r--drivers/mmc/host/sdhci.c35
-rw-r--r--drivers/mmc/host/sdhci.h21
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/mtd/ubi/build.c13
-rw-r--r--drivers/mtd/ubi/vmt.c25
-rw-r--r--drivers/net/bonding/bond_netlink.c6
-rw-r--r--drivers/net/can/at91_can.c5
-rw-r--r--drivers/net/can/c_can/c_can.c38
-rw-r--r--drivers/net/can/dev.c9
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h3
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c91
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h7
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c5
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c65
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c126
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c73
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c182
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c40
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c32
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c27
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c7
-rw-r--r--drivers/net/ppp/ppp_generic.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c7
-rw-r--r--drivers/net/wireless/cnss/cnss_pci.c4
-rw-r--r--drivers/net/wireless/cnss/cnss_sdio.c3
-rw-r--r--drivers/nfc/nq-nci.c135
-rw-r--r--drivers/nfc/nq-nci.h2
-rw-r--r--drivers/nvme/host/pci.c71
-rw-r--r--drivers/of/base.c41
-rw-r--r--drivers/of/dynamic.c2
-rw-r--r--drivers/of/of_private.h3
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-sysfs.c18
-rw-r--r--drivers/pci/quirks.c22
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-v3-falcon.c260
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-v3-falcon.h283
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c80
-rw-r--r--drivers/pinctrl/pinctrl-amd.c20
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c8
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c17
-rw-r--r--drivers/platform/msm/ipa/ipa_api.c19
-rw-r--r--drivers/platform/msm/ipa/ipa_api.h3
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c30
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c32
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c239
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c5
-rw-r--r--drivers/platform/x86/hp-wmi.c7
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/power_supply_core.c27
-rw-r--r--drivers/power/qcom-charger/fg-core.h12
-rw-r--r--drivers/power/qcom-charger/fg-reg.h4
-rw-r--r--drivers/power/qcom-charger/qpnp-fg-gen3.c174
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c23
-rw-r--r--drivers/power/qcom-charger/smb-lib.c90
-rw-r--r--drivers/power/qcom-charger/smb-lib.h6
-rw-r--r--drivers/power/qcom-charger/smb-reg.h3
-rw-r--r--drivers/power/qcom-charger/smb1351-charger.c2
-rw-r--r--drivers/power/qcom-charger/smb138x-charger.c110
-rw-r--r--drivers/pps/clients/pps_parport.c2
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c58
-rw-r--r--drivers/pwm/pwm-lpc32xx.c55
-rw-r--r--drivers/regulator/anatop-regulator.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c15
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/s390/block/dasd.c10
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/s390/cio/chp.c21
-rw-r--r--drivers/s390/cio/chp.h2
-rw-r--r--drivers/s390/cio/chsc.c43
-rw-r--r--drivers/s390/cio/cmf.c29
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/aacraid/commctrl.c13
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c26
-rw-r--r--drivers/scsi/constants.c5
-rw-r--r--drivers/scsi/cxlflash/common.h2
-rw-r--r--drivers/scsi/cxlflash/main.c69
-rw-r--r--drivers/scsi/cxlflash/main.h4
-rw-r--r--drivers/scsi/cxlflash/superpipe.c19
-rw-r--r--drivers/scsi/cxlflash/vlun.c2
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c373
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c134
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c23
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c274
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c46
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c10
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/ufs/ufshcd.c238
-rw-r--r--drivers/scsi/ufs/ufshcd.h3
-rw-r--r--drivers/soc/qcom/glink.c8
-rw-r--r--drivers/soc/qcom/icnss.c13
-rw-r--r--drivers/soc/qcom/pil-msa.c3
-rw-r--r--drivers/soc/qcom/pil-q6v5-mss.c4
-rw-r--r--drivers/soc/qcom/pil-q6v5.c10
-rw-r--r--drivers/soc/qcom/scm.c5
-rw-r--r--drivers/soc/qcom/smcinvoke.c10
-rw-r--r--drivers/soc/qcom/spcom.c79
-rw-r--r--drivers/soc/qcom/subsys-pil-tz.c6
-rw-r--r--drivers/soc/qcom/subsystem_notif.c6
-rw-r--r--drivers/soc/qcom/subsystem_restart.c36
-rw-r--r--drivers/spi/spi-pxa2xx.c9
-rw-r--r--drivers/spi/spi-sun4i.c23
-rw-r--r--drivers/spi/spi-sun6i.c10
-rw-r--r--drivers/staging/android/ion/msm/msm_ion.c25
-rw-r--r--drivers/staging/android/ion/msm/msm_ion.h26
-rw-r--r--drivers/staging/android/lowmemorykiller.c2
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c46
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h2
-rw-r--r--drivers/staging/rdma/ipath/ipath_file_ops.c5
-rw-r--r--drivers/target/iscsi/iscsi_target.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c5
-rw-r--r--drivers/target/target_core_device.c8
-rw-r--r--drivers/target/target_core_file.c3
-rw-r--r--drivers/target/target_core_iblock.c3
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_sbc.c2
-rw-r--r--drivers/target/target_core_transport.c78
-rw-r--r--drivers/tty/pty.c63
-rw-r--r--drivers/tty/serial/atmel_serial.c14
-rw-r--r--drivers/tty/serial/msm_serial.c135
-rw-r--r--drivers/tty/serial/samsung.c18
-rw-r--r--drivers/usb/chipidea/udc.c7
-rw-r--r--drivers/usb/class/cdc-acm.c5
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/common/common.c1
-rw-r--r--drivers/usb/core/config.c69
-rw-r--r--drivers/usb/core/devices.c10
-rw-r--r--drivers/usb/core/devio.c16
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c6
-rw-r--r--drivers/usb/core/hub.c49
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/urb.c3
-rw-r--r--drivers/usb/core/usb.c3
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc3/core.c16
-rw-r--r--drivers/usb/dwc3/core.h7
-rw-r--r--drivers/usb/dwc3/dbm.c58
-rw-r--r--drivers/usb/dwc3/dbm.h3
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c283
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c29
-rw-r--r--drivers/usb/gadget/composite.c14
-rw-r--r--drivers/usb/gadget/function/f_accessory.c9
-rw-r--r--drivers/usb/gadget/function/f_fs.c10
-rw-r--r--drivers/usb/gadget/function/f_gsi.c14
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c51
-rw-r--r--drivers/usb/gadget/function/f_qdss.c23
-rw-r--r--drivers/usb/gadget/function/f_rmnet.c559
-rw-r--r--drivers/usb/gadget/function/f_uac2.c1
-rw-r--r--drivers/usb/gadget/function/u_ctrl_qti.c18
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.c56
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.h19
-rw-r--r--drivers/usb/gadget/function/u_qdss.c3
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c4
-rw-r--r--drivers/usb/host/ohci-q.c3
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-mem.c109
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-plat.c4
-rw-r--r--drivers/usb/host/xhci-ring.c16
-rw-r--r--drivers/usb/host/xhci.c7
-rw-r--r--drivers/usb/misc/usbtest.c7
-rw-r--r--drivers/usb/phy/phy-msm-qusb-v2.c3
-rw-r--r--drivers/usb/phy/phy-msm-qusb.c4
-rw-r--r--drivers/usb/phy/phy-msm-ssusb-qmp.c6
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c22
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c18
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h12
-rw-r--r--drivers/usb/serial/mos7720.c2
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/option.c34
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c85
-rw-r--r--drivers/vhost/scsi.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_compat_utils.c8
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_panel.c55
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c20
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pipe.c23
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c18
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.h19
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.c6
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c14
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c3
480 files changed, 11342 insertions, 4319 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6730f965b379..0afd1981e350 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -216,8 +216,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
cpc_ptr = per_cpu(cpc_desc_ptr, i);
- if (!cpc_ptr)
- continue;
+ if (!cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
pdomain = &(cpc_ptr->domain_info);
cpumask_set_cpu(i, pr->shared_cpu_map);
@@ -239,8 +241,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr)
- continue;
+ if (!match_cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
@@ -270,8 +274,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr)
- continue;
+ if (!match_cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
@@ -502,9 +508,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
- /* Plug it into this CPUs CPC descriptor. */
- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
if (ret)
@@ -517,6 +520,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
+ /* Plug PSD data into this CPUs CPC descriptor. */
+ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
+
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b420fb46669d..43f20328f830 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -101,6 +101,7 @@ enum ec_command {
#define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */
#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
* when trying to clear the EC */
+#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -121,6 +122,10 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
+module_param(ec_max_queries, uint, 0644);
+MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
+
static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
@@ -174,6 +179,7 @@ static void acpi_ec_event_processor(struct work_struct *work);
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);
+static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
@@ -1097,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
* work queue execution.
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- if (!schedule_work(&q->work)) {
+ if (!queue_work(ec_query_wq, &q->work)) {
ec_dbg_evt("Query(0x%02x) overlapped", value);
result = -EBUSY;
}
@@ -1657,15 +1663,41 @@ static struct acpi_driver acpi_ec_driver = {
},
};
+static inline int acpi_ec_query_init(void)
+{
+ if (!ec_query_wq) {
+ ec_query_wq = alloc_workqueue("kec_query", 0,
+ ec_max_queries);
+ if (!ec_query_wq)
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static inline void acpi_ec_query_exit(void)
+{
+ if (ec_query_wq) {
+ destroy_workqueue(ec_query_wq);
+ ec_query_wq = NULL;
+ }
+}
+
int __init acpi_ec_init(void)
{
- int result = 0;
+ int result;
+ /* register workqueue for _Qxx evaluations */
+ result = acpi_ec_query_init();
+ if (result)
+ goto err_exit;
/* Now register the driver for the EC */
result = acpi_bus_register_driver(&acpi_ec_driver);
- if (result < 0)
- return -ENODEV;
+ if (result)
+ goto err_exit;
+err_exit:
+ if (result)
+ acpi_ec_query_exit();
return result;
}
@@ -1675,5 +1707,6 @@ static void __exit acpi_ec_exit(void)
{
acpi_bus_unregister_driver(&acpi_ec_driver);
+ acpi_ec_query_exit();
}
#endif /* 0 */
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 11d8209e6e5d..5230e8449d30 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -1072,11 +1072,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+ const u32 STATUS_MASK = 0x80000037;
if (mmio->num_lines)
offset = to_interleave_offset(offset, mmio);
- return readl(mmio->addr.base + offset);
+ return readl(mmio->addr.base + offset) & STATUS_MASK;
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 72b6e9ef0ae9..d176e0ece470 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -327,10 +327,18 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, 0);
- acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, 0);
+ struct acpi_subtable_proc srat_proc[2];
+
+ memset(srat_proc, 0, sizeof(srat_proc));
+ srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
+ srat_proc[0].handler = acpi_parse_processor_affinity;
+ srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
+ srat_proc[1].handler = acpi_parse_x2apic_affinity;
+
+ acpi_table_parse_entries_array(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ srat_proc, ARRAY_SIZE(srat_proc), 0);
+
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 78d5f02a073b..dcb3d6245ca5 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1958,7 +1958,7 @@ int __init acpi_scan_init(void)
static struct acpi_probe_entry *ape;
static int acpi_probe_count;
-static DEFINE_SPINLOCK(acpi_probe_lock);
+static DEFINE_MUTEX(acpi_probe_mutex);
static int __init acpi_match_madt(struct acpi_subtable_header *header,
const unsigned long end)
@@ -1977,7 +1977,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
if (acpi_disabled)
return 0;
- spin_lock(&acpi_probe_lock);
+ mutex_lock(&acpi_probe_mutex);
for (ape = ap_head; nr; ape++, nr--) {
if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
@@ -1990,7 +1990,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
count++;
}
}
- spin_unlock(&acpi_probe_lock);
+ mutex_unlock(&acpi_probe_mutex);
return count;
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0243d375c6fd..4b3a9e27f1b6 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -555,23 +555,22 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
static int get_status(u32 index, acpi_event_status *status,
acpi_handle *handle)
{
- int result = 0;
+ int result;
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
- goto end;
+ return -EINVAL;
if (index < num_gpes) {
result = acpi_get_gpe_device(index, handle);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
"Invalid GPE 0x%x", index));
- goto end;
+ return result;
}
result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
-end:
return result;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b79cb10e289e..bd370c98f77d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4138,6 +4138,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ /*
+ * Device times out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+ { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 38f156745d53..71df8f2afc6c 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -8,8 +8,6 @@
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
-#define BCMA_CORE_SIZE 0x1000
-
#define bcma_err(bus, fmt, ...) \
pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 79107597a594..c306b483de60 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2056,12 +2056,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}
- /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
- * supported by this firmware loading method. This check has been
- * put in place to ensure correct forward compatibility options
- * when newer hardware variants come along.
+ /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
+ * and 0x0c (WsP) are supported by this firmware loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come along.
*/
- if (ver->hw_variant != 0x0b) {
+ if (ver->hw_variant != 0x0b && ver->hw_variant != 0x0c) {
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
hdev->name, ver->hw_variant);
kfree_skb(skb);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 4a414a5a3165..b9065506a847 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1234,8 +1234,7 @@ static int intel_probe(struct platform_device *pdev)
idev->pdev = pdev;
- idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(idev->reset)) {
dev_err(&pdev->dev, "Unable to retrieve gpio\n");
return PTR_ERR(idev->reset);
@@ -1247,8 +1246,7 @@ static int intel_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
- host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
- GPIOD_IN);
+ host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
if (IS_ERR(host_wake)) {
dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
goto no_irq;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1046c262b46b..8e3bff9c7fe9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -606,13 +606,20 @@ config TILE_SROM
source "drivers/char/xillybus/Kconfig"
config MSM_ADSPRPC
- tristate "Qualcomm ADSP RPC driver"
- depends on MSM_SMD
- help
- Provides a communication mechanism that allows for clients to
- make remote method invocations across processor boundary to
- applications DSP processor. Say M if you want to enable this
- module.
+ tristate "QTI ADSP RPC driver"
+ depends on MSM_SMD
+ help
+ Provides a communication mechanism that allows for clients to
+ make remote method invocations across processor boundary to
+ applications DSP processor. Say M if you want to enable this
+ module.
+
+config MSM_RDBG
+ tristate "QTI Remote debug driver"
+ help
+ Implements a shared memory based transport mechanism that allows
+ for a debugger running on a host PC to communicate with a remote
+ stub running on peripheral subsystems such as the ADSP, MODEM etc.
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index e180562c725e..7b0bd5408324 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -65,3 +65,4 @@ obj-$(CONFIG_MSM_ADSPRPC) += adsprpc.o
ifdef CONFIG_COMPAT
obj-$(CONFIG_MSM_ADSPRPC) += adsprpc_compat.o
endif
+obj-$(CONFIG_MSM_RDBG) += rdbg.o
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index aa30af5f0f2b..7845a38b6604 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -118,6 +118,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
struct resource *res;
+ int ret;
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
@@ -145,7 +146,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ if (ret) {
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+
+ return ret;
}
#ifdef CONFIG_PM
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b583e5336630..d93dfebae0bb 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -722,15 +722,18 @@ retry:
}
}
-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+ if (nbits < 0)
+ return -EINVAL;
+
/* Cap the value to avoid overflows */
nbits = min(nbits, nbits_max);
- nbits = max(nbits, -nbits_max);
credit_entropy_bits(r, nbits);
+ return 0;
}
/*********************************************************************
@@ -945,6 +948,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
/* award one bit for the contents of the fast pool */
credit_entropy_bits(r, credit + 1);
}
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
@@ -1457,12 +1461,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
+ static int maxwarn = 10;
int ret;
- if (unlikely(nonblocking_pool.initialized == 0))
- printk_once(KERN_NOTICE "random: %s urandom read "
- "with %d bits of entropy available\n",
- current->comm, nonblocking_pool.entropy_total);
+ if (unlikely(nonblocking_pool.initialized == 0) &&
+ maxwarn > 0) {
+ maxwarn--;
+ printk(KERN_NOTICE "random: %s: uninitialized urandom read "
+ "(%zd bytes read, %d bits of entropy available)\n",
+ current->comm, nbytes, nonblocking_pool.entropy_total);
+ }
nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
@@ -1542,8 +1550,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
+ return credit_entropy_bits_safe(&input_pool, ent_count);
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1557,8 +1564,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
size);
if (retval < 0)
return retval;
- credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
+ return credit_entropy_bits_safe(&input_pool, ent_count);
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
@@ -1868,12 +1874,18 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
{
struct entropy_store *poolp = &input_pool;
- /* Suspend writing if we're above the trickle threshold.
- * We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
- */
- wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+ if (unlikely(nonblocking_pool.initialized == 0))
+ poolp = &nonblocking_pool;
+ else {
+ /* Suspend writing if we're above the trickle
+ * threshold. We'll be woken up again once below
+ * random_write_wakeup_thresh, or when the calling
+ * thread is about to terminate.
+ */
+ wait_event_interruptible(random_write_wait,
+ kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
+ }
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
}
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
new file mode 100644
index 000000000000..0823ed78485e
--- /dev/null
+++ b/drivers/char/rdbg.c
@@ -0,0 +1,1165 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <soc/qcom/smsm.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+
+#define SMP2P_NUM_PROCS 8
+#define MAX_RETRIES 20
+
+#define SM_VERSION 1
+#define SM_BLOCKSIZE 128
+
+#define SMQ_MAGIC_INIT 0xFF00FF00
+#define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1)
+#define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2)
+
+enum SMQ_STATUS {
+ SMQ_SUCCESS = 0,
+ SMQ_ENOMEMORY = -1,
+ SMQ_EBADPARM = -2,
+ SMQ_UNDERFLOW = -3,
+ SMQ_OVERFLOW = -4
+};
+
+enum smq_type {
+ PRODUCER = 1,
+ CONSUMER = 2,
+ INVALID = 3
+};
+
+struct smq_block_map {
+ uint32_t index_read;
+ uint32_t num_blocks;
+ uint8_t *map;
+};
+
+struct smq_node {
+ uint16_t index_block;
+ uint16_t num_blocks;
+} __attribute__ ((__packed__));
+
+struct smq_hdr {
+ uint8_t producer_version;
+ uint8_t consumer_version;
+} __attribute__ ((__packed__));
+
+struct smq_out_state {
+ uint32_t init;
+ uint32_t index_check_queue_for_reset;
+ uint32_t index_sent_write;
+ uint32_t index_free_read;
+} __attribute__ ((__packed__));
+
+struct smq_out {
+ struct smq_out_state s;
+ struct smq_node sent[1];
+};
+
+struct smq_in_state {
+ uint32_t init;
+ uint32_t index_check_queue_for_reset_ack;
+ uint32_t index_sent_read;
+ uint32_t index_free_write;
+} __attribute__ ((__packed__));
+
+struct smq_in {
+ struct smq_in_state s;
+ struct smq_node free[1];
+};
+
+struct smq {
+ struct smq_hdr *hdr;
+ struct smq_out *out;
+ struct smq_in *in;
+ uint8_t *blocks;
+ uint32_t num_blocks;
+ struct mutex *lock;
+ uint32_t initialized;
+ struct smq_block_map block_map;
+ enum smq_type type;
+};
+
+struct gpio_info {
+ int gpio_base_id;
+ int irq_base_id;
+};
+
+struct rdbg_data {
+ struct device *device;
+ struct completion work;
+ struct gpio_info in;
+ struct gpio_info out;
+ bool device_initialized;
+ int gpio_out_offset;
+ bool device_opened;
+ void *smem_addr;
+ size_t smem_size;
+ struct smq producer_smrb;
+ struct smq consumer_smrb;
+ struct mutex write_mutex;
+};
+
+struct rdbg_device {
+ struct cdev cdev;
+ struct class *class;
+ dev_t dev_no;
+ int num_devices;
+ struct rdbg_data *rdbg_data;
+};
+
+static struct rdbg_device g_rdbg_instance = {
+ { {0} },
+ NULL,
+ 0,
+ SMP2P_NUM_PROCS,
+ NULL
+};
+
+struct processor_specific_info {
+ char *name;
+ unsigned int smem_buffer_addr;
+ size_t smem_buffer_size;
+};
+
+static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
+ {0}, /*APPS*/
+ {"rdbg_modem", 0, 0}, /*MODEM*/
+ {"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/
+ {0}, /*SMP2P_RESERVED_PROC_1*/
+ {"rdbg_wcnss", 0, 0}, /*WCNSS*/
+ {0}, /*SMP2P_RESERVED_PROC_2*/
+ {0}, /*SMP2P_POWER_PROC*/
+ {0} /*SMP2P_REMOTE_MOCK_PROC*/
+};
+
+/*
+ * smq_blockmap_get() - reserve @n contiguous blocks in the producer's
+ * private allocation map.
+ *
+ * On success the first block's index is stored in *@block_index and
+ * SMQ_SUCCESS is returned; SMQ_ENOMEMORY is returned when no run of @n
+ * free blocks exists.  The scan starts at block_map->index_read and
+ * wraps once around the map, so allocation proceeds round-robin.
+ *
+ * Map encoding: map[i] == 0 means block i is free; for an allocated run
+ * starting at block m, map[m + i] holds (n - i), i.e. the number of
+ * blocks remaining in the run.  smq_blockmap_put() relies on this to
+ * free a whole run given its starting index.
+ */
+static int smq_blockmap_get(struct smq_block_map *block_map,
+		uint32_t *block_index, uint32_t n)
+{
+	uint32_t start;
+	uint32_t mark = 0;
+	uint32_t found = 0;
+	uint32_t i = 0;
+
+	start = block_map->index_read;
+
+	if (n == 1) {
+		/* Fast path: any single free slot will do. */
+		do {
+			if (!block_map->map[block_map->index_read]) {
+				*block_index = block_map->index_read;
+				block_map->map[block_map->index_read] = 1;
+				block_map->index_read++;
+				block_map->index_read %= block_map->num_blocks;
+				return SMQ_SUCCESS;
+			}
+			block_map->index_read++;
+		} while (start != (block_map->index_read %=
+			block_map->num_blocks));
+	} else {
+		/* "mark" is the start of the current candidate run and
+		 * "found" the number of consecutive free blocks seen so far.
+		 */
+		mark = block_map->num_blocks;
+
+		do {
+			if (!block_map->map[block_map->index_read]) {
+				if (mark > block_map->index_read) {
+					mark = block_map->index_read;
+					start = block_map->index_read;
+					found = 0;
+				}
+
+				found++;
+				if (found == n) {
+					*block_index = mark;
+					for (i = 0; i < n; i++)
+						block_map->map[mark + i] =
+							(uint8_t)(n - i);
+					block_map->index_read += block_map->map
+						[block_map->index_read] - 1;
+					return SMQ_SUCCESS;
+				}
+			} else {
+				/* Hit a used block: skip the rest of its run
+				 * (length is encoded in the map entry) and
+				 * restart the candidate search.
+				 */
+				found = 0;
+				block_map->index_read += block_map->map
+					[block_map->index_read] - 1;
+				mark = block_map->num_blocks;
+			}
+			block_map->index_read++;
+		} while (start != (block_map->index_read %=
+			block_map->num_blocks));
+	}
+
+	return SMQ_ENOMEMORY;
+}
+
+/*
+ * smq_blockmap_put() - release the block run that starts at index @i.
+ * map[i] holds the run length (see the encoding in smq_blockmap_get()),
+ * so that many consecutive entries are cleared back to "free".
+ */
+static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
+{
+	uint32_t num_blocks = block_map->map[i];
+
+	while (num_blocks--) {
+		block_map->map[i] = 0;
+		i++;
+	}
+}
+
+/*
+ * smq_blockmap_reset() - mark every block free and restart the scan cursor.
+ * num_blocks was stored as (allocation size - 1) by smq_blockmap_ctor(),
+ * hence the "+ 1" to clear the full allocated map.
+ * Returns SMQ_ENOMEMORY if the map was never allocated.
+ */
+static int smq_blockmap_reset(struct smq_block_map *block_map)
+{
+	if (!block_map->map)
+		return SMQ_ENOMEMORY;
+	memset(block_map->map, 0, block_map->num_blocks + 1);
+	block_map->index_read = 0;
+
+	return SMQ_SUCCESS;
+}
+
+/*
+ * smq_blockmap_ctor() - allocate and zero the block map.
+ * Allocates @num_blocks one-byte entries but records num_blocks - 1 as the
+ * usable count (the scan in smq_blockmap_get() wraps modulo this value).
+ * Returns SMQ_SUCCESS, or SMQ_ENOMEMORY on allocation failure or when
+ * fewer than two blocks were requested.
+ */
+static int smq_blockmap_ctor(struct smq_block_map *block_map,
+		uint32_t num_blocks)
+{
+	if (num_blocks <= 1)
+		return SMQ_ENOMEMORY;
+
+	block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
+	if (!block_map->map)
+		return SMQ_ENOMEMORY;
+
+	block_map->num_blocks = num_blocks - 1;
+	smq_blockmap_reset(block_map);
+
+	return SMQ_SUCCESS;
+}
+
+/* smq_blockmap_dtor() - free the block map; safe to call more than once. */
+static void smq_blockmap_dtor(struct smq_block_map *block_map)
+{
+	kfree(block_map->map);
+	block_map->map = NULL;
+}
+
+/*
+ * smq_free() - consumer-side: return a received data buffer to the producer.
+ * @data must be a pointer previously handed out by smq_receive(); its block
+ * index is recomputed from the offset into the shared block area and pushed
+ * onto the "free" ring so the remote producer can reclaim the blocks.
+ *
+ * Returns SMQ_SUCCESS, SMQ_UNDERFLOW if the remote producer side is not
+ * initialized, or SMQ_EBADPARM if @data lies outside the block area.
+ */
+static int smq_free(struct smq *smq, void *data)
+{
+	struct smq_node node;
+	uint32_t index_block;
+	int err = SMQ_SUCCESS;
+
+	/* The consumer-side smq is constructed with lock == NULL, so the
+	 * lock is only taken when one was supplied (producer-shared path).
+	 */
+	if (smq->lock)
+		mutex_lock(smq->lock);
+
+	if ((smq->hdr->producer_version != SM_VERSION) &&
+		(smq->out->s.init != SMQ_MAGIC_PRODUCER)) {
+		err = SMQ_UNDERFLOW;
+		goto bail;
+	}
+
+	index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
+	if (index_block >= smq->num_blocks) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	/* num_blocks is unused on the free ring; only the index matters. */
+	node.index_block = (uint16_t)index_block;
+	node.num_blocks = 0;
+	*((struct smq_node *)(smq->in->free + smq->in->
+		s.index_free_write)) = node;
+
+	smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
+		% smq->num_blocks;
+
+bail:
+	if (smq->lock)
+		mutex_unlock(smq->lock);
+	return err;
+}
+
+/*
+ * smq_receive() - consumer-side: pop the next message from the sent ring.
+ * On success *@pp points at the message's first block in shared memory,
+ * *@pnsize is the block-rounded payload size, and *@pbmore is set to 1 if
+ * further messages are already queued.  The caller must hand *@pp back via
+ * smq_free() once done.
+ *
+ * Returns SMQ_SUCCESS, SMQ_UNDERFLOW when the producer is uninitialized or
+ * the ring is empty, or SMQ_EBADPARM on a corrupt node index.
+ */
+static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
+{
+	struct smq_node *node;
+	int err = SMQ_SUCCESS;
+	int more = 0;
+
+	if ((smq->hdr->producer_version != SM_VERSION) &&
+		(smq->out->s.init != SMQ_MAGIC_PRODUCER))
+		return SMQ_UNDERFLOW;
+
+	/* read == write means the sent ring is empty. */
+	if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
+		err = SMQ_UNDERFLOW;
+		goto bail;
+	}
+
+	node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
+	if (node->index_block >= smq->num_blocks) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
+		% smq->num_blocks;
+
+	*pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
+	*pnsize = SM_BLOCKSIZE * node->num_blocks;
+
+	/* Ensure that the reads and writes are updated in the memory
+	 * when they are done and not cached. Also, ensure that the reads
+	 * and writes are not reordered as they are shared between two cores.
+	 */
+	rmb();
+	if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
+		more = 1;
+
+bail:
+	*pbmore = more;
+	return err;
+}
+
+/*
+ * smq_alloc_send() - producer-side: allocate blocks, copy a user-space
+ * payload into them and publish a node on the sent ring.
+ * @pcb is a userspace pointer (copied with copy_from_user()); @nsize is the
+ * payload size in bytes, rounded up to whole SM_BLOCKSIZE blocks.
+ *
+ * Before allocating, any blocks the remote consumer has returned on the
+ * free ring are reclaimed into the local block map.
+ *
+ * Returns SMQ_SUCCESS, an SMQ_* error, or a positive count from
+ * copy_from_user() (bytes NOT copied) — callers only test for non-zero.
+ */
+static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
+{
+	void *pv = 0;
+	int num_blocks;
+	uint32_t index_block = 0;
+	int err = SMQ_SUCCESS;
+	struct smq_node *node = NULL;
+
+	mutex_lock(smq->lock);
+
+	/* Drain the consumer's free ring only once the consumer side is
+	 * fully initialized and no queue reset is pending acknowledgement.
+	 */
+	if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) &&
+		(smq->hdr->consumer_version == SM_VERSION)) {
+		if (smq->out->s.index_check_queue_for_reset ==
+			smq->in->s.index_check_queue_for_reset_ack) {
+			while (smq->out->s.index_free_read !=
+				smq->in->s.index_free_write) {
+				node = (struct smq_node *)(
+					smq->in->free +
+					smq->out->s.index_free_read);
+				if (node->index_block >= smq->num_blocks) {
+					err = SMQ_EBADPARM;
+					goto bail;
+				}
+
+				smq->out->s.index_free_read =
+					(smq->out->s.index_free_read + 1)
+						% smq->num_blocks;
+
+				smq_blockmap_put(&smq->block_map,
+					node->index_block);
+				/* Ensure that the reads and writes are
+				 * updated in the memory when they are done
+				 * and not cached. Also, ensure that the reads
+				 * and writes are not reordered as they are
+				 * shared between two cores.
+				 */
+				rmb();
+			}
+		}
+	}
+
+	num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
+	err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
+	if (err != SMQ_SUCCESS)
+		goto bail;
+
+	pv = smq->blocks + (SM_BLOCKSIZE * index_block);
+
+	/* copy_from_user() returns the number of bytes it could NOT copy;
+	 * any non-zero value aborts the send and releases the blocks.
+	 */
+	err = copy_from_user((void *)pv, (void *)pcb, nsize);
+	if (err != 0)
+		goto bail;
+
+	((struct smq_node *)(smq->out->sent +
+		smq->out->s.index_sent_write))->index_block
+			= (uint16_t)index_block;
+	((struct smq_node *)(smq->out->sent +
+		smq->out->s.index_sent_write))->num_blocks
+			= (uint16_t)num_blocks;
+
+	/* Publishing the write index makes the message visible remotely. */
+	smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
+		% smq->num_blocks;
+
+bail:
+	if (err != SMQ_SUCCESS) {
+		/* pv non-NULL means blocks were reserved; give them back. */
+		if (pv)
+			smq_blockmap_put(&smq->block_map, index_block);
+	}
+	mutex_unlock(smq->lock);
+	return err;
+}
+
+/*
+ * smq_reset_producer_queue_internal() - producer half of the queue-reset
+ * handshake.  If @reset_num differs from the producer's current reset
+ * counter, the sent ring, block map and read/write indices are cleared and
+ * the counter is updated.  Returns 1 when a reset was performed, else 0.
+ */
+static int smq_reset_producer_queue_internal(struct smq *smq,
+		uint32_t reset_num)
+{
+	int retval = 0;
+	uint32_t i;
+
+	if (smq->type != PRODUCER)
+		goto bail;
+
+	mutex_lock(smq->lock);
+	if (smq->out->s.index_check_queue_for_reset != reset_num) {
+		smq->out->s.index_check_queue_for_reset = reset_num;
+		/* 0xFFFF marks a sent-ring slot as invalid/unused. */
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		smq_blockmap_reset(&smq->block_map);
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		retval = 1;
+	}
+	mutex_unlock(smq->lock);
+
+bail:
+	return retval;
+}
+
+/*
+ * smq_check_queue_reset() - consumer half of the queue-reset handshake.
+ * If the remote producer bumped its reset counter (it re-attached, see
+ * smq_ctor()), clear the local consumer state, acknowledge the new counter,
+ * and reset the paired local producer queue as well.
+ * Returns the result of smq_reset_producer_queue_internal() (1 if the
+ * producer queue was reset), or 0 when no reset was pending.
+ */
+static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
+{
+	int retval = 0;
+	uint32_t reset_num, i;
+
+	if ((p_cons->type != CONSUMER) ||
+		(p_cons->out->s.init != SMQ_MAGIC_PRODUCER) ||
+		(p_cons->hdr->producer_version != SM_VERSION))
+		goto bail;
+
+	reset_num = p_cons->out->s.index_check_queue_for_reset;
+	if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
+		p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
+		/* 0xFFFF marks a free-ring slot as invalid/unused. */
+		for (i = 0; i < p_cons->num_blocks; i++)
+			(p_cons->in->free + i)->index_block = 0xFFFF;
+
+		p_cons->in->s.index_sent_read = 0;
+		p_cons->in->s.index_free_write = 0;
+
+		retval = smq_reset_producer_queue_internal(p_prod, reset_num);
+	}
+
+bail:
+	return retval;
+}
+
+/*
+ * check_subsystem_debug_enabled() - probe whether the remote stub is alive.
+ * Recomputes the SMQ layout (see the layout comment above smq_ctor()) over
+ * the remote-producer half of the SMEM buffer and checks that the consumer
+ * init word carries SMQ_MAGIC_CONSUMER, i.e. the subsystem-side stub has
+ * initialized its end.  Returns 0 when enabled, SMQ_EBADPARM when the
+ * buffer is too small, or -ECOMM when the magic is absent.
+ */
+static int check_subsystem_debug_enabled(void *base_addr, int size)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	struct smq smq;
+	int err = 0;
+
+	/* Walk the same header + 8-byte alignment + blocks layout that
+	 * smq_ctor() uses, without modifying any shared state.
+	 */
+	pb = pb_orig = (uint8_t *)base_addr;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq.out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq.in = (struct smq_in *)pb;
+
+	if (smq.in->s.init != SMQ_MAGIC_CONSUMER) {
+		pr_err("%s, smq in consumer not initialized", __func__);
+		err = -ECOMM;
+	}
+
+bail:
+	return err;
+}
+
+/*
+ * smq_dtor() - tear down one direction of the channel.
+ * Clears the init magic in shared memory for the owned side (so the remote
+ * end can detect the detach) and, for a producer, frees the private block
+ * map.  A no-op unless smq_ctor() completed (initialized == SMQ_MAGIC_INIT).
+ */
+static void smq_dtor(struct smq *smq)
+{
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		switch (smq->type) {
+		case PRODUCER:
+			smq->out->s.init = 0;
+			smq_blockmap_dtor(&smq->block_map);
+			break;
+		case CONSUMER:
+			smq->in->s.init = 0;
+			break;
+		default:
+		case INVALID:
+			break;
+		}
+
+		smq->initialized = 0;
+	}
+}
+
+/*
+ * The shared memory is used as a circular ring buffer in each direction.
+ * Thus we have a bi-directional shared memory channel between the AP
+ * and a subsystem. We call this SMQ. Each memory channel contains a header,
+ * data and a control mechanism that is used to synchronize read and write
+ * of data between the AP and the remote subsystem.
+ *
+ * Overall SMQ memory view:
+ *
+ * +------------------------------------------------+
+ * | SMEM buffer |
+ * |-----------------------+------------------------|
+ * |Producer: LA | Producer: Remote |
+ * |Consumer: Remote | subsystem |
+ * | subsystem | Consumer: LA |
+ * | | |
+ * | Producer| Consumer|
+ * +-----------------------+------------------------+
+ * | |
+ * | |
+ * | +--------------------------------------+
+ * | |
+ * | |
+ * v v
+ * +--------------------------------------------------------------+
+ * | Header | Data | Control |
+ * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ * | | b | b | b | | S |n |n | | S |n |n | |
+ * | Producer | l | l | l | | M |o |o | | M |o |o | |
+ * | Ver | o | o | o | | Q |d |d | | Q |d |d | |
+ * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... |
+ * | | k | k | k | | O | | | | I | | | |
+ * | Consumer | | | | | u |0 |1 | | n |0 |1 | |
+ * | Ver | 0 | 1 | 2 | | t | | | | | | | |
+ * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ * | |
+ * + |
+ * |
+ * +------------------------+
+ * |
+ * v
+ * +----+----+----+----+
+ * | SMQ Nodes |
+ * |----|----|----|----|
+ * Node # | 0 | 1 | 2 | ...|
+ * |----|----|----|----|
+ * Starting Block Index # | 0 | 3 | 8 | ...|
+ * |----|----|----|----|
+ * # of blocks | 3 | 5 | 1 | ...|
+ * +----+----+----+----+
+ *
+ * Header: Contains version numbers for software compatibility to ensure
+ * that both producers and consumers on the AP and subsystems know how to
+ * read from and write to the queue.
+ * Both the producer and consumer versions are 1.
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 1 byte | Producer Version |
+ * +---------+-------------------+
+ * | 1 byte | Consumer Version |
+ * +---------+-------------------+
+ *
+ * Data: The data portion contains multiple blocks [0..N] of a fixed size.
+ * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+ * Payload sent from the debug agent app is split (if necessary) and placed
+ * in these blocks. The first data block is placed at the next 8 byte aligned
+ * address after the header.
+ *
+ * The number of blocks for a given SMEM allocation is derived as follows:
+ * Number of Blocks = ((Total Size - Alignment - Size of Header
+ * - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
+ *
+ * The producer maintains a private block map of each of these blocks to
+ * determine which of these blocks in the queue is available and which are free.
+ *
+ * Control:
+ * The control portion contains a list of nodes [0..N] where N is number
+ * of available data blocks. Each node identifies the data
+ * block indexes that contain a particular debug message to be transferred,
+ * and the number of blocks it took to hold the contents of the message.
+ *
+ * Each node has the following structure:
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 2 bytes |Starting Block Idx |
+ * +---------+-------------------+
+ * | 2 bytes |Number of Blocks |
+ * +---------+-------------------+
+ *
+ * The producer and the consumer update different parts of the control channel
+ * (SMQOut / SMQIn) respectively. Each of these control data structures contains
+ * information about the last node that was written / read, and the actual nodes
+ * that were written/read.
+ *
+ * SMQOut Structure (R/W by producer, R by consumer):
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 4 bytes | Magic Init Number |
+ * +---------+-------------------+
+ * | 4 bytes | Reset |
+ * +---------+-------------------+
+ * | 4 bytes | Last Sent Index |
+ * +---------+-------------------+
+ * | 4 bytes | Index Free Read |
+ * +---------+-------------------+
+ *
+ * SMQIn Structure (R/W by consumer, R by producer):
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 4 bytes | Magic Init Number |
+ * +---------+-------------------+
+ * | 4 bytes | Reset ACK |
+ * +---------+-------------------+
+ * | 4 bytes | Last Read Index |
+ * +---------+-------------------+
+ * | 4 bytes | Index Free Write |
+ * +---------+-------------------+
+ *
+ * Magic Init Number:
+ * Both SMQ Out and SMQ In initialize this field with a predefined magic
+ * number so as to make sure that both the consumer and producer blocks
+ * have fully initialized and have valid data in the shared memory control area.
+ * Producer Magic #: 0xFF00FF01
+ * Consumer Magic #: 0xFF00FF02
+ */
+/*
+ * smq_ctor() - map one direction of the SMQ channel onto @base_addr/@size
+ * (layout documented in the big comment above) and initialize the side
+ * this core owns (@type PRODUCER or CONSUMER).
+ *
+ * @lock_ptr is only retained for PRODUCER queues; consumer queues are
+ * constructed with a NULL lock (see initialize_smq()).
+ *
+ * If the producer magic is already present in shared memory (the remote or
+ * a previous open left state behind), the reset counter is bumped instead
+ * of reinitialized, which triggers the reset handshake in
+ * smq_check_queue_reset() on the peer.
+ *
+ * Returns SMQ_SUCCESS, SMQ_EBADPARM on bad arguments or double init, or
+ * SMQ_ENOMEMORY when the buffer cannot hold even one block.
+ */
+static int smq_ctor(struct smq *smq, void *base_addr, int size,
+	enum smq_type type, struct mutex *lock_ptr)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	uint32_t i;
+	int err;
+
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (!base_addr || !size) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (type == PRODUCER)
+		smq->lock = lock_ptr;
+
+	/* Header, then the first data block at the next 8-byte boundary. */
+	pb_orig = (uint8_t *)base_addr;
+	smq->hdr = (struct smq_hdr *)pb_orig;
+	pb = pb_orig;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	/* Each block costs SM_BLOCKSIZE bytes of data plus one node in each
+	 * of the sent and free rings.
+	 */
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_ENOMEMORY;
+		goto bail;
+	}
+
+	smq->blocks = pb;
+	smq->num_blocks = num_blocks;
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq->out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq->in = (struct smq_in *)pb;
+	smq->type = type;
+	if (type == PRODUCER) {
+		smq->hdr->producer_version = SM_VERSION;
+		/* 0xFFFF marks every sent-ring slot as invalid/unused. */
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
+		if (err != SMQ_SUCCESS)
+			goto bail;
+
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		/* Re-attach: bump the reset counter so the consumer performs
+		 * the reset handshake rather than trusting stale indices.
+		 */
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->out->s.index_check_queue_for_reset += 1;
+		} else {
+			smq->out->s.index_check_queue_for_reset = 1;
+			smq->out->s.init = SMQ_MAGIC_PRODUCER;
+		}
+	} else {
+		smq->hdr->consumer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->in->free + i)->index_block = 0xFFFF;
+
+		smq->in->s.index_sent_read = 0;
+		smq->in->s.index_free_write = 0;
+		/* Pre-acknowledge the producer's current reset counter if the
+		 * producer side is already up.
+		 */
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->in->s.index_check_queue_for_reset_ack =
+				smq->out->s.index_check_queue_for_reset;
+		} else {
+			smq->in->s.index_check_queue_for_reset_ack = 0;
+		}
+
+		smq->in->s.init = SMQ_MAGIC_CONSUMER;
+	}
+	smq->initialized = SMQ_MAGIC_INIT;
+	err = SMQ_SUCCESS;
+
+bail:
+	return err;
+}
+
+/*
+ * send_interrupt_to_subsystem() - signal the remote stub via smp2p GPIOs.
+ * Toggles the GPIO at (gpio_base_id + gpio_out_offset) to create an edge,
+ * then advances the offset (mod 32 — presumably the width of the smp2p
+ * outbound entry; confirm against the smp2p gpio client binding).
+ */
+static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
+{
+	int offset = rdbgdata->gpio_out_offset;
+	int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
+
+	gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
+	rdbgdata->gpio_out_offset = (offset + 1) % 32;
+
+	dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
+		__func__, val);
+}
+
+/*
+ * on_interrupt_from() - IRQ handler for the inbound smp2p GPIO interrupt.
+ * @ptr is the per-device struct rdbg_data; completing ->work wakes a
+ * reader blocked in rdbg_read().
+ */
+static irqreturn_t on_interrupt_from(int irq, void *ptr)
+{
+	struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
+
+	dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
+		__func__, irq);
+
+	complete(&(rdbgdata->work));
+	return IRQ_HANDLED;
+}
+
+/*
+ * initialize_smq() - construct both directions of the channel over the
+ * device's SMEM buffer: the first half is the local producer queue
+ * (guarded by write_mutex), the second half the local consumer queue
+ * (no lock — only the single reader touches it).
+ * Returns 0 on success or -ENOMEM if either smq_ctor() fails.  On consumer
+ * failure the already-constructed producer queue is left for the caller to
+ * tear down (rdbg_open() dtors both on the smq_bail path).
+ */
+static int initialize_smq(struct rdbg_data *rdbgdata)
+{
+	int err = 0;
+	unsigned char *smem_consumer_buffer = rdbgdata->smem_addr;
+
+	smem_consumer_buffer += (rdbgdata->smem_size/2);
+
+	if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
+		((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
+		dev_err(rdbgdata->device, "%s: smq producer allocation failed",
+			__func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	/* NOTE(review): "conmsumer" in the log string below is a typo for
+	 * "consumer" (runtime string, left untouched here).
+	 */
+	if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer,
+		((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
+		dev_err(rdbgdata->device, "%s: smq conmsumer allocation failed",
+			__func__);
+		err = -ENOMEM;
+	}
+
+bail:
+	return err;
+
+}
+
+/*
+ * rdbg_open() - open one /dev/rdbg_* node (minor selects the subsystem).
+ *
+ * Flow: look up the per-subsystem SMEM buffer via smem_find(), verify the
+ * remote stub initialized its end (check_subsystem_debug_enabled() on the
+ * remote-producer half), register the inbound smp2p IRQ, then build both
+ * SMQ directions with initialize_smq().  Only a single concurrent open per
+ * device is allowed (-EEXIST otherwise).
+ *
+ * Error unwinding runs backwards through the smq_bail/irq_bail/bail labels
+ * so each acquired resource is released exactly once.
+ */
+static int rdbg_open(struct inode *inode, struct file *filp)
+{
+	int device_id = -1;
+	struct rdbg_device *device = &g_rdbg_instance;
+	struct rdbg_data *rdbgdata = NULL;
+	int err = 0;
+
+	if (!inode || !device->rdbg_data) {
+		pr_err("Memory not allocated yet");
+		err = -ENODEV;
+		goto bail;
+	}
+
+	/* The char-dev minor number indexes proc_info[] / rdbg_data[]. */
+	device_id = MINOR(inode->i_rdev);
+	rdbgdata = &device->rdbg_data[device_id];
+
+	if (rdbgdata->device_opened) {
+		dev_err(rdbgdata->device, "%s: Device already opened",
+			__func__);
+		err = -EEXIST;
+		goto bail;
+	}
+
+	rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
+	if (!rdbgdata->smem_size) {
+		dev_err(rdbgdata->device, "%s: smem not initialized", __func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	rdbgdata->smem_addr = smem_find(proc_info[device_id].smem_buffer_addr,
+		rdbgdata->smem_size, 0, SMEM_ANY_HOST_FLAG);
+	if (!rdbgdata->smem_addr) {
+		dev_err(rdbgdata->device, "%s: Could not allocate smem memory",
+			__func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+	dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d",
+		__func__, (unsigned long)rdbgdata->smem_addr,
+		(unsigned int)rdbgdata->smem_size);
+
+	/* The remote half (second smem_size/2) must carry the consumer
+	 * magic, proving the subsystem stub is up and debug-enabled.
+	 */
+	if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
+		rdbgdata->smem_size/2)) {
+		dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled",
+			__func__, proc_info[device_id].name);
+		err = -ECOMM;
+		goto bail;
+	}
+
+	init_completion(&rdbgdata->work);
+
+	err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from,
+			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			proc_info[device_id].name,
+			(void *)&device->rdbg_data[device_id]);
+	if (err) {
+		dev_err(rdbgdata->device,
+			"%s: Failed to register interrupt.Err=%d,irqid=%d.",
+			__func__, err, rdbgdata->in.irq_base_id);
+		goto irq_bail;
+	}
+
+	/* Wake capability is best-effort: failure is logged and ignored. */
+	err = enable_irq_wake(rdbgdata->in.irq_base_id);
+	if (err < 0) {
+		dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d",
+			err);
+		err = 0;
+	}
+
+	mutex_init(&rdbgdata->write_mutex);
+
+	err = initialize_smq(rdbgdata);
+	if (err) {
+		dev_err(rdbgdata->device, "Error initializing smq. Err=%d",
+			err);
+		goto smq_bail;
+	}
+
+	rdbgdata->device_opened = 1;
+
+	filp->private_data = (void *)rdbgdata;
+
+	return 0;
+
+smq_bail:
+	smq_dtor(&(rdbgdata->producer_smrb));
+	smq_dtor(&(rdbgdata->consumer_smrb));
+	mutex_destroy(&rdbgdata->write_mutex);
+irq_bail:
+	free_irq(rdbgdata->in.irq_base_id, (void *)
+		&device->rdbg_data[device_id]);
+bail:
+	return err;
+}
+
+/*
+ * rdbg_release() - close path: undo everything rdbg_open() set up.
+ * Completes ->work first so a reader blocked in rdbg_read() is released
+ * before the IRQ and queues are torn down.  Returns 0, or -ENODEV if the
+ * driver state was never allocated.
+ */
+static int rdbg_release(struct inode *inode, struct file *filp)
+{
+	int device_id = -1;
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	struct rdbg_data *rdbgdata = NULL;
+	int err = 0;
+
+	if (!inode || !rdbgdevice->rdbg_data) {
+		pr_err("Memory not allocated yet");
+		err = -ENODEV;
+		goto bail;
+	}
+
+	device_id = MINOR(inode->i_rdev);
+	rdbgdata = &rdbgdevice->rdbg_data[device_id];
+
+	if (rdbgdata->device_opened == 1) {
+		dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__,
+			proc_info[device_id].name);
+		rdbgdata->device_opened = 0;
+		/* Unblock any reader waiting on the completion. */
+		complete(&(rdbgdata->work));
+		free_irq(rdbgdata->in.irq_base_id, (void *)
+			&rdbgdevice->rdbg_data[device_id]);
+		/* Only destroy queues whose smq_ctor() completed. */
+		if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
+			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+				producer_smrb));
+		if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
+			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+				consumer_smrb));
+		mutex_destroy(&rdbgdata->write_mutex);
+	}
+
+	filp->private_data = NULL;
+
+bail:
+	return err;
+}
+
+/*
+ * rdbg_read() - blocking read of one message from the subsystem.
+ * Sleeps (interruptibly) on ->work until the smp2p IRQ fires, services any
+ * pending queue reset, pops one message with smq_receive(), copies at most
+ * @size bytes to userspace, and returns the buffer to the remote producer.
+ * Returns the number of bytes copied, or a negative errno.
+ *
+ * NOTE(review): the smq_receive() failure log prints "err" before it is
+ * assigned for that failure, so it shows the stale prior value.
+ * NOTE(review): "err = size" narrows size_t to int; harmless for this
+ * driver's small buffers but worth confirming against max smem_size.
+ */
+static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
+	loff_t *offset)
+{
+	int err = 0;
+	struct rdbg_data *rdbgdata = filp->private_data;
+	void *p_sent_buffer = NULL;
+	int nsize = 0;
+	int more = 0;
+
+	if (!rdbgdata) {
+		pr_err("Invalid argument");
+		err = -EINVAL;
+		goto bail;
+	}
+
+	dev_dbg(rdbgdata->device, "%s: In receive", __func__);
+	err = wait_for_completion_interruptible(&(rdbgdata->work));
+	if (err) {
+		dev_err(rdbgdata->device, "%s: Error in wait", __func__);
+		goto bail;
+	}
+
+	smq_check_queue_reset(&(rdbgdata->consumer_smrb),
+		&(rdbgdata->producer_smrb));
+	if (smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer,
+			&nsize, &more) != SMQ_SUCCESS) {
+		dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+
+	/* Truncate to the caller's buffer; unread remainder is dropped. */
+	size = ((size < nsize) ? size : nsize);
+	err = copy_to_user(buf, p_sent_buffer, size);
+	if (err != 0) {
+		dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+
+	/* Hand the shared-memory blocks back to the remote producer. */
+	smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
+	err = size;
+	dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx",
+		__func__, (unsigned long) buf);
+
+bail:
+	return err;
+}
+
+/*
+ * rdbg_write() - send one message to the subsystem.
+ * Retries smq_alloc_send() up to MAX_RETRIES times (blocks may still be
+ * held by the remote consumer), then toggles the outbound smp2p GPIO to
+ * notify the stub.  Returns @size on success or -EINVAL/-ECOMM.
+ *
+ * NOTE(review): the retry loop spins without any delay between attempts;
+ * consider whether a short sleep would be more appropriate here.
+ */
+static ssize_t rdbg_write(struct file *filp, const char __user *buf,
+	size_t size, loff_t *offset)
+{
+	int err = 0;
+	int num_retries = 0;
+	struct rdbg_data *rdbgdata = filp->private_data;
+
+	if (!rdbgdata) {
+		pr_err("Invalid argument");
+		err = -EINVAL;
+		goto bail;
+	}
+
+	do {
+		err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size);
+		dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.",
+			__func__, err);
+	} while (err != 0 && num_retries++ < MAX_RETRIES);
+
+	if (err != 0) {
+		err = -ECOMM;
+		goto bail;
+	}
+
+	send_interrupt_to_subsystem(rdbgdata);
+
+	err = size;
+
+bail:
+	return err;
+}
+
+
+static const struct file_operations rdbg_fops = {
+ .open = rdbg_open,
+ .read = rdbg_read,
+ .write = rdbg_write,
+ .release = rdbg_release,
+};
+
+/*
+ * register_smp2p() - resolve the smp2p gpio client DT node @node_name and
+ * fill @gpio_info_ptr with its first GPIO and the corresponding IRQ.
+ * Returns 0 on success, -EINVAL when the node is absent, has no GPIOs, or
+ * @gpio_info_ptr is NULL.
+ */
+static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
+{
+	struct device_node *node = NULL;
+	int cnt = 0;
+	int id = 0;
+
+	node = of_find_compatible_node(NULL, NULL, node_name);
+	if (node) {
+		cnt = of_gpio_count(node);
+		if (cnt && gpio_info_ptr) {
+			/* Base GPIO of the smp2p entry; the rest of the 32
+			 * pins are addressed by offset from this id.
+			 */
+			id = of_get_gpio(node, 0);
+			gpio_info_ptr->gpio_base_id = id;
+			gpio_info_ptr->irq_base_id = gpio_to_irq(id);
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/*
+ * rdbg_init() - module init.
+ *
+ * Allocates per-subsystem rdbg state, a char-device region, a device
+ * class, and creates one /dev node per processor that has matching
+ * smp2p gpio client entries in the device tree.  Returns 0 when at
+ * least one device node was created, a negative errno otherwise.
+ */
+static int __init rdbg_init(void)
+{
+	int err = 0;
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor = 0;
+	int major = 0;
+	int minor_nodes_created = 0;
+
+	char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_";
+	/*
+	 * "xx_out" sizes the longest suffix; the extra byte is for the
+	 * NUL terminator -- without it snprintf() truncates node names
+	 * for two-digit minors.
+	 */
+	int max_len = strlen(rdbg_compatible_string) + strlen("xx_out") + 1;
+
+	char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);
+
+	if (!node_name) {
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	if (rdbgdevice->num_devices < 1 ||
+		rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
+		pr_err("rgdb: invalid num_devices");
+		err = -EDOM;
+		goto name_bail;
+	}
+
+	rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
+		sizeof(struct rdbg_data), GFP_KERNEL);
+	if (!rdbgdevice->rdbg_data) {
+		err = -ENOMEM;
+		goto name_bail;
+	}
+
+	err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
+		rdbgdevice->num_devices, "rdbgctl");
+	if (err) {
+		pr_err("Error in alloc_chrdev_region.");
+		goto data_bail;
+	}
+	major = MAJOR(rdbgdevice->dev_no);
+
+	cdev_init(&rdbgdevice->cdev, &rdbg_fops);
+	rdbgdevice->cdev.owner = THIS_MODULE;
+	err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
+		rdbgdevice->num_devices);
+	if (err) {
+		pr_err("Error in cdev_add");
+		goto chrdev_bail;
+	}
+
+	rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
+	if (IS_ERR(rdbgdevice->class)) {
+		err = PTR_ERR(rdbgdevice->class);
+		pr_err("Error in class_create");
+		goto cdev_bail;
+	}
+
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		/* Skip processors the driver has no name for. */
+		if (!proc_info[minor].name)
+			continue;
+
+		if (snprintf(node_name, max_len, "%s%d_in",
+			rdbg_compatible_string, minor) <= 0) {
+			pr_err("Error in snprintf");
+			err = -ENOMEM;
+			goto device_bail;
+		}
+
+		/* A missing "in" entry just means this subsystem is unused. */
+		if (register_smp2p(node_name,
+			&rdbgdevice->rdbg_data[minor].in)) {
+			pr_debug("No incoming device tree entry found for %s",
+				proc_info[minor].name);
+			continue;
+		}
+
+		if (snprintf(node_name, max_len, "%s%d_out",
+			rdbg_compatible_string, minor) <= 0) {
+			pr_err("Error in snprintf");
+			err = -ENOMEM;
+			goto device_bail;
+		}
+
+		/* ...but once "in" exists, a matching "out" is mandatory. */
+		if (register_smp2p(node_name,
+			&rdbgdevice->rdbg_data[minor].out)) {
+			pr_err("No outgoing device tree entry found for %s",
+				proc_info[minor].name);
+			err = -EINVAL;
+			goto device_bail;
+		}
+
+		rdbgdevice->rdbg_data[minor].device = device_create(
+			rdbgdevice->class, NULL, MKDEV(major, minor),
+			NULL, "%s", proc_info[minor].name);
+		if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
+			err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
+			pr_err("Error in device_create");
+			goto device_bail;
+		}
+		rdbgdevice->rdbg_data[minor].device_initialized = 1;
+		minor_nodes_created++;
+		dev_dbg(rdbgdevice->rdbg_data[minor].device,
+			"%s: created /dev/%s c %d %d'", __func__,
+			proc_info[minor].name, major, minor);
+	}
+
+	if (!minor_nodes_created) {
+		pr_err("No device tree entries found");
+		err = -EINVAL;
+		goto class_bail;
+	}
+
+	goto name_bail;
+
+device_bail:
+	/* Tear down only the device nodes created so far. */
+	for (--minor; minor >= 0; minor--) {
+		if (rdbgdevice->rdbg_data[minor].device_initialized)
+			device_destroy(rdbgdevice->class,
+				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+	}
+class_bail:
+	class_destroy(rdbgdevice->class);
+cdev_bail:
+	cdev_del(&rdbgdevice->cdev);
+chrdev_bail:
+	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+data_bail:
+	kfree(rdbgdevice->rdbg_data);
+name_bail:
+	kfree(node_name);
+bail:
+	return err;
+}
+
+/*
+ * rdbg_exit() - module exit: undo everything rdbg_init() set up.
+ */
+static void __exit rdbg_exit(void)
+{
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor;
+
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		if (rdbgdevice->rdbg_data[minor].device_initialized) {
+			device_destroy(rdbgdevice->class,
+				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+		}
+	}
+	class_destroy(rdbgdevice->class);
+	cdev_del(&rdbgdevice->cdev);
+	/* Release the whole region allocated in rdbg_init(), not just one. */
+	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+	kfree(rdbgdevice->rdbg_data);
+}
+
+module_init(rdbg_init);
+module_exit(rdbg_exit);
+
+MODULE_DESCRIPTION("rdbg module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 27c0da29eca3..10224b01b97c 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -351,7 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* Set new divider */
data = xgene_clk_read(pclk->param.divider_reg +
pclk->param.reg_divider_offset);
- data &= ~((1 << pclk->param.reg_divider_width) - 1);
+ data &= ~((1 << pclk->param.reg_divider_width) - 1)
+ << pclk->param.reg_divider_shift;
data |= divider;
xgene_clk_write(data, pclk->param.divider_reg +
pclk->param.reg_divider_offset);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index eb44cf9ddd17..c4aec62d1014 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2801,6 +2801,8 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
goto err_out;
}
+ clk_debug_measure_add(core->hw, core->dentry);
+
ret = 0;
goto out;
@@ -2930,8 +2932,10 @@ static int __init clk_debug_init(void)
return -ENOMEM;
mutex_lock(&clk_debug_lock);
- hlist_for_each_entry(core, &clk_debug_list, debug_node)
+ hlist_for_each_entry(core, &clk_debug_list, debug_node) {
+ clk_register_debug(core->hw, core->dentry);
clk_debug_create_one(core, rootdir);
+ }
inited = 1;
mutex_unlock(&clk_debug_lock);
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index 179b27c08022..c95a327a9301 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -23,6 +23,8 @@ void __clk_free_clk(struct clk *clk);
/* Debugfs API to print the enabled clocks */
void clock_debug_print_enabled(void);
+int clk_register_debug(struct clk_hw *hw, struct dentry *dentry);
+void clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry);
#else
/* All these casts to avoid ifdefs in clkdev... */
diff --git a/drivers/clk/msm/clock-gpu-8998.c b/drivers/clk/msm/clock-gpu-8998.c
index e4789a51b738..fce293db93f3 100644
--- a/drivers/clk/msm/clock-gpu-8998.c
+++ b/drivers/clk/msm/clock-gpu-8998.c
@@ -622,6 +622,7 @@ int msm_gfxcc_8998_probe(struct platform_device *pdev)
struct device_node *of_node = pdev->dev.of_node;
int rc;
struct regulator *reg;
+ u32 regval;
bool is_v2 = 0, is_vq = 0;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
@@ -697,6 +698,14 @@ int msm_gfxcc_8998_probe(struct platform_device *pdev)
*/
clk_set_flags(&gpucc_gfx3d_clk.c, CLKFLAG_RETAIN_PERIPH);
+ /*
+ * Program the droop detector's gfx_pdn to 1'b1 in order to reduce
+ * leakage between the graphics and CX rails.
+ */
+ regval = readl_relaxed(virt_base_gfx + GPUCC_GPU_DD_WRAP_CTRL);
+ regval |= BIT(0);
+ writel_relaxed(regval, virt_base_gfx + GPUCC_GPU_DD_WRAP_CTRL);
+
dev_info(&pdev->dev, "Completed registering all GPU clocks\n");
return 0;
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 7094fb4d40af..9e1036c19760 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -1850,9 +1850,9 @@ static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
/* Enable or disable LLM VOLT DVCS */
regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&pwrcl_clk, val, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&perfcl_clk, val, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
/* Wait for the writes to complete */
clk_osm_mb(&perfcl_clk, OSM_BASE);
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c b/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c
index 029f779979c7..c60c4864442f 100644
--- a/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c
@@ -494,7 +494,7 @@ static int hdmi_8998_pll_set_clk_rate(struct clk *c, unsigned long rate)
_W(pll, SYSCLK_EN_SEL, 0x37);
_W(pll, SYS_CLK_CTRL, 0x2);
_W(pll, CLK_ENABLE1, 0xE);
- _W(pll, PLL_IVCO, 0xF);
+ _W(pll, PLL_IVCO, 0x7);
_W(pll, VCO_TUNE_CTRL, 0x0);
_W(pll, SVS_MODE_CLK_SEL, cfg.svs_mode_clk_sel);
_W(pll, CLK_SELECT, 0x30);
@@ -536,10 +536,10 @@ static int hdmi_8998_pll_set_clk_rate(struct clk *c, unsigned long rate)
_W(pll, PHY_TX_PRE_DRIVER_2(2), cfg.l2_pre_driver_2);
_W(pll, PHY_TX_PRE_DRIVER_2(3), cfg.l3_pre_driver_2);
- _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x0);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x3);
_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), 0x0);
_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), 0x0);
- _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x0);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x3);
_W(phy, PHY_MODE, cfg.phy_mode);
@@ -627,8 +627,6 @@ static int hdmi_8998_pll_enable(struct clk *c)
_W(phy, PHY_CFG, 0x59);
udelay(100);
- _W(phy, PHY_CLOCK, 0x6);
-
/* Ensure all registers are flushed to hardware */
wmb();
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 36ab5cf68740..2148dad33e87 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -218,3 +218,5 @@ config QCOM_A53
Support for the A53 clock controller on MSM devices.
Say Y if you want to support CPU frequency scaling on devices
such as MSM8916.
+
+source "drivers/clk/qcom/mdss/Kconfig"
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 595254f69db1..176dc3103cdb 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -37,3 +37,5 @@ obj-$(CONFIG_KRAITCC) += krait-cc.o
obj-$(CONFIG_QCOM_A53) += clk-a53.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+
+obj-y += mdss/
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index cffaf46d732f..096e16db02fe 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -248,6 +248,73 @@ const struct clk_ops clk_branch2_ops = {
};
EXPORT_SYMBOL_GPL(clk_branch2_ops);
+static int clk_branch2_hw_ctl_set_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	/*
+	 * The rate is really programmed on the parent RCG (which must
+	 * have FORCE_ENABLE_RCGR set); this op only verifies that the
+	 * request can propagate upwards via CLK_SET_RATE_PARENT.
+	 */
+	if (hw->init->flags & CLK_SET_RATE_PARENT)
+		return 0;
+
+	pr_err("set rate would not get propagated to parent\n");
+	return -EINVAL;
+}
+
+/* The branch has no local divider; it runs at the parent (RCG) rate. */
+static unsigned long clk_branch2_hw_ctl_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	return parent_rate;
+}
+
+/*
+ * Forward the rate request to the parent clock (the RCG), which is
+ * the one that can actually satisfy it.
+ */
+static int clk_branch2_hw_ctl_determine_rate(struct clk_hw *hw,
+		struct clk_rate_request *req)
+{
+	struct clk_hw *clkp;
+
+	clkp = __clk_get_hw(clk_get_parent(hw->clk));
+	/* An orphan clock has no parent to round the rate against. */
+	if (!clkp)
+		return -EINVAL;
+
+	req->best_parent_hw = clkp;
+	req->best_parent_rate = clk_round_rate(clkp->clk, req->rate);
+
+	return 0;
+}
+
+static int clk_branch2_hw_ctl_enable(struct clk_hw *hw)
+{
+	struct clk_hw *parent = __clk_get_hw(clk_get_parent(hw->clk));
+
+	/* The parent branch clock should have been prepared prior to this. */
+	if (!parent || !clk_hw_is_prepared(parent))
+		return -EINVAL;
+
+	return clk_enable_regmap(hw);
+}
+
+static void clk_branch2_hw_ctl_disable(struct clk_hw *hw)
+{
+	/* Nothing to do for an orphan clock. */
+	if (!__clk_get_hw(clk_get_parent(hw->clk)))
+		return;
+
+	clk_disable_regmap(hw);
+}
+
+/* Ops for branch clocks whose rate is hardware-controlled by the RCG. */
+const struct clk_ops clk_branch2_hw_ctl_ops = {
+	.enable = clk_branch2_hw_ctl_enable,
+	.disable = clk_branch2_hw_ctl_disable,
+	.is_enabled = clk_is_enabled_regmap,
+	.set_rate = clk_branch2_hw_ctl_set_rate,
+	.recalc_rate = clk_branch2_hw_ctl_recalc_rate,
+	.determine_rate = clk_branch2_hw_ctl_determine_rate,
+	.set_flags = clk_branch_set_flags,
+	.list_registers = clk_branch2_list_registers,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_hw_ctl_ops);
+
static int clk_gate_toggle(struct clk_hw *hw, bool en)
{
struct clk_gate2 *gt = to_clk_gate2(hw);
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 8a934cf8bed1..b67ac1dfbbf9 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -62,6 +62,7 @@ extern const struct clk_ops clk_branch_ops;
extern const struct clk_ops clk_branch2_ops;
extern const struct clk_ops clk_gate2_ops;
extern const struct clk_ops clk_branch_simple_ops;
+extern const struct clk_ops clk_branch2_hw_ctl_ops;
#define to_clk_branch(_hw) \
container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 020bd351bbd8..accdac9fb964 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -171,10 +171,12 @@ struct clk_rcg2 {
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
unsigned long current_freq;
+ u32 new_index;
+ u32 curr_index;
struct clk_regmap clkr;
-#define FORCE_ENABLE_RCGR BIT(0)
u8 flags;
+#define FORCE_ENABLE_RCGR BIT(0)
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 4104a238c088..653722f9c4b0 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -89,30 +89,6 @@ static int clk_rcg_set_force_enable(struct clk_hw *hw)
return ret;
}
-static int clk_rcg2_enable(struct clk_hw *hw)
-{
- int ret = 0;
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- if (rcg->flags & FORCE_ENABLE_RCGR)
- ret = clk_rcg_set_force_enable(hw);
-
- return ret;
-}
-
-static void clk_rcg2_disable(struct clk_hw *hw)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- if (rcg->flags & FORCE_ENABLE_RCGR) {
- /* force disable RCG - clear CMD_ROOT_EN bit */
- regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + CMD_REG, CMD_ROOT_EN, 0);
- /* Add a delay to disable the RCG */
- udelay(100);
- }
-}
-
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -381,16 +357,178 @@ static long clk_rcg2_list_rate(struct clk_hw *hw, unsigned n,
return (rcg->freq_tbl + n)->freq;
}
+/*
+ * Prepare and enable both the current (@curr) and the new (@new) source
+ * of an RCG around a source switch, so neither gets gated mid-switch.
+ *
+ * NOTE(review): @new is intentionally prepared/enabled a second time
+ * when the RCG itself is already prepared/enabled, mirroring the RCG's
+ * own refcounts so disable_unprepare_rcg_srcs() balances exactly --
+ * confirm against the teardown path before "simplifying".
+ */
+static int prepare_enable_rcg_srcs(struct clk_hw *hw, struct clk *curr,
+					struct clk *new)
+{
+	int rc = 0;
+
+	rc = clk_prepare(curr);
+	if (rc)
+		return rc;
+
+	/* Extra prepare of @new to shadow the RCG's prepared state. */
+	if (clk_hw_is_prepared(hw)) {
+		rc = clk_prepare(new);
+		if (rc)
+			goto err_new_src_prepare;
+	}
+
+	rc = clk_prepare(new);
+	if (rc)
+		goto err_new_src_prepare2;
+
+	rc = clk_enable(curr);
+	if (rc)
+		goto err_curr_src_enable;
+
+	/* Extra enable of @new to shadow the RCG's enable count. */
+	if (__clk_get_enable_count(hw->clk)) {
+		rc = clk_enable(new);
+		if (rc)
+			goto err_new_src_enable;
+	}
+
+	rc = clk_enable(new);
+	if (rc)
+		goto err_new_src_enable2;
+
+	return rc;
+
+	/* Unwind in strict reverse order of acquisition. */
+err_new_src_enable2:
+	if (__clk_get_enable_count(hw->clk))
+		clk_disable(new);
+err_new_src_enable:
+	clk_disable(curr);
+err_curr_src_enable:
+	clk_unprepare(new);
+err_new_src_prepare2:
+	if (clk_hw_is_prepared(hw))
+		clk_unprepare(new);
+err_new_src_prepare:
+	clk_unprepare(curr);
+
+	return rc;
+}
+
+/*
+ * Mirror of prepare_enable_rcg_srcs(): drops every enable/prepare
+ * reference it took, in reverse order.
+ */
+static void disable_unprepare_rcg_srcs(struct clk_hw *hw, struct clk *curr,
+					struct clk *new)
+{
+	clk_disable(new);
+
+	clk_disable(curr);
+
+	/* Drop the extra enable taken when the RCG was enabled. */
+	if (__clk_get_enable_count(hw->clk))
+		clk_disable(new);
+
+	clk_unprepare(new);
+	clk_unprepare(curr);
+
+	/* Drop the extra prepare taken when the RCG was prepared. */
+	if (clk_hw_is_prepared(hw))
+		clk_unprepare(new);
+}
+
+/* 19.2 MHz XO entry used as the default source before any rate is set. */
+static struct freq_tbl cxo_f = {
+	.freq = 19200000,
+	.src = 0,
+	.pre_div = 1,
+	.m = 0,
+	.n = 0,
+};
+
+/*
+ * Resolve the current (@cindex) and new (@nindex) parents of @hw and
+ * either prepare+enable both (@enable == true) or disable+unprepare
+ * them.  Returns 0 on success or a negative errno.
+ */
+static int clk_enable_disable_prepare_unprepare(struct clk_hw *hw, int cindex,
+				int nindex, bool enable)
+{
+	struct clk_hw *new_p, *curr_p;
+
+	curr_p = clk_hw_get_parent_by_index(hw, cindex);
+	new_p = clk_hw_get_parent_by_index(hw, nindex);
+
+	/* Guard against an out-of-range index / missing parent. */
+	if (!curr_p || !new_p)
+		return -EINVAL;
+
+	if (enable)
+		return prepare_enable_rcg_srcs(hw, curr_p->clk, new_p->clk);
+
+	disable_unprepare_rcg_srcs(hw, curr_p->clk, new_p->clk);
+	return 0;
+}
+
+/*
+ * clk_rcg2_enable() - force-enable an RCG root.
+ *
+ * For FORCE_ENABLE_RCGR clocks: hold both the currently running source
+ * and the pending new source (rcg->new_index) on while the CMD_ROOT_EN
+ * bit is set, then drop the temporary references again.
+ */
+static int clk_rcg2_enable(struct clk_hw *hw)
+{
+	int ret = 0;
+	const struct freq_tbl *f;
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	if (rcg->flags & FORCE_ENABLE_RCGR) {
+		/* No rate set yet: assume the safe XO frequency. */
+		if (!rcg->current_freq)
+			rcg->current_freq = cxo_f.freq;
+
+		/* Resolve the parent index of the running source. */
+		if (rcg->current_freq == cxo_f.freq)
+			rcg->curr_index = 0;
+		else {
+			/*
+			 * NOTE(review): qcom_find_freq() may return NULL if
+			 * current_freq is not a table rate -- confirm callers
+			 * only ever cache table rates here.
+			 */
+			f = qcom_find_freq(rcg->freq_tbl, rcg->current_freq);
+			rcg->curr_index = qcom_find_src_index(hw,
+						rcg->parent_map, f->src);
+		}
+
+		ret = clk_enable_disable_prepare_unprepare(hw, rcg->curr_index,
+					rcg->new_index, true);
+		if (ret) {
+			pr_err("Failed to prepare_enable new and current sources\n");
+			return ret;
+		}
+
+		clk_rcg_set_force_enable(hw);
+
+		/* Sources no longer need to be held on by us. */
+		clk_enable_disable_prepare_unprepare(hw, rcg->curr_index,
+					rcg->new_index, false);
+	}
+
+	return ret;
+}
+
+/* Clear the force-enable (CMD_ROOT_EN) bit set by clk_rcg2_enable(). */
+static void clk_rcg2_disable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	if (rcg->flags & FORCE_ENABLE_RCGR) {
+		/* force disable RCG - clear CMD_ROOT_EN bit */
+		regmap_update_bits(rcg->clkr.regmap,
+			rcg->cmd_rcgr + CMD_REG, CMD_ROOT_EN, 0);
+		/* Add a delay to disable the RCG */
+		udelay(100);
+	}
+}
+
+
+/*
+ * __clk_rcg2_set_rate() - program the RCG to @rate.
+ *
+ * For FORCE_ENABLE_RCGR clocks the root is force-enabled (with the old
+ * and new sources held on) around the configuration so the update
+ * completes even when the RCG is otherwise gated.
+ */
 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	const struct freq_tbl *f;
+	int ret = 0;
+
+	/* Current frequency */
+	if (rcg->flags & FORCE_ENABLE_RCGR)
+		rcg->current_freq = clk_get_rate(hw->clk);
 
 	f = qcom_find_freq(rcg->freq_tbl, rate);
 	if (!f)
 		return -EINVAL;
 
-	return clk_rcg2_configure(rcg, f);
+	/* New parent index */
+	if (rcg->flags & FORCE_ENABLE_RCGR) {
+		rcg->new_index = qcom_find_src_index(hw,
+					rcg->parent_map, f->src);
+		/* Force-enable the root across the switch. */
+		ret = clk_rcg2_enable(hw);
+		if (ret) {
+			pr_err("Failed to enable rcg\n");
+			return ret;
+		}
+	}
+
+	ret = clk_rcg2_configure(rcg, f);
+	if (ret)
+		return ret;
+
+	/* Drop the force-enable taken above. */
+	if (rcg->flags & FORCE_ENABLE_RCGR)
+		clk_rcg2_disable(hw);
+
+	return ret;
 }
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 423e975dffee..c762a387068b 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -11,6 +11,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -286,4 +287,221 @@ int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
}
EXPORT_SYMBOL_GPL(qcom_cc_probe);
+/* Debugfs Support */
+/* Global debug mux used for all rate measurements; set once, at the
+ * registration of the first clock carrying CLK_IS_MEASURE. */
+static struct clk_hw *measure;
+
+/* Serializes access to the debug/measurement registers. */
+DEFINE_SPINLOCK(clk_reg_lock);
+
+/* Sample clock for 'ticks' reference clock ticks. */
+/*
+ * NOTE(review): both wait loops poll status BIT(25) with no timeout;
+ * if the debug counter hardware wedges, this spins forever (with the
+ * caller holding clk_reg_lock).  Confirm a bound is not needed.
+ */
+static u32 run_measurement(unsigned ticks, struct regmap *regmap,
+		u32 ctl_reg, u32 status_reg)
+{
+	u32 regval;
+
+	/* Stop counters and set the XO4 counter start value. */
+	regmap_write(regmap, ctl_reg, ticks);
+
+	regmap_read(regmap, status_reg, &regval);
+
+	/* Wait for timer to become ready. */
+	while ((regval & BIT(25)) != 0) {
+		cpu_relax();
+		regmap_read(regmap, status_reg, &regval);
+	}
+
+	/* Run measurement and wait for completion. */
+	regmap_write(regmap, ctl_reg, (BIT(20)|ticks));
+	regmap_read(regmap, ctl_reg, &regval);
+
+	regmap_read(regmap, status_reg, &regval);
+
+	while ((regval & BIT(25)) == 0) {
+		cpu_relax();
+		regmap_read(regmap, status_reg, &regval);
+	}
+
+	/* Return measured ticks. */
+	regmap_read(regmap, status_reg, &regval);
+	regval &= BM(24, 0);
+
+	return regval;
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ *
+ * Returns the measured rate in Hz, or 0 when the clock is off.
+ */
+static unsigned long clk_debug_mux_measure_rate(struct clk_hw *hw)
+{
+	unsigned long flags, ret = 0;
+	u32 gcc_xo4_reg, sample_ticks = 0x10000, multiplier = 1;
+	u64 raw_count_short, raw_count_full;
+	struct clk_debug_mux *meas = to_clk_measure(hw);
+	struct measure_clk_data *data = meas->priv;
+
+	/*
+	 * clk_prepare_enable() may sleep, so it must be called before
+	 * taking the spinlock (and the matching disable after release);
+	 * sleeping with interrupts off would deadlock.
+	 */
+	clk_prepare_enable(data->cxo);
+
+	spin_lock_irqsave(&clk_reg_lock, flags);
+
+	/* Enable CXO/4 and RINGOSC branch. */
+	regmap_read(meas->regmap[GCC], data->xo_div4_cbcr, &gcc_xo4_reg);
+	gcc_xo4_reg |= BIT(0);
+	regmap_write(meas->regmap[GCC], data->xo_div4_cbcr, gcc_xo4_reg);
+
+	/*
+	 * The ring oscillator counter will not reset if the measured clock
+	 * is not running. To detect this, run a short measurement before
+	 * the full measurement. If the raw results of the two are the same
+	 * then the clock must be off.
+	 */
+
+	/* Run a short measurement. (~1 ms) */
+	raw_count_short = run_measurement(0x1000, meas->regmap[GCC],
+				data->ctl_reg, data->status_reg);
+
+	/* Run a full measurement. (~14 ms) */
+	raw_count_full = run_measurement(sample_ticks, meas->regmap[GCC],
+				data->ctl_reg, data->status_reg);
+
+	gcc_xo4_reg &= ~BIT(0);
+	regmap_write(meas->regmap[GCC], data->xo_div4_cbcr, gcc_xo4_reg);
+
+	/* Return 0 if the clock is off. */
+	if (raw_count_full == raw_count_short)
+		ret = 0;
+	else {
+		/* Compute rate in Hz. */
+		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
+		do_div(raw_count_full, ((sample_ticks * 10) + 35));
+		ret = (raw_count_full * multiplier);
+	}
+
+	spin_unlock_irqrestore(&clk_reg_lock, flags);
+
+	clk_disable_unprepare(data->cxo);
+
+	return ret;
+}
+
+/* Find the mux index whose source-table name matches the parent name. */
+static u8 clk_debug_mux_get_parent(struct clk_hw *hw)
+{
+	struct clk_debug_mux *meas = to_clk_measure(hw);
+	int idx, count = clk_hw_get_num_parents(hw);
+
+	for (idx = 0; idx < count; idx++) {
+		if (strcmp(meas->parent[idx].parents,
+				hw->init->parent_names[idx]))
+			continue;
+		pr_debug("%s :Clock name %s index %d\n", __func__,
+				hw->init->name, idx);
+		return idx;
+	}
+
+	return 0;
+}
+
+/*
+ * Route source @index onto the debug mux chain: first program the
+ * recursive (non-GCC) mux if the source lives behind one, then program
+ * the top-level GCC debug mux select.
+ */
+static int clk_debug_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_debug_mux *meas = to_clk_measure(hw);
+	u32 regval = 0;
+	int dbg_cc = 0;
+
+	dbg_cc = meas->parent[index].dbg_cc;
+
+	if (dbg_cc != GCC) {
+		regmap_read(meas->regmap[dbg_cc], 0x0, &regval);
+
+		/*
+		 * Clear the recursive mux's select field.  The mask must
+		 * be shifted before inversion -- "~mask << shift" would
+		 * clear all bits below the field as well.
+		 */
+		if (meas->parent[index].mask)
+			regval &= ~(meas->parent[index].mask <<
+					meas->parent[index].shift);
+		else
+			regval &= ~meas->mask;
+
+		regval |= (meas->parent[index].next_sel & meas->mask);
+
+		/* en_mask == 0xFF flags a mux with no enable bit at all. */
+		if (meas->parent[index].en_mask != 0xFF) {
+			if (meas->parent[index].en_mask)
+				regval |= meas->parent[index].en_mask;
+			else
+				regval |= meas->en_mask;
+		}
+
+		regmap_write(meas->regmap[dbg_cc], 0x0, regval);
+	}
+
+	/* update the debug sel for GCC */
+	regmap_read(meas->regmap[GCC], meas->debug_offset, &regval);
+
+	/* clear post divider bits */
+	regval &= ~BM(15, 12);
+	regval &= ~meas->mask;
+	regval |= (meas->parent[index].sel & meas->mask);
+	regval |= meas->en_mask;
+
+	regmap_write(meas->regmap[GCC], meas->debug_offset, regval);
+
+	return 0;
+}
+
+/* The debug mux only routes sources; rates come from measure_rate. */
+const struct clk_ops clk_debug_mux_ops = {
+	.get_parent = clk_debug_mux_get_parent,
+	.set_parent = clk_debug_mux_set_parent,
+};
+EXPORT_SYMBOL_GPL(clk_debug_mux_ops);
+
+/*
+ * debugfs getter: route @data (a clk_hw) onto the measurement mux,
+ * enable the clocks between the mux and the target, and report the
+ * measured rate through *@val.
+ */
+static int clk_debug_measure_get(void *data, u64 *val)
+{
+	struct clk_hw *hw = data, *par;
+	int ret = 0;
+	unsigned long meas_rate, sw_rate;
+
+	/* Never leave the output uninitialized on the failure path. */
+	*val = 0;
+
+	ret = clk_set_parent(measure->clk, hw->clk);
+	if (!ret) {
+		/* Enable every clock on the path from the mux to @hw. */
+		par = measure;
+		while (par && par != hw) {
+			if (par->init->ops->enable)
+				par->init->ops->enable(par);
+			par = clk_hw_get_parent(par);
+		}
+		*val = clk_debug_mux_measure_rate(measure);
+	}
+
+	/*
+	 * Scale the result when the SW-cached rate indicates the clock
+	 * runs at a multiple of what the counter can observe.
+	 */
+	meas_rate = clk_get_rate(hw->clk);
+	sw_rate = clk_get_rate(clk_hw_get_parent(measure)->clk);
+	if (sw_rate && meas_rate >= (sw_rate * 2))
+		*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clk_measure_fops, clk_debug_measure_get,
+ NULL, "%lld\n");
+
+/*
+ * Attach a debugfs "measure" file under @dentry for clock @hw, provided
+ * the global measurement mux exists and @hw can be routed to it via
+ * clk_set_parent().  Silently does nothing otherwise.
+ */
+void clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry)
+{
+	if (IS_ERR_OR_NULL(measure))
+		return;
+
+	if (clk_set_parent(measure->clk, hw->clk))
+		return;
+
+	debugfs_create_file("measure", S_IRUGO, dentry, hw,
+					&clk_measure_fops);
+}
+EXPORT_SYMBOL_GPL(clk_debug_measure_add);
+
+/*
+ * Record the first clock registered with CLK_IS_MEASURE as the global
+ * measurement mux and give it a "measure" debugfs file of its own.
+ * Until that happens, clk_debug_measure_add() is a no-op for all
+ * other clocks.
+ */
+int clk_register_debug(struct clk_hw *hw, struct dentry *dentry)
+{
+	if (IS_ERR_OR_NULL(measure)) {
+		if (hw->init->flags & CLK_IS_MEASURE)
+			measure = hw;
+		if (!IS_ERR_OR_NULL(measure))
+			clk_debug_measure_add(hw, dentry);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(clk_register_debug);
+
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index e3f450533470..841367eb21ff 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -51,4 +51,95 @@ extern int qcom_cc_really_probe(struct platform_device *pdev,
extern int qcom_cc_probe(struct platform_device *pdev,
const struct qcom_cc_desc *desc);
extern struct clk_ops clk_dummy_ops;
+
+/* Debugfs Measure Clocks */
+
+/**
+ * struct measure_clk_data - Structure of clk measure
+ *
+ * @cxo: XO clock, enabled for the duration of a measurement.
+ * @xo_div4_cbcr: offset of debug XO/4 div register (in the GCC regmap).
+ * @ctl_reg: offset of debug control register (in the GCC regmap).
+ * @status_reg: offset of debug status register (in the GCC regmap).
+ *
+ */
+struct measure_clk_data {
+	struct clk *cxo;
+	u32 xo_div4_cbcr;
+	u32 ctl_reg;
+	u32 status_reg;
+};
+
+/**
+ * List of Debug clock controllers.
+ */
+enum debug_cc {
+	GCC,	/* global clock controller (hosts the root debug mux) */
+	MMCC,	/* multimedia clock controller */
+	GPU,	/* graphics clock controller */
+	CPU,	/* CPU clock controller */
+};
+
+/**
+ * struct clk_src - Structure of clock source for debug mux
+ *
+ * @parents: clock name to be used as parent for debug mux.
+ * @sel: debug mux index at global clock controller.
+ * @dbg_cc: indicates the clock controller for recursive debug clock
+ * controllers.
+ * @next_sel: indicates the debug mux index at recursive debug mux.
+ * @mask: indicates the mask required at recursive debug mux.
+ * @shift: indicates the shift required at recursive debug mux.
+ * @en_mask: indicates the enable bit mask at recursive debug mux.
+ * In case the recursive debug mux does not have an enable bit,
+ * 0xFF should be used to indicate the same, otherwise global
+ * enable bit would be used.
+ */
+struct clk_src {
+	const char *parents;
+	int sel;
+	enum debug_cc dbg_cc;
+	int next_sel;
+	u32 mask;
+	u32 shift;
+	u32 en_mask;
+};
+
+/* Initialize .parent/.num_parents of a clk_debug_mux from a source list. */
+#define MUX_SRC_LIST(...) \
+	.parent = (struct clk_src[]){__VA_ARGS__}, \
+	.num_parents = ARRAY_SIZE(((struct clk_src[]){__VA_ARGS__}))
+
+/**
+ * struct clk_debug_mux - Structure of clock debug mux
+ *
+ * @parent: structure of clk_src
+ * @num_parents: number of parents
+ * @regmap: regmaps of debug mux
+ * @num_parent_regmap: number of regmap of debug mux
+ * @priv: private measure_clk_data to be used by debug mux
+ * @en_mask: indicates the enable bit mask at global clock
+ * controller debug mux.
+ * @mask: indicates the mask to be used at global clock
+ * controller debug mux.
+ * @debug_offset: Start of debug mux offset.
+ * @hw: handle between common and hardware-specific interfaces.
+ */
+struct clk_debug_mux {
+	struct clk_src *parent;
+	int num_parents;
+	struct regmap **regmap;
+	int num_parent_regmap;
+	void *priv;
+	u32 en_mask;
+	u32 mask;
+	u32 debug_offset;
+	struct clk_hw hw;
+};
+
+#define BM(msb, lsb) (((((uint32_t)-1) << (31-msb)) >> (31-msb+lsb)) << lsb)
+
+#define to_clk_measure(_hw) container_of((_hw), struct clk_debug_mux, hw)
+
+extern const struct clk_ops clk_debug_mux_ops;
+
#endif
diff --git a/drivers/clk/qcom/gcc-msmfalcon.c b/drivers/clk/qcom/gcc-msmfalcon.c
index b5f7e18cf495..5a48fb79c8b2 100644
--- a/drivers/clk/qcom/gcc-msmfalcon.c
+++ b/drivers/clk/qcom/gcc-msmfalcon.c
@@ -705,6 +705,7 @@ static const struct freq_tbl ftbl_hmss_ahb_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
{ }
};
@@ -820,7 +821,7 @@ static const struct freq_tbl ftbl_qspi_ser_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(80200000, P_PLL1_EARLY_DIV_CLK_SRC, 5, 0, 0),
F(160400000, P_GPLL1_OUT_MAIN, 5, 0, 0),
- F(320800000, P_GPLL1_OUT_MAIN, 2.5, 0, 0),
+ F(267333333, P_GPLL1_OUT_MAIN, 3, 0, 0),
{ }
};
@@ -838,7 +839,7 @@ static struct clk_rcg2 qspi_ser_clk_src = {
VDD_DIG_FMAX_MAP3(
LOWER, 80200000,
LOW, 160400000,
- NOMINAL, 320800000),
+ NOMINAL, 267333333),
},
};
@@ -876,6 +877,7 @@ static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
F(75000000, P_PLL0_EARLY_DIV_CLK_SRC, 4, 0, 0),
F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
{ }
};
@@ -905,6 +907,7 @@ static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
F(50000000, P_PLL0_EARLY_DIV_CLK_SRC, 6, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
F(192000000, P_GPLL4_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
};
@@ -929,6 +932,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
static const struct freq_tbl ftbl_ufs_axi_clk_src[] = {
F(50000000, P_PLL0_EARLY_DIV_CLK_SRC, 6, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
{ }
@@ -1045,12 +1049,18 @@ static struct clk_rcg2 usb20_master_clk_src = {
},
};
+static const struct freq_tbl ftbl_usb20_mock_utmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
static struct clk_rcg2 usb20_mock_utmi_clk_src = {
.cmd_rcgr = 0x2f024,
.mnd_width = 0,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .freq_tbl = ftbl_usb20_mock_utmi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb20_mock_utmi_clk_src",
.parent_names = gcc_parent_names_0,
diff --git a/drivers/clk/qcom/mdss/Kconfig b/drivers/clk/qcom/mdss/Kconfig
index 229780e45bb8..7213e375f1ef 100644
--- a/drivers/clk/qcom/mdss/Kconfig
+++ b/drivers/clk/qcom/mdss/Kconfig
@@ -1,5 +1,6 @@
-config MSM_MDSS_PLL
+config QCOM_MDSS_PLL
bool "MDSS pll programming"
+ depends on COMMON_CLK_QCOM
---help---
It provides support for DSI, eDP and HDMI interface pll programming on MDSS
hardware. It also handles the pll specific resources and turn them on/off when
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index 75891dc10dda..6a0a1de1e942 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,5 +1,4 @@
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm-util.o
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
index 6d2694d5a2e9..a4044955c68f 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
@@ -16,37 +16,18 @@
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
-#include <linux/clk/msm-clock-generic.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
-#include "mdss-dsi-pll-8996.h"
+#include "mdss-dsi-pll-14nm.h"
#define DSI_PLL_POLL_MAX_READS 15
#define DSI_PLL_POLL_TIMEOUT_US 1000
#define MSM8996_DSI_PLL_REVISION_2 2
-#define CEIL(x, y) (((x) + ((y)-1)) / (y))
-
-int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel)
-{
- return 0;
-}
-
-int get_mdss_byte_mux_sel_8996(struct mux_clk *clk)
-{
- return 0;
-}
-
-int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel)
-{
- return 0;
-}
+#define VCO_REF_CLK_RATE 19200000
-int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk)
-{
- return 0;
-}
+#define CEIL(x, y) (((x) + ((y)-1)) / (y))
static int mdss_pll_read_stored_trim_codes(
struct mdss_pll_resources *dsi_pll_res, s64 vco_clk_rate)
@@ -94,9 +75,9 @@ end_read:
return rc;
}
-int post_n1_div_set_div(struct div_clk *clk, int div)
+int post_n1_div_set_div(void *context, unsigned int reg, unsigned int div)
{
- struct mdss_pll_resources *pll = clk->priv;
+ struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
int rc;
@@ -108,6 +89,9 @@ int post_n1_div_set_div(struct div_clk *clk, int div)
return rc;
}
+ /* in common clock framework the divider value provided is one less */
+ div++;
+
pdb = (struct dsi_pll_db *)pll->priv;
pout = &pdb->out;
@@ -120,8 +104,6 @@ int post_n1_div_set_div(struct div_clk *clk, int div)
* support bit_clk above 86.67Mhz
*/
- /* this is for vco/bit clock */
- pout->pll_postdiv = 1; /* fixed, divided by 1 */
pout->pll_n1div = div;
n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
@@ -138,11 +120,15 @@ int post_n1_div_set_div(struct div_clk *clk, int div)
return 0;
}
-int post_n1_div_get_div(struct div_clk *clk)
+int post_n1_div_get_div(void *context, unsigned int reg, unsigned int *div)
{
- u32 div;
int rc;
- struct mdss_pll_resources *pll = clk->priv;
+ struct mdss_pll_resources *pll = context;
+ struct dsi_pll_db *pdb;
+ struct dsi_pll_output *pout;
+
+ pdb = (struct dsi_pll_db *)pll->priv;
+ pout = &pdb->out;
if (is_gdsc_disabled(pll))
return 0;
@@ -159,20 +145,33 @@ int post_n1_div_get_div(struct div_clk *clk)
* fot the time being, assume postdiv = 1
*/
- div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
- div &= 0xF;
- pr_debug("n1 div = %d\n", div);
+ *div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+ *div &= 0xF;
+
+ /*
+ * initialize n1div here, it will get updated when
+ * corresponding set_div is called.
+ */
+ pout->pll_n1div = *div;
+
+ /* common clock framework will add one to the divider value sent */
+ if (*div == 0)
+ *div = 1; /* value of zero means div is 2 as per SWI */
+ else
+ *div -= 1;
+
+ pr_debug("post n1 get div = %d\n", *div);
mdss_pll_resource_enable(pll, false);
- return div;
+ return rc;
}
-int n2_div_set_div(struct div_clk *clk, int div)
+int n2_div_set_div(void *context, unsigned int reg, unsigned int div)
{
int rc;
u32 n2div;
- struct mdss_pll_resources *pll = clk->priv;
+ struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
struct mdss_pll_resources *slave;
@@ -183,6 +182,12 @@ int n2_div_set_div(struct div_clk *clk, int div)
return rc;
}
+ /*
+ * in common clock framework the actual divider value
+ * provided is one less.
+ */
+ div++;
+
pdb = (struct dsi_pll_db *)pll->priv;
pout = &pdb->out;
@@ -208,9 +213,9 @@ int n2_div_set_div(struct div_clk *clk, int div)
return rc;
}
-int shadow_n2_div_set_div(struct div_clk *clk, int div)
+int shadow_n2_div_set_div(void *context, unsigned int reg, unsigned int div)
{
- struct mdss_pll_resources *pll = clk->priv;
+ struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
u32 data;
@@ -218,6 +223,12 @@ int shadow_n2_div_set_div(struct div_clk *clk, int div)
pdb = pll->priv;
pout = &pdb->out;
+ /*
+ * in common clock framework the actual divider value
+ * provided is one less.
+ */
+ div++;
+
pout->pll_n2div = div;
data = (pout->pll_n1div | (pout->pll_n2div << 4));
@@ -228,15 +239,20 @@ int shadow_n2_div_set_div(struct div_clk *clk, int div)
return 0;
}
-int n2_div_get_div(struct div_clk *clk)
+int n2_div_get_div(void *context, unsigned int reg, unsigned int *div)
{
int rc;
u32 n2div;
- struct mdss_pll_resources *pll = clk->priv;
+ struct mdss_pll_resources *pll = context;
+ struct dsi_pll_db *pdb;
+ struct dsi_pll_output *pout;
if (is_gdsc_disabled(pll))
return 0;
+ pdb = (struct dsi_pll_db *)pll->priv;
+ pout = &pdb->out;
+
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll=%d resources\n",
@@ -247,15 +263,27 @@ int n2_div_get_div(struct div_clk *clk)
n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
n2div >>= 4;
n2div &= 0x0f;
-
+ /*
+ * initialize n2div here, it will get updated when
+ * corresponding set_div is called.
+ */
+ pout->pll_n2div = n2div;
mdss_pll_resource_enable(pll, false);
- pr_debug("ndx=%d div=%d\n", pll->index, n2div);
+ *div = n2div;
+
+ /* common clock framework will add one to the divider value sent */
+ if (*div == 0)
+ *div = 1; /* value of zero means div is 2 as per SWI */
+ else
+ *div -= 1;
+
+ pr_debug("ndx=%d div=%d\n", pll->index, *div);
- return n2div;
+ return rc;
}
-static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
+static bool pll_is_pll_locked_14nm(struct mdss_pll_resources *pll)
{
u32 status;
bool pll_locked;
@@ -286,7 +314,7 @@ static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
return pll_locked;
}
-static void dsi_pll_start_8996(void __iomem *pll_base)
+static void dsi_pll_start_14nm(void __iomem *pll_base)
{
pr_debug("start PLL at base=%p\n", pll_base);
@@ -294,14 +322,14 @@ static void dsi_pll_start_8996(void __iomem *pll_base)
MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
}
-static void dsi_pll_stop_8996(void __iomem *pll_base)
+static void dsi_pll_stop_14nm(void __iomem *pll_base)
{
pr_debug("stop PLL at base=%p\n", pll_base);
MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
}
-int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
+int dsi_pll_enable_seq_14nm(struct mdss_pll_resources *pll)
{
int rc = 0;
@@ -310,14 +338,14 @@ int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
return -EINVAL;
}
- dsi_pll_start_8996(pll->pll_base);
+ dsi_pll_start_14nm(pll->pll_base);
/*
* both DSIPHY_PLL_CLKBUFLR_EN and DSIPHY_CMN_GLBL_TEST_CTRL
- * enabled at mdss_dsi_8996_phy_config()
+ * enabled at mdss_dsi_14nm_phy_config()
*/
- if (!pll_is_pll_locked_8996(pll)) {
+ if (!pll_is_pll_locked_14nm(pll)) {
pr_err("DSI PLL ndx=%d lock failed\n", pll->index);
rc = -EINVAL;
goto init_lock_err;
@@ -329,10 +357,10 @@ init_lock_err:
return rc;
}
-static int dsi_pll_enable(struct clk *c)
+static int dsi_pll_enable(struct clk_hw *hw)
{
int i, rc = 0;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
/* Try all enable sequences until one succeeds */
@@ -352,9 +380,9 @@ static int dsi_pll_enable(struct clk *c)
return rc;
}
-static void dsi_pll_disable(struct clk *c)
+static void dsi_pll_disable(struct clk_hw *hw)
{
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
struct mdss_pll_resources *slave;
@@ -367,7 +395,7 @@ static void dsi_pll_disable(struct clk *c)
pll->handoff_resources = false;
slave = pll->slave;
- dsi_pll_stop_8996(pll->pll_base);
+ dsi_pll_stop_14nm(pll->pll_base);
mdss_pll_resource_enable(pll, false);
@@ -376,7 +404,7 @@ static void dsi_pll_disable(struct clk *c)
pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
}
-static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
+static void mdss_dsi_pll_14nm_input_init(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
pdb->in.fref = 19200000; /* 19.2 Mhz*/
@@ -414,9 +442,10 @@ static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
pdb->in.pll_iptat_trim = 7;
pdb->in.pll_c3ctrl = 2; /* 2 */
pdb->in.pll_r3ctrl = 1; /* 1 */
+ pdb->out.pll_postdiv = 1;
}
-static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
+static void pll_14nm_ssc_calc(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
u32 period, ssc_period;
@@ -457,7 +486,7 @@ static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
pdb->out.ssc_step_size = step_size;
}
-static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
+static void pll_14nm_dec_frac_calc(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
struct dsi_pll_input *pin = &pdb->in;
@@ -501,7 +530,7 @@ static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
pout->cmn_ldo_cntrl = 0x1c;
}
-static u32 pll_8996_kvco_slop(u32 vrate)
+static u32 pll_14nm_kvco_slop(u32 vrate)
{
u32 slop = 0;
@@ -515,7 +544,7 @@ static u32 pll_8996_kvco_slop(u32 vrate)
return slop;
}
-static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
+static void pll_14nm_calc_vco_count(struct dsi_pll_db *pdb,
s64 vco_clk_rate, s64 fref)
{
struct dsi_pll_input *pin = &pdb->in;
@@ -540,7 +569,7 @@ static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
data -= 1;
pout->pll_kvco_div_ref = data;
- cnt = pll_8996_kvco_slop(vco_clk_rate);
+ cnt = pll_14nm_kvco_slop(vco_clk_rate);
cnt *= 2;
do_div(cnt, 100);
cnt *= pin->kvco_measure_time;
@@ -659,7 +688,7 @@ static void pll_db_commit_common(struct mdss_pll_resources *pll,
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
}
-static void pll_db_commit_8996(struct mdss_pll_resources *pll,
+static void pll_db_commit_14nm(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
void __iomem *pll_base = pll->pll_base;
@@ -753,7 +782,7 @@ static void pll_db_commit_8996(struct mdss_pll_resources *pll,
/*
* pll_source_finding:
* Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
- * at mdss_dsi_8996_phy_config()
+ * at mdss_dsi_14nm_phy_config()
*/
static int pll_source_finding(struct mdss_pll_resources *pll)
{
@@ -820,10 +849,59 @@ static void pll_source_setup(struct mdss_pll_resources *pll)
other->slave = pll;
}
-int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+unsigned long pll_vco_recalc_rate_14nm(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
+ struct mdss_pll_resources *pll = vco->priv;
+ u64 vco_rate, multiplier = BIT(20);
+ s32 div_frac_start;
+ u32 dec_start;
+ u64 ref_clk = vco->ref_clk_rate;
+ int rc;
+
+ if (pll->vco_current_rate)
+ return (unsigned long)pll->vco_current_rate;
+
+ if (is_gdsc_disabled(pll))
+ return 0;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return 0;
+ }
+
+ dec_start = MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_DEC_START);
+ dec_start &= 0x0ff;
+ pr_debug("dec_start = 0x%x\n", dec_start);
+
+ div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
+ div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
+ div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
+ pr_debug("div_frac_start = 0x%x\n", div_frac_start);
+
+ vco_rate = ref_clk * dec_start;
+ vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+ pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+ mdss_pll_resource_enable(pll, false);
+
+ pr_debug("%s: returning vco rate as %lu\n",
+ __func__, (unsigned long)vco_rate);
+ return (unsigned long)vco_rate;
+}
+
+int pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
int rc;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
struct mdss_pll_resources *slave;
struct dsi_pll_db *pdb;
@@ -848,30 +926,30 @@ int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
pll->vco_current_rate = rate;
pll->vco_ref_clk_rate = vco->ref_clk_rate;
- mdss_dsi_pll_8996_input_init(pll, pdb);
+ mdss_dsi_pll_14nm_input_init(pll, pdb);
- pll_8996_dec_frac_calc(pll, pdb);
+ pll_14nm_dec_frac_calc(pll, pdb);
if (pll->ssc_en)
- pll_8996_ssc_calc(pll, pdb);
+ pll_14nm_ssc_calc(pll, pdb);
- pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+ pll_14nm_calc_vco_count(pdb, pll->vco_current_rate,
pll->vco_ref_clk_rate);
/* commit slave if split display is enabled */
slave = pll->slave;
if (slave)
- pll_db_commit_8996(slave, pdb);
+ pll_db_commit_14nm(slave, pdb);
/* commit master itself */
- pll_db_commit_8996(pll, pdb);
+ pll_db_commit_14nm(pll, pdb);
mdss_pll_resource_enable(pll, false);
return rc;
}
-static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
+static void shadow_pll_dynamic_refresh_14nm(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
struct dsi_pll_output *pout = &pdb->out;
@@ -931,10 +1009,11 @@ static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
wmb();
}
-int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+int shadow_pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
int rc;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
struct dsi_pll_db *pdb;
s64 vco_clk_rate = (s64)rate;
@@ -968,14 +1047,14 @@ int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
pll->vco_current_rate = rate;
pll->vco_ref_clk_rate = vco->ref_clk_rate;
- mdss_dsi_pll_8996_input_init(pll, pdb);
+ mdss_dsi_pll_14nm_input_init(pll, pdb);
- pll_8996_dec_frac_calc(pll, pdb);
+ pll_14nm_dec_frac_calc(pll, pdb);
- pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+ pll_14nm_calc_vco_count(pdb, pll->vco_current_rate,
pll->vco_ref_clk_rate);
- shadow_pll_dynamic_refresh_8996(pll, pdb);
+ shadow_pll_dynamic_refresh_14nm(pll, pdb);
rc = mdss_pll_resource_enable(pll, false);
if (rc) {
@@ -986,53 +1065,12 @@ int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
return rc;
}
-unsigned long pll_vco_get_rate_8996(struct clk *c)
-{
- u64 vco_rate, multiplier = BIT(20);
- s32 div_frac_start;
- u32 dec_start;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- u64 ref_clk = vco->ref_clk_rate;
- int rc;
- struct mdss_pll_resources *pll = vco->priv;
-
- if (is_gdsc_disabled(pll))
- return 0;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
- return rc;
- }
-
- dec_start = MDSS_PLL_REG_R(pll->pll_base,
- DSIPHY_PLL_DEC_START);
- dec_start &= 0x0ff;
- pr_debug("dec_start = 0x%x\n", dec_start);
-
- div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
- DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
- div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
- DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
- div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
- DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
- pr_debug("div_frac_start = 0x%x\n", div_frac_start);
-
- vco_rate = ref_clk * dec_start;
- vco_rate += ((ref_clk * div_frac_start) / multiplier);
-
- pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
- mdss_pll_resource_enable(pll, false);
-
- return (unsigned long)vco_rate;
-}
-
-long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
+long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
{
unsigned long rrate = rate;
u32 div;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
div = vco->min_rate / rate;
if (div > 15) {
@@ -1046,46 +1084,14 @@ long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
if (rate > vco->max_rate)
rrate = vco->max_rate;
+ *parent_rate = rrate;
return rrate;
}
-enum handoff pll_vco_handoff_8996(struct clk *c)
-{
- int rc;
- enum handoff ret = HANDOFF_DISABLED_CLK;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- struct mdss_pll_resources *pll = vco->priv;
-
- if (is_gdsc_disabled(pll))
- return HANDOFF_DISABLED_CLK;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
- return ret;
- }
-
- if (pll_is_pll_locked_8996(pll)) {
- pll->handoff_resources = true;
- pll->pll_on = true;
- c->rate = pll_vco_get_rate_8996(c);
- ret = HANDOFF_ENABLED_CLK;
- } else {
- mdss_pll_resource_enable(pll, false);
- }
-
- return ret;
-}
-
-enum handoff shadow_pll_vco_handoff_8996(struct clk *c)
-{
- return HANDOFF_DISABLED_CLK;
-}
-
-int pll_vco_prepare_8996(struct clk *c)
+int pll_vco_prepare_14nm(struct clk_hw *hw)
{
int rc = 0;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
if (!pll) {
@@ -1101,8 +1107,9 @@ int pll_vco_prepare_8996(struct clk *c)
}
if ((pll->vco_cached_rate != 0)
- && (pll->vco_cached_rate == c->rate)) {
- rc = c->ops->set_rate(c, pll->vco_cached_rate);
+ && (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+ rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
+ pll->vco_cached_rate);
if (rc) {
pr_err("index=%d vco_set_rate failed. rc=%d\n",
rc, pll->index);
@@ -1111,7 +1118,7 @@ int pll_vco_prepare_8996(struct clk *c)
}
}
- rc = dsi_pll_enable(c);
+ rc = dsi_pll_enable(hw);
if (rc) {
mdss_pll_resource_enable(pll, false);
@@ -1122,9 +1129,9 @@ error:
return rc;
}
-void pll_vco_unprepare_8996(struct clk *c)
+void pll_vco_unprepare_14nm(struct clk_hw *hw)
{
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
if (!pll) {
@@ -1132,6 +1139,17 @@ void pll_vco_unprepare_8996(struct clk *c)
return;
}
- pll->vco_cached_rate = c->rate;
- dsi_pll_disable(c);
+ pll->vco_cached_rate = clk_hw_get_rate(hw);
+ dsi_pll_disable(hw);
+}
+
+int dsi_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val)
+{
+ return 0;
+}
+
+int dsi_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val)
+{
+ *val = 0;
+ return 0;
}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c
new file mode 100644
index 000000000000..667a1512d54f
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c
@@ -0,0 +1,614 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-14nm.h"
+
+#define VCO_DELAY_USEC 1
+
+static struct dsi_pll_db pll_db[DSI_PLL_NUM];
+
+static struct regmap_config dsi_pll_14nm_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x588,
+};
+
+static struct regmap_bus post_n1_div_regmap_bus = {
+ .reg_write = post_n1_div_set_div,
+ .reg_read = post_n1_div_get_div,
+};
+
+static struct regmap_bus n2_div_regmap_bus = {
+ .reg_write = n2_div_set_div,
+ .reg_read = n2_div_get_div,
+};
+
+static struct regmap_bus shadow_n2_div_regmap_bus = {
+ .reg_write = shadow_n2_div_set_div,
+ .reg_read = n2_div_get_div,
+};
+
+static struct regmap_bus dsi_mux_regmap_bus = {
+ .reg_write = dsi_mux_set_parent_14nm,
+ .reg_read = dsi_mux_get_parent_14nm,
+};
+
+/* Op structures */
+static struct clk_ops clk_ops_dsi_vco = {
+ .recalc_rate = pll_vco_recalc_rate_14nm,
+ .set_rate = pll_vco_set_rate_14nm,
+ .round_rate = pll_vco_round_rate_14nm,
+ .prepare = pll_vco_prepare_14nm,
+ .unprepare = pll_vco_unprepare_14nm,
+};
+
+/* Shadow ops for dynamic refresh */
+static struct clk_ops clk_ops_shadow_dsi_vco = {
+ .recalc_rate = pll_vco_recalc_rate_14nm,
+ .set_rate = shadow_pll_vco_set_rate_14nm,
+ .round_rate = pll_vco_round_rate_14nm,
+};
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+ .ref_clk_rate = 19200000UL,
+ .min_rate = 1300000000UL,
+ .max_rate = 2600000000UL,
+ .pll_en_seq_cnt = 1,
+ .pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_vco_clk_14nm",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_ops_dsi_vco,
+ },
+};
+
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+ .ref_clk_rate = 19200000u,
+ .min_rate = 1300000000u,
+ .max_rate = 2600000000u,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_vco_clk_14nm",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_ops_shadow_dsi_vco,
+ },
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+ .ref_clk_rate = 19200000UL,
+ .min_rate = 1300000000UL,
+ .max_rate = 2600000000UL,
+ .pll_en_seq_cnt = 1,
+ .pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_vco_clk_14nm",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_ops_dsi_vco,
+ },
+};
+
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+ .ref_clk_rate = 19200000u,
+ .min_rate = 1300000000u,
+ .max_rate = 2600000000u,
+ .pll_en_seq_cnt = 1,
+ .pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_vco_clk_14nm",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_ops_shadow_dsi_vco,
+ },
+};
+
+static struct clk_regmap_div dsi0pll_post_n1_div_clk = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_post_n1_div_clk",
+ .parent_names =
+ (const char *[]){ "dsi0pll_vco_clk_14nm" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi0pll_shadow_post_n1_div_clk = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_post_n1_div_clk",
+ .parent_names =
+ (const char *[]){"dsi0pll_shadow_vco_clk_14nm"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_post_n1_div_clk = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_post_n1_div_clk",
+ .parent_names =
+ (const char *[]){ "dsi1pll_vco_clk_14nm" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_shadow_post_n1_div_clk = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_post_n1_div_clk",
+ .parent_names =
+ (const char *[]){"dsi1pll_shadow_vco_clk_14nm"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi0pll_n2_div_clk = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_n2_div_clk",
+ .parent_names =
+ (const char *[]){ "dsi0pll_post_n1_div_clk" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi0pll_shadow_n2_div_clk = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_n2_div_clk",
+ .parent_names =
+ (const char *[]){ "dsi0pll_shadow_post_n1_div_clk" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_n2_div_clk = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_n2_div_clk",
+ .parent_names =
+ (const char *[]){ "dsi1pll_post_n1_div_clk" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_shadow_n2_div_clk = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_n2_div_clk",
+ .parent_names =
+ (const char *[]){ "dsi1pll_shadow_post_n1_div_clk" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor dsi0pll_pixel_clk_src = {
+ .div = 2,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_pixel_clk_src",
+ .parent_names = (const char *[]){ "dsi0pll_n2_div_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_pixel_clk_src = {
+ .div = 2,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_pixel_clk_src",
+ .parent_names = (const char *[]){ "dsi0pll_shadow_n2_div_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi1pll_pixel_clk_src = {
+ .div = 2,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_pixel_clk_src",
+ .parent_names = (const char *[]){ "dsi1pll_n2_div_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_pixel_clk_src = {
+ .div = 2,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_pixel_clk_src",
+ .parent_names = (const char *[]){ "dsi1pll_shadow_n2_div_clk" },
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_regmap_mux dsi0pll_pixel_clk_mux = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 1,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_pixel_clk_mux",
+ .parent_names =
+ (const char *[]){ "dsi0pll_pixel_clk_src",
+ "dsi0pll_shadow_pixel_clk_src"},
+ .num_parents = 2,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux dsi1pll_pixel_clk_mux = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 1,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_pixel_clk_mux",
+ .parent_names =
+ (const char *[]){ "dsi1pll_pixel_clk_src",
+ "dsi1pll_shadow_pixel_clk_src"},
+ .num_parents = 2,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor dsi0pll_byte_clk_src = {
+ .div = 8,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_byte_clk_src",
+ .parent_names = (const char *[]){ "dsi0pll_post_n1_div_clk" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_byte_clk_src = {
+ .div = 8,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_byte_clk_src",
+ .parent_names =
+ (const char *[]){ "dsi0pll_shadow_post_n1_div_clk" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi1pll_byte_clk_src = {
+ .div = 8,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_byte_clk_src",
+ .parent_names = (const char *[]){ "dsi1pll_post_n1_div_clk" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_byte_clk_src = {
+ .div = 8,
+ .mult = 1,
+
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_byte_clk_src",
+ .parent_names =
+ (const char *[]){ "dsi1pll_shadow_post_n1_div_clk" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_regmap_mux dsi0pll_byte_clk_mux = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 1,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_byte_clk_mux",
+ .parent_names =
+ (const char *[]){"dsi0pll_byte_clk_src",
+ "dsi0pll_shadow_byte_clk_src"},
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ },
+ },
+};
+
+static struct clk_regmap_mux dsi1pll_byte_clk_mux = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 1,
+
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_byte_clk_mux",
+ .parent_names =
+ (const char *[]){"dsi1pll_byte_clk_src",
+ "dsi1pll_shadow_byte_clk_src"},
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ },
+ },
+};
+
+static struct clk_hw *mdss_dsi_pllcc_14nm[] = {
+ [BYTE0_MUX_CLK] = &dsi0pll_byte_clk_mux.clkr.hw,
+ [BYTE0_SRC_CLK] = &dsi0pll_byte_clk_src.hw,
+ [PIX0_MUX_CLK] = &dsi0pll_pixel_clk_mux.clkr.hw,
+ [PIX0_SRC_CLK] = &dsi0pll_pixel_clk_src.hw,
+ [N2_DIV_0_CLK] = &dsi0pll_n2_div_clk.clkr.hw,
+ [POST_N1_DIV_0_CLK] = &dsi0pll_post_n1_div_clk.clkr.hw,
+ [VCO_CLK_0_CLK] = &dsi0pll_vco_clk.hw,
+ [SHADOW_BYTE0_SRC_CLK] = &dsi0pll_shadow_byte_clk_src.hw,
+ [SHADOW_PIX0_SRC_CLK] = &dsi0pll_shadow_pixel_clk_src.hw,
+ [SHADOW_N2_DIV_0_CLK] = &dsi0pll_shadow_n2_div_clk.clkr.hw,
+ [SHADOW_POST_N1_DIV_0_CLK] = &dsi0pll_shadow_post_n1_div_clk.clkr.hw,
+ [SHADOW_VCO_CLK_0_CLK] = &dsi0pll_shadow_vco_clk.hw,
+ [BYTE1_MUX_CLK] = &dsi1pll_byte_clk_mux.clkr.hw,
+ [BYTE1_SRC_CLK] = &dsi1pll_byte_clk_src.hw,
+ [PIX1_MUX_CLK] = &dsi1pll_pixel_clk_mux.clkr.hw,
+ [PIX1_SRC_CLK] = &dsi1pll_pixel_clk_src.hw,
+ [N2_DIV_1_CLK] = &dsi1pll_n2_div_clk.clkr.hw,
+ [POST_N1_DIV_1_CLK] = &dsi1pll_post_n1_div_clk.clkr.hw,
+ [VCO_CLK_1_CLK] = &dsi1pll_vco_clk.hw,
+ [SHADOW_BYTE1_SRC_CLK] = &dsi1pll_shadow_byte_clk_src.hw,
+ [SHADOW_PIX1_SRC_CLK] = &dsi1pll_shadow_pixel_clk_src.hw,
+ [SHADOW_N2_DIV_1_CLK] = &dsi1pll_shadow_n2_div_clk.clkr.hw,
+ [SHADOW_POST_N1_DIV_1_CLK] = &dsi1pll_shadow_post_n1_div_clk.clkr.hw,
+ [SHADOW_VCO_CLK_1_CLK] = &dsi1pll_shadow_vco_clk.hw,
+};
+
+int dsi_pll_clock_register_14nm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc = 0, ndx, i;
+ int const ssc_freq_default = 31500; /* default h/w recommended value */
+ int const ssc_ppm_default = 5000; /* default h/w recommended value */
+ struct dsi_pll_db *pdb;
+ struct clk_onecell_data *clk_data;
+ struct clk *clk;
+ struct regmap *regmap;
+ int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_14nm);
+
+ if (!pdev || !pdev->dev.of_node) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ if (!pll_res || !pll_res->pll_base) {
+ pr_err("Invalid PLL resources\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (pll_res->index >= DSI_PLL_NUM) {
+ pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
+ return -EINVAL;
+ }
+
+ ndx = pll_res->index;
+ pdb = &pll_db[ndx];
+ pll_res->priv = pdb;
+ pdb->pll = pll_res;
+ ndx++;
+ ndx %= DSI_PLL_NUM;
+ pdb->next = &pll_db[ndx];
+
+ if (pll_res->ssc_en) {
+ if (!pll_res->ssc_freq)
+ pll_res->ssc_freq = ssc_freq_default;
+ if (!pll_res->ssc_ppm)
+ pll_res->ssc_ppm = ssc_ppm_default;
+ }
+
+ clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+ sizeof(struct clk *)), GFP_KERNEL);
+ if (!clk_data->clks) {
+ devm_kfree(&pdev->dev, clk_data);
+ return -ENOMEM;
+ }
+
+ clk_data->clk_num = num_clks;
+
+ /* Set client data to mux, div and vco clocks. */
+ if (pll_res->index == DSI_PLL_1) {
+ regmap = devm_regmap_init(&pdev->dev, &post_n1_div_regmap_bus,
+ pll_res, &dsi_pll_14nm_config);
+ dsi1pll_post_n1_div_clk.clkr.regmap = regmap;
+ dsi1pll_shadow_post_n1_div_clk.clkr.regmap = regmap;
+
+ regmap = devm_regmap_init(&pdev->dev, &n2_div_regmap_bus,
+ pll_res, &dsi_pll_14nm_config);
+ dsi1pll_n2_div_clk.clkr.regmap = regmap;
+
+ regmap = devm_regmap_init(&pdev->dev, &shadow_n2_div_regmap_bus,
+ pll_res, &dsi_pll_14nm_config);
+ dsi1pll_shadow_n2_div_clk.clkr.regmap = regmap;
+
+ regmap = devm_regmap_init(&pdev->dev, &dsi_mux_regmap_bus,
+ pll_res, &dsi_pll_14nm_config);
+ dsi1pll_byte_clk_mux.clkr.regmap = regmap;
+ dsi1pll_pixel_clk_mux.clkr.regmap = regmap;
+
+ dsi1pll_vco_clk.priv = pll_res;
+ dsi1pll_shadow_vco_clk.priv = pll_res;
+
+ pll_res->vco_delay = VCO_DELAY_USEC;
+
+ for (i = BYTE1_MUX_CLK; i <= SHADOW_VCO_CLK_1_CLK; i++) {
+ pr_debug("register clk: %d index: %d\n",
+ i, pll_res->index);
+ clk = devm_clk_register(&pdev->dev,
+ mdss_dsi_pllcc_14nm[i]);
+ if (IS_ERR(clk)) {
+ pr_err("clk registration failed for DSI: %d\n",
+ pll_res->index);
+				rc = PTR_ERR(clk);
+ goto clk_reg_fail;
+ }
+ clk_data->clks[i] = clk;
+ }
+
+ rc = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, clk_data);
+ } else {
+ regmap = devm_regmap_init(&pdev->dev, &post_n1_div_regmap_bus,
+ pll_res, &dsi_pll_14nm_config);
+ dsi0pll_post_n1_div_clk.clkr.regmap = regmap;
+ dsi0pll_shadow_post_n1_div_clk.clkr.regmap = regmap;
+
+ regmap = devm_regmap_init(&pdev->dev, &n2_div_regmap_bus,
+ pll_res, &dsi_pll_14nm_config);
+ dsi0pll_n2_div_clk.clkr.regmap = regmap;
+
+ regmap = devm_regmap_init(&pdev->dev, &shadow_n2_div_regmap_bus,
+ pll_res, &dsi_pll_14nm_config);
+ dsi0pll_shadow_n2_div_clk.clkr.regmap = regmap;
+
+ regmap = devm_regmap_init(&pdev->dev, &dsi_mux_regmap_bus,
+ pll_res, &dsi_pll_14nm_config);
+ dsi0pll_byte_clk_mux.clkr.regmap = regmap;
+ dsi0pll_pixel_clk_mux.clkr.regmap = regmap;
+
+ dsi0pll_vco_clk.priv = pll_res;
+ dsi0pll_shadow_vco_clk.priv = pll_res;
+ pll_res->vco_delay = VCO_DELAY_USEC;
+
+ for (i = BYTE0_MUX_CLK; i <= SHADOW_VCO_CLK_0_CLK; i++) {
+ pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+ clk = devm_clk_register(&pdev->dev,
+ mdss_dsi_pllcc_14nm[i]);
+ if (IS_ERR(clk)) {
+ pr_err("clk registration failed for DSI: %d\n",
+ pll_res->index);
+				rc = PTR_ERR(clk);
+ goto clk_reg_fail;
+ }
+ clk_data->clks[i] = clk;
+ }
+
+ rc = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, clk_data);
+ }
+
+ if (!rc) {
+ pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
+ pll_res->index);
+ return rc;
+ }
+
+clk_reg_fail:
+ devm_kfree(&pdev->dev, clk_data->clks);
+ devm_kfree(&pdev->dev, clk_data);
+ return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h
index 611e79101d4f..c2a3b12d8174 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h
@@ -10,8 +10,8 @@
* GNU General Public License for more details.
*/
-#ifndef MDSS_DSI_PLL_8996_H
-#define MDSS_DSI_PLL_8996_H
+#ifndef MDSS_DSI_PLL_14NM_H
+#define MDSS_DSI_PLL_14NM_H
#define DSIPHY_CMN_CLK_CFG0 0x0010
#define DSIPHY_CMN_CLK_CFG1 0x0014
@@ -197,25 +197,31 @@ enum {
PLL_MASTER
};
-int pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
-long pll_vco_round_rate_8996(struct clk *c, unsigned long rate);
-enum handoff pll_vco_handoff_8996(struct clk *c);
-enum handoff shadow_pll_vco_handoff_8996(struct clk *c);
-int shadow_post_n1_div_set_div(struct div_clk *clk, int div);
-int shadow_post_n1_div_get_div(struct div_clk *clk);
-int shadow_n2_div_set_div(struct div_clk *clk, int div);
-int shadow_n2_div_get_div(struct div_clk *clk);
-int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
-int pll_vco_prepare_8996(struct clk *c);
-void pll_vco_unprepare_8996(struct clk *c);
-int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel);
-int get_mdss_byte_mux_sel_8996(struct mux_clk *clk);
-int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel);
-int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk);
-int post_n1_div_set_div(struct div_clk *clk, int div);
-int post_n1_div_get_div(struct div_clk *clk);
-int n2_div_set_div(struct div_clk *clk, int div);
-int n2_div_get_div(struct div_clk *clk);
-int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll);
-
-#endif /* MDSS_DSI_PLL_8996_H */
+int pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+int shadow_pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate);
+unsigned long pll_vco_recalc_rate_14nm(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+int pll_vco_prepare_14nm(struct clk_hw *hw);
+void pll_vco_unprepare_14nm(struct clk_hw *hw);
+
+int shadow_post_n1_div_set_div(void *context,
+ unsigned int reg, unsigned int div);
+int shadow_post_n1_div_get_div(void *context,
+ unsigned int reg, unsigned int *div);
+int shadow_n2_div_set_div(void *context, unsigned int reg, unsigned int div);
+int shadow_n2_div_get_div(void *context, unsigned int reg, unsigned int *div);
+
+int post_n1_div_set_div(void *context, unsigned int reg, unsigned int div);
+int post_n1_div_get_div(void *context, unsigned int reg, unsigned int *div);
+int n2_div_set_div(void *context, unsigned int reg, unsigned int div);
+int n2_div_get_div(void *context, unsigned int reg, unsigned int *div);
+int dsi_pll_enable_seq_14nm(struct mdss_pll_resources *pll);
+int dsi_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val);
+int dsi_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val);
+
+#endif /* MDSS_DSI_PLL_14NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c
deleted file mode 100644
index 1de1b997a041..000000000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c
+++ /dev/null
@@ -1,566 +0,0 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/workqueue.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8996.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-dsi-pll-8996.h"
-
-#define VCO_DELAY_USEC 1
-
-static struct dsi_pll_db pll_db[DSI_PLL_NUM];
-
-static struct clk_ops n2_clk_src_ops;
-static struct clk_ops shadow_n2_clk_src_ops;
-static struct clk_ops byte_clk_src_ops;
-static struct clk_ops post_n1_div_clk_src_ops;
-static struct clk_ops shadow_post_n1_div_clk_src_ops;
-
-static struct clk_ops clk_ops_gen_mux_dsi;
-
-/* Op structures */
-static struct clk_ops clk_ops_dsi_vco = {
- .set_rate = pll_vco_set_rate_8996,
- .round_rate = pll_vco_round_rate_8996,
- .handoff = pll_vco_handoff_8996,
- .prepare = pll_vco_prepare_8996,
- .unprepare = pll_vco_unprepare_8996,
-};
-
-static struct clk_div_ops post_n1_div_ops = {
- .set_div = post_n1_div_set_div,
- .get_div = post_n1_div_get_div,
-};
-
-static struct clk_div_ops n2_div_ops = { /* hr_oclk3 */
- .set_div = n2_div_set_div,
- .get_div = n2_div_get_div,
-};
-
-static struct clk_mux_ops mdss_byte_mux_ops = {
- .set_mux_sel = set_mdss_byte_mux_sel_8996,
- .get_mux_sel = get_mdss_byte_mux_sel_8996,
-};
-
-static struct clk_mux_ops mdss_pixel_mux_ops = {
- .set_mux_sel = set_mdss_pixel_mux_sel_8996,
- .get_mux_sel = get_mdss_pixel_mux_sel_8996,
-};
-
-/* Shadow ops for dynamic refresh */
-static struct clk_ops clk_ops_shadow_dsi_vco = {
- .set_rate = shadow_pll_vco_set_rate_8996,
- .round_rate = pll_vco_round_rate_8996,
- .handoff = shadow_pll_vco_handoff_8996,
-};
-
-static struct clk_div_ops shadow_post_n1_div_ops = {
- .set_div = post_n1_div_set_div,
-};
-
-static struct clk_div_ops shadow_n2_div_ops = {
- .set_div = shadow_n2_div_set_div,
-};
-
-static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
- .ref_clk_rate = 19200000UL,
- .min_rate = 1300000000UL,
- .max_rate = 2600000000UL,
- .pll_en_seq_cnt = 1,
- .pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
- .c = {
- .dbg_name = "dsi0pll_vco_clk_8996",
- .ops = &clk_ops_dsi_vco,
- CLK_INIT(dsi0pll_vco_clk.c),
- },
-};
-
-static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
- .ref_clk_rate = 19200000u,
- .min_rate = 1300000000u,
- .max_rate = 2600000000u,
- .c = {
- .dbg_name = "dsi0pll_shadow_vco_clk",
- .ops = &clk_ops_shadow_dsi_vco,
- CLK_INIT(dsi0pll_shadow_vco_clk.c),
- },
-};
-
-static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
- .ref_clk_rate = 19200000UL,
- .min_rate = 1300000000UL,
- .max_rate = 2600000000UL,
- .pll_en_seq_cnt = 1,
- .pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
- .c = {
- .dbg_name = "dsi1pll_vco_clk_8996",
- .ops = &clk_ops_dsi_vco,
- CLK_INIT(dsi1pll_vco_clk.c),
- },
-};
-
-static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
- .ref_clk_rate = 19200000u,
- .min_rate = 1300000000u,
- .max_rate = 2600000000u,
- .pll_en_seq_cnt = 1,
- .pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
- .c = {
- .dbg_name = "dsi1pll_shadow_vco_clk",
- .ops = &clk_ops_shadow_dsi_vco,
- CLK_INIT(dsi1pll_shadow_vco_clk.c),
- },
-};
-
-static struct div_clk dsi0pll_post_n1_div_clk = {
- .data = {
- .max_div = 15,
- .min_div = 1,
- },
- .ops = &post_n1_div_ops,
- .c = {
- .parent = &dsi0pll_vco_clk.c,
- .dbg_name = "dsi0pll_post_n1_div_clk",
- .ops = &post_n1_div_clk_src_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_post_n1_div_clk.c),
- },
-};
-
-static struct div_clk dsi0pll_shadow_post_n1_div_clk = {
- .data = {
- .max_div = 15,
- .min_div = 1,
- },
- .ops = &shadow_post_n1_div_ops,
- .c = {
- .parent = &dsi0pll_shadow_vco_clk.c,
- .dbg_name = "dsi0pll_shadow_post_n1_div_clk",
- .ops = &shadow_post_n1_div_clk_src_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_shadow_post_n1_div_clk.c),
- },
-};
-
-static struct div_clk dsi1pll_post_n1_div_clk = {
- .data = {
- .max_div = 15,
- .min_div = 1,
- },
- .ops = &post_n1_div_ops,
- .c = {
- .parent = &dsi1pll_vco_clk.c,
- .dbg_name = "dsi1pll_post_n1_div_clk",
- .ops = &post_n1_div_clk_src_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_post_n1_div_clk.c),
- },
-};
-
-static struct div_clk dsi1pll_shadow_post_n1_div_clk = {
- .data = {
- .max_div = 15,
- .min_div = 1,
- },
- .ops = &shadow_post_n1_div_ops,
- .c = {
- .parent = &dsi1pll_shadow_vco_clk.c,
- .dbg_name = "dsi1pll_shadow_post_n1_div_clk",
- .ops = &shadow_post_n1_div_clk_src_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_shadow_post_n1_div_clk.c),
- },
-};
-
-static struct div_clk dsi0pll_n2_div_clk = {
- .data = {
- .max_div = 15,
- .min_div = 1,
- },
- .ops = &n2_div_ops,
- .c = {
- .parent = &dsi0pll_post_n1_div_clk.c,
- .dbg_name = "dsi0pll_n2_div_clk",
- .ops = &n2_clk_src_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_n2_div_clk.c),
- },
-};
-
-static struct div_clk dsi0pll_shadow_n2_div_clk = {
- .data = {
- .max_div = 15,
- .min_div = 1,
- },
- .ops = &shadow_n2_div_ops,
- .c = {
- .parent = &dsi0pll_shadow_post_n1_div_clk.c,
- .dbg_name = "dsi0pll_shadow_n2_div_clk",
- .ops = &shadow_n2_clk_src_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_shadow_n2_div_clk.c),
- },
-};
-
-static struct div_clk dsi1pll_n2_div_clk = {
- .data = {
- .max_div = 15,
- .min_div = 1,
- },
- .ops = &n2_div_ops,
- .c = {
- .parent = &dsi1pll_post_n1_div_clk.c,
- .dbg_name = "dsi1pll_n2_div_clk",
- .ops = &n2_clk_src_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_n2_div_clk.c),
- },
-};
-
-static struct div_clk dsi1pll_shadow_n2_div_clk = {
- .data = {
- .max_div = 15,
- .min_div = 1,
- },
- .ops = &shadow_n2_div_ops,
- .c = {
- .parent = &dsi1pll_shadow_post_n1_div_clk.c,
- .dbg_name = "dsi1pll_shadow_n2_div_clk",
- .ops = &shadow_n2_clk_src_ops,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_shadow_n2_div_clk.c),
- },
-};
-
-static struct div_clk dsi0pll_pixel_clk_src = {
- .data = {
- .div = 2,
- .min_div = 2,
- .max_div = 2,
- },
- .c = {
- .parent = &dsi0pll_n2_div_clk.c,
- .dbg_name = "dsi0pll_pixel_clk_src",
- .ops = &clk_ops_div,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_pixel_clk_src.c),
- },
-};
-
-static struct div_clk dsi0pll_shadow_pixel_clk_src = {
- .data = {
- .div = 2,
- .min_div = 2,
- .max_div = 2,
- },
- .c = {
- .parent = &dsi0pll_shadow_n2_div_clk.c,
- .dbg_name = "dsi0pll_shadow_pixel_clk_src",
- .ops = &clk_ops_div,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_shadow_pixel_clk_src.c),
- },
-};
-
-static struct div_clk dsi1pll_pixel_clk_src = {
- .data = {
- .div = 2,
- .min_div = 2,
- .max_div = 2,
- },
- .c = {
- .parent = &dsi1pll_n2_div_clk.c,
- .dbg_name = "dsi1pll_pixel_clk_src",
- .ops = &clk_ops_div,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_pixel_clk_src.c),
- },
-};
-
-static struct div_clk dsi1pll_shadow_pixel_clk_src = {
- .data = {
- .div = 2,
- .min_div = 2,
- .max_div = 2,
- },
- .c = {
- .parent = &dsi1pll_shadow_n2_div_clk.c,
- .dbg_name = "dsi1pll_shadow_pixel_clk_src",
- .ops = &clk_ops_div,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_shadow_pixel_clk_src.c),
- },
-};
-
-static struct mux_clk dsi0pll_pixel_clk_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&dsi0pll_pixel_clk_src.c, 0},
- {&dsi0pll_shadow_pixel_clk_src.c, 1},
- },
- .ops = &mdss_pixel_mux_ops,
- .c = {
- .parent = &dsi0pll_pixel_clk_src.c,
- .dbg_name = "dsi0pll_pixel_clk_mux",
- .ops = &clk_ops_gen_mux_dsi,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_pixel_clk_mux.c),
- }
-};
-
-static struct mux_clk dsi1pll_pixel_clk_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&dsi1pll_pixel_clk_src.c, 0},
- {&dsi1pll_shadow_pixel_clk_src.c, 1},
- },
- .ops = &mdss_pixel_mux_ops,
- .c = {
- .parent = &dsi1pll_pixel_clk_src.c,
- .dbg_name = "dsi1pll_pixel_clk_mux",
- .ops = &clk_ops_gen_mux_dsi,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_pixel_clk_mux.c),
- }
-};
-
-static struct div_clk dsi0pll_byte_clk_src = {
- .data = {
- .div = 8,
- .min_div = 8,
- .max_div = 8,
- },
- .c = {
- .parent = &dsi0pll_post_n1_div_clk.c,
- .dbg_name = "dsi0pll_byte_clk_src",
- .ops = &clk_ops_div,
- CLK_INIT(dsi0pll_byte_clk_src.c),
- },
-};
-
-static struct div_clk dsi0pll_shadow_byte_clk_src = {
- .data = {
- .div = 8,
- .min_div = 8,
- .max_div = 8,
- },
- .c = {
- .parent = &dsi0pll_shadow_post_n1_div_clk.c,
- .dbg_name = "dsi0pll_shadow_byte_clk_src",
- .ops = &clk_ops_div,
- CLK_INIT(dsi0pll_shadow_byte_clk_src.c),
- },
-};
-
-static struct div_clk dsi1pll_byte_clk_src = {
- .data = {
- .div = 8,
- .min_div = 8,
- .max_div = 8,
- },
- .c = {
- .parent = &dsi1pll_post_n1_div_clk.c,
- .dbg_name = "dsi1pll_byte_clk_src",
- .ops = &clk_ops_div,
- CLK_INIT(dsi1pll_byte_clk_src.c),
- },
-};
-
-static struct div_clk dsi1pll_shadow_byte_clk_src = {
- .data = {
- .div = 8,
- .min_div = 8,
- .max_div = 8,
- },
- .c = {
- .parent = &dsi1pll_shadow_post_n1_div_clk.c,
- .dbg_name = "dsi1pll_shadow_byte_clk_src",
- .ops = &clk_ops_div,
- CLK_INIT(dsi1pll_shadow_byte_clk_src.c),
- },
-};
-
-static struct mux_clk dsi0pll_byte_clk_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&dsi0pll_byte_clk_src.c, 0},
- {&dsi0pll_shadow_byte_clk_src.c, 1},
- },
- .ops = &mdss_byte_mux_ops,
- .c = {
- .parent = &dsi0pll_byte_clk_src.c,
- .dbg_name = "dsi0pll_byte_clk_mux",
- .ops = &clk_ops_gen_mux_dsi,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_byte_clk_mux.c),
- }
-};
-static struct mux_clk dsi1pll_byte_clk_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&dsi1pll_byte_clk_src.c, 0},
- {&dsi1pll_shadow_byte_clk_src.c, 1},
- },
- .ops = &mdss_byte_mux_ops,
- .c = {
- .parent = &dsi1pll_byte_clk_src.c,
- .dbg_name = "dsi1pll_byte_clk_mux",
- .ops = &clk_ops_gen_mux_dsi,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_byte_clk_mux.c),
- }
-};
-
-static struct clk_lookup mdss_dsi_pllcc_8996[] = {
- CLK_LIST(dsi0pll_byte_clk_mux),
- CLK_LIST(dsi0pll_byte_clk_src),
- CLK_LIST(dsi0pll_pixel_clk_mux),
- CLK_LIST(dsi0pll_pixel_clk_src),
- CLK_LIST(dsi0pll_n2_div_clk),
- CLK_LIST(dsi0pll_post_n1_div_clk),
- CLK_LIST(dsi0pll_vco_clk),
- CLK_LIST(dsi0pll_shadow_byte_clk_src),
- CLK_LIST(dsi0pll_shadow_pixel_clk_src),
- CLK_LIST(dsi0pll_shadow_n2_div_clk),
- CLK_LIST(dsi0pll_shadow_post_n1_div_clk),
- CLK_LIST(dsi0pll_shadow_vco_clk),
-};
-
-static struct clk_lookup mdss_dsi_pllcc_8996_1[] = {
- CLK_LIST(dsi1pll_byte_clk_mux),
- CLK_LIST(dsi1pll_byte_clk_src),
- CLK_LIST(dsi1pll_pixel_clk_mux),
- CLK_LIST(dsi1pll_pixel_clk_src),
- CLK_LIST(dsi1pll_n2_div_clk),
- CLK_LIST(dsi1pll_post_n1_div_clk),
- CLK_LIST(dsi1pll_vco_clk),
- CLK_LIST(dsi1pll_shadow_byte_clk_src),
- CLK_LIST(dsi1pll_shadow_pixel_clk_src),
- CLK_LIST(dsi1pll_shadow_n2_div_clk),
- CLK_LIST(dsi1pll_shadow_post_n1_div_clk),
- CLK_LIST(dsi1pll_shadow_vco_clk),
-};
-
-int dsi_pll_clock_register_8996(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res)
-{
- int rc = 0, ndx;
- int const ssc_freq_default = 31500; /* default h/w recommended value */
- int const ssc_ppm_default = 5000; /* default h/w recommended value */
- struct dsi_pll_db *pdb;
-
- if (!pdev || !pdev->dev.of_node) {
- pr_err("Invalid input parameters\n");
- return -EINVAL;
- }
-
- if (!pll_res || !pll_res->pll_base) {
- pr_err("Invalid PLL resources\n");
- return -EPROBE_DEFER;
- }
-
- if (pll_res->index >= DSI_PLL_NUM) {
- pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
- return -EINVAL;
- }
-
- ndx = pll_res->index;
- pdb = &pll_db[ndx];
- pll_res->priv = pdb;
- pdb->pll = pll_res;
- ndx++;
- ndx %= DSI_PLL_NUM;
- pdb->next = &pll_db[ndx];
-
- /* Set clock source operations */
-
- /* hr_oclk3, pixel */
- n2_clk_src_ops = clk_ops_slave_div;
- n2_clk_src_ops.prepare = mdss_pll_div_prepare;
-
- shadow_n2_clk_src_ops = clk_ops_slave_div;
-
- /* hr_ockl2, byte, vco pll */
- post_n1_div_clk_src_ops = clk_ops_div;
- post_n1_div_clk_src_ops.prepare = mdss_pll_div_prepare;
-
- shadow_post_n1_div_clk_src_ops = clk_ops_div;
-
- byte_clk_src_ops = clk_ops_div;
- byte_clk_src_ops.prepare = mdss_pll_div_prepare;
-
- clk_ops_gen_mux_dsi = clk_ops_gen_mux;
- clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
- clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
-
- if (pll_res->ssc_en) {
- if (!pll_res->ssc_freq)
- pll_res->ssc_freq = ssc_freq_default;
- if (!pll_res->ssc_ppm)
- pll_res->ssc_ppm = ssc_ppm_default;
- }
-
- /* Set client data to mux, div and vco clocks. */
- if (pll_res->index == DSI_PLL_1) {
- dsi1pll_byte_clk_src.priv = pll_res;
- dsi1pll_pixel_clk_src.priv = pll_res;
- dsi1pll_post_n1_div_clk.priv = pll_res;
- dsi1pll_n2_div_clk.priv = pll_res;
- dsi1pll_vco_clk.priv = pll_res;
-
- dsi1pll_shadow_byte_clk_src.priv = pll_res;
- dsi1pll_shadow_pixel_clk_src.priv = pll_res;
- dsi1pll_shadow_post_n1_div_clk.priv = pll_res;
- dsi1pll_shadow_n2_div_clk.priv = pll_res;
- dsi1pll_shadow_vco_clk.priv = pll_res;
-
- pll_res->vco_delay = VCO_DELAY_USEC;
- rc = of_msm_clock_register(pdev->dev.of_node,
- mdss_dsi_pllcc_8996_1,
- ARRAY_SIZE(mdss_dsi_pllcc_8996_1));
- } else {
- dsi0pll_byte_clk_src.priv = pll_res;
- dsi0pll_pixel_clk_src.priv = pll_res;
- dsi0pll_post_n1_div_clk.priv = pll_res;
- dsi0pll_n2_div_clk.priv = pll_res;
- dsi0pll_vco_clk.priv = pll_res;
-
- dsi0pll_shadow_byte_clk_src.priv = pll_res;
- dsi0pll_shadow_pixel_clk_src.priv = pll_res;
- dsi0pll_shadow_post_n1_div_clk.priv = pll_res;
- dsi0pll_shadow_n2_div_clk.priv = pll_res;
- dsi0pll_shadow_vco_clk.priv = pll_res;
-
- pll_res->vco_delay = VCO_DELAY_USEC;
- rc = of_msm_clock_register(pdev->dev.of_node,
- mdss_dsi_pllcc_8996,
- ARRAY_SIZE(mdss_dsi_pllcc_8996));
- }
-
- if (!rc) {
- pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
- pll_res->index);
- }
-
- return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll.h b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
index 286c99e339c6..822d5181435d 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
@@ -31,6 +31,8 @@ struct lpfr_cfg {
};
struct dsi_pll_vco_clk {
+ struct clk_hw hw;
+
unsigned long ref_clk_rate;
unsigned long min_rate;
unsigned long max_rate;
@@ -39,72 +41,15 @@ struct dsi_pll_vco_clk {
u32 lpfr_lut_size;
void *priv;
- struct clk c;
-
int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
(struct mdss_pll_resources *dsi_pll_Res);
};
-static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
+static inline struct dsi_pll_vco_clk *to_vco_hw(struct clk_hw *hw)
{
- return container_of(clk, struct dsi_pll_vco_clk, c);
+ return container_of(hw, struct dsi_pll_vco_clk, hw);
}
-int dsi_pll_clock_register_hpm(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_20nm(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_lpm(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8996(struct platform_device *pdev,
+int dsi_pll_clock_register_14nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-
-int set_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_byte_mux_sel(struct mux_clk *clk);
-int dsi_pll_mux_prepare(struct clk *c);
-int fixed_4div_set_div(struct div_clk *clk, int div);
-int fixed_4div_get_div(struct div_clk *clk);
-int digital_set_div(struct div_clk *clk, int div);
-int digital_get_div(struct div_clk *clk);
-int analog_set_div(struct div_clk *clk, int div);
-int analog_get_div(struct div_clk *clk);
-int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
-int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-unsigned long vco_get_rate(struct clk *c);
-long vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff vco_handoff(struct clk *c);
-int vco_prepare(struct clk *c);
-void vco_unprepare(struct clk *c);
-
-/* APIs for 20nm PHY PLL */
-int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
- unsigned long rate);
-long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff pll_20nm_vco_handoff(struct clk *c);
-int pll_20nm_vco_prepare(struct clk *c);
-void pll_20nm_vco_unprepare(struct clk *c);
-int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
-
-int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
-int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int fixed_hr_oclk2_get_div(struct div_clk *clk);
-int hr_oclk3_set_div(struct div_clk *clk, int div);
-int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
-int hr_oclk3_get_div(struct div_clk *clk);
-int ndiv_set_div(struct div_clk *clk, int div);
-int shadow_ndiv_set_div(struct div_clk *clk, int div);
-int ndiv_get_div(struct div_clk *clk);
-void __dsi_pll_disable(void __iomem *pll_base);
-
-int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_pixel_mux_sel(struct mux_clk *clk);
-int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_byte_mux_sel(struct mux_clk *clk);
-
#endif
diff --git a/drivers/clk/qcom/mdss/mdss-pll-util.c b/drivers/clk/qcom/mdss/mdss-pll-util.c
index 690c53f30eb7..881c973ec1b6 100644
--- a/drivers/clk/qcom/mdss/mdss-pll-util.c
+++ b/drivers/clk/qcom/mdss/mdss-pll-util.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
-#include <linux/clk/msm-clock-generic.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index 8264d2e5a3cd..b4b73ea4211a 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -19,12 +19,9 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
-#include <linux/clk/msm-clock-generic.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
-#include "mdss-hdmi-pll.h"
-#include "mdss-dp-pll.h"
int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
{
@@ -175,27 +172,7 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
switch (pll_res->pll_interface_type) {
case MDSS_DSI_PLL_8996:
- rc = dsi_pll_clock_register_8996(pdev, pll_res);
- break;
- case MDSS_DSI_PLL_8998:
- rc = dsi_pll_clock_register_8998(pdev, pll_res);
- case MDSS_DP_PLL_8998:
- rc = dp_pll_clock_register_8998(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8996:
- rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8996_V2:
- rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8996_V3:
- rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8996_V3_1_8:
- rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8998:
- rc = hdmi_8998_pll_clock_register(pdev, pll_res);
+ rc = dsi_pll_clock_register_14nm(pdev, pll_res);
break;
case MDSS_UNKNOWN_PLL:
default:
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 8fffaf30d4ec..3528dcfd0cb5 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -14,8 +14,16 @@
#define __MDSS_PLL_H
#include <linux/mdss_io_util.h>
-#include <linux/clk/msm-clock-generic.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+
+#include "../clk-regmap.h"
+#include "../clk-regmap-divider.h"
+#include "../clk-regmap-mux.h"
+#include <dt-bindings/clock/mdss-pll-clk.h>
#define MDSS_PLL_REG_W(base, offset, data) \
writel_relaxed((data), (base) + (offset))
@@ -200,21 +208,12 @@ static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
(!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
}
-static inline int mdss_pll_div_prepare(struct clk *c)
+static inline int mdss_pll_div_prepare(struct clk_hw *hw)
{
- struct div_clk *div = to_div_clk(c);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
/* Restore the divider's value */
- return div->ops->set_div(div, div->data.div);
-}
-
-static inline int mdss_set_mux_sel(struct mux_clk *clk, int sel)
-{
- return 0;
-}
-
-static inline int mdss_get_mux_sel(struct mux_clk *clk)
-{
- return 0;
+ return hw->init->ops->set_rate(hw, clk_hw_get_rate(hw),
+ clk_hw_get_rate(parent_hw));
}
int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable);
diff --git a/drivers/clk/qcom/mmcc-msmfalcon.c b/drivers/clk/qcom/mmcc-msmfalcon.c
index e4a84765430a..ef4c8c264078 100644
--- a/drivers/clk/qcom/mmcc-msmfalcon.c
+++ b/drivers/clk/qcom/mmcc-msmfalcon.c
@@ -112,8 +112,8 @@ static const struct parent_map mmcc_parent_map_1[] = {
static const char * const mmcc_parent_names_1[] = {
"xo",
- "dsi0_phy_pll_out_byteclk",
- "dsi1_phy_pll_out_byteclk",
+ "dsi0pll_byte_clk_mux",
+ "dsi1pll_byte_clk_mux",
"core_bi_pll_test_se",
};
@@ -240,8 +240,8 @@ static const struct parent_map mmcc_parent_map_8[] = {
static const char * const mmcc_parent_names_8[] = {
"xo",
- "dsi0_phy_pll_out_dsiclk",
- "dsi1_phy_pll_out_dsiclk",
+ "dsi0pll_pixel_clk_mux",
+ "dsi1pll_pixel_clk_mux",
"core_bi_pll_test_se",
};
@@ -906,7 +906,7 @@ static struct clk_rcg2 dp_crypto_clk_src = {
static const struct freq_tbl ftbl_dp_gtc_clk_src[] = {
F(40000000, P_GPLL0_OUT_MAIN_DIV, 7.5, 0, 0),
- F(300000000, P_GPLL0_OUT_MAIN_DIV, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
{ }
};
@@ -923,7 +923,7 @@ static struct clk_rcg2 dp_gtc_clk_src = {
.ops = &clk_rcg2_ops,
VDD_DIG_FMAX_MAP2(
LOWER, 40000000,
- LOW, 300000000),
+ LOW, 60000000),
},
};
@@ -1136,9 +1136,10 @@ static struct clk_rcg2 mdp_clk_src = {
.parent_names = mmcc_parent_names_7,
.num_parents = 7,
.ops = &clk_rcg2_ops,
- VDD_DIG_FMAX_MAP4(
+ VDD_DIG_FMAX_MAP5(
LOWER, 171428571,
LOW, 275000000,
+ LOW_L1, 300000000,
NOMINAL, 330000000,
HIGH, 412500000),
},
@@ -1183,6 +1184,7 @@ static struct clk_rcg2 pclk1_clk_src = {
static const struct freq_tbl ftbl_rot_clk_src[] = {
F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
F(275000000, P_MMPLL5_PLL_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
F(330000000, P_MMPLL5_PLL_OUT_MAIN, 2.5, 0, 0),
F(412500000, P_MMPLL5_PLL_OUT_MAIN, 2, 0, 0),
{ }
@@ -1199,9 +1201,10 @@ static struct clk_rcg2 rot_clk_src = {
.parent_names = mmcc_parent_names_7,
.num_parents = 7,
.ops = &clk_rcg2_ops,
- VDD_DIG_FMAX_MAP4(
+ VDD_DIG_FMAX_MAP5(
LOWER, 171428571,
LOW, 275000000,
+ LOW_L1, 300000000,
NOMINAL, 330000000,
HIGH, 412500000),
},
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 2685644826a0..33c20c6b45af 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -153,6 +153,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
return NULL;
init.name = name;
+ init.flags = 0;
init.num_parents = num_parents;
init.parent_names = parent_names;
init.ops = &rockchip_mmc_clk_ops;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 4c5c65e1e5d0..3b2f46bacd77 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -112,6 +112,14 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
loading your cpufreq low-level hardware driver, using the
'interactive' governor for latency-sensitive workloads.
+config CPU_FREQ_DEFAULT_GOV_SCHED
+ bool "sched"
+ select CPU_FREQ_GOV_SCHED
+ help
+ Use the CPUfreq governor 'sched' as default. This scales
+ cpu frequency using CPU utilization estimates from the
+ scheduler.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -218,6 +226,19 @@ config CPU_BOOST
If in doubt, say N.
+config CPU_FREQ_GOV_SCHED
+ bool "'sched' cpufreq governor"
+ depends on CPU_FREQ
+ depends on SMP
+ select CPU_FREQ_GOV_COMMON
+ help
+ 'sched' - this governor scales cpu frequency from the
+ scheduler as a function of cpu capacity utilization. It does
+ not evaluate utilization on a periodic basis (as ondemand
+ does) but instead is event-driven by the scheduler.
+
+ If in doubt, say N.
+
comment "CPU frequency scaling drivers"
config CPUFREQ_DT
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d45cf584d23b..118dbf1cef44 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -29,6 +29,9 @@
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
+#ifdef CONFIG_SMP
+#include <linux/sched.h>
+#endif
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);
@@ -164,6 +167,12 @@ bool have_governor_per_policy(void)
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
+bool cpufreq_driver_is_slow(void)
+{
+ return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
+
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
if (have_governor_per_policy())
@@ -357,6 +366,50 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
#endif
}
+/*********************************************************************
+ * FREQUENCY INVARIANT CPU CAPACITY *
+ *********************************************************************/
+
+static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
+
+static void
+scale_freq_capacity(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs)
+{
+ unsigned long cur = freqs ? freqs->new : policy->cur;
+ unsigned long scale = (cur << SCHED_CAPACITY_SHIFT) / policy->max;
+ struct cpufreq_cpuinfo *cpuinfo = &policy->cpuinfo;
+ int cpu;
+
+ pr_debug("cpus %*pbl cur/cur max freq %lu/%u kHz freq scale %lu\n",
+ cpumask_pr_args(policy->cpus), cur, policy->max, scale);
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(freq_scale, cpu) = scale;
+
+ if (freqs)
+ return;
+
+ scale = (policy->max << SCHED_CAPACITY_SHIFT) / cpuinfo->max_freq;
+
+ pr_debug("cpus %*pbl cur max/max freq %u/%u kHz max freq scale %lu\n",
+ cpumask_pr_args(policy->cpus), policy->max, cpuinfo->max_freq,
+ scale);
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(max_freq_scale, cpu) = scale;
+}
+
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(freq_scale, cpu);
+}
+
+unsigned long cpufreq_scale_max_freq_capacity(int cpu)
+{
+ return per_cpu(max_freq_scale, cpu);
+}
+
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
@@ -433,6 +486,9 @@ static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs)
{
+#ifdef CONFIG_SMP
+ int cpu;
+#endif
/*
* Catch double invocations of _begin() which lead to self-deadlock.
@@ -460,6 +516,12 @@ wait:
spin_unlock(&policy->transition_lock);
+ scale_freq_capacity(policy, freqs);
+#ifdef CONFIG_SMP
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_capacity(capacity_curr_of(cpu), cpu);
+#endif
+
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
@@ -2140,6 +2202,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_NOTIFY, new_policy);
+ scale_freq_capacity(new_policy, NULL);
+
policy->min = new_policy->min;
policy->max = new_policy->max;
trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 4dbf1db16aca..9cc8abd3d116 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX(userspace_mutex);
@@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
+ unsigned int *setspeed = policy->governor_data;
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
@@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
+ *setspeed = freq;
+
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
mutex_unlock(&userspace_mutex);
@@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cur);
}
+static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed;
+
+ setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
+ if (!setspeed)
+ return -ENOMEM;
+
+ policy->governor_data = setspeed;
+ return 0;
+}
+
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
+ unsigned int *setspeed = policy->governor_data;
unsigned int cpu = policy->cpu;
int rc = 0;
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ return cpufreq_userspace_policy_init(policy);
+
+ if (!setspeed)
+ return -EINVAL;
+
switch (event) {
+ case CPUFREQ_GOV_POLICY_EXIT:
+ mutex_lock(&userspace_mutex);
+ policy->governor_data = NULL;
+ kfree(setspeed);
+ mutex_unlock(&userspace_mutex);
+ break;
case CPUFREQ_GOV_START:
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", cpu);
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 1;
+ *setspeed = policy->cur;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
@@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 0;
+ *setspeed = 0;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
- pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
- cpu, policy->min, policy->max,
- policy->cur);
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+ cpu, policy->min, policy->max, policy->cur, *setspeed);
- if (policy->max < policy->cur)
+ if (policy->max < *setspeed)
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
+ else if (policy->min > *setspeed)
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
+ else
+ __cpufreq_driver_target(policy, *setspeed,
+ CPUFREQ_RELATION_L);
mutex_unlock(&userspace_mutex);
break;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f53b02a6bc05..6e80e4298274 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -662,7 +662,7 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
- tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
err = rdmsrl_safe(tdp_msr, &tdp_ratio);
if (err)
goto skip_tar;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f437bc4431b..71ecc7924b58 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -192,7 +192,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
}
/* Take note of the planned idle state. */
- sched_idle_set_state(target_state);
+ sched_idle_set_state(target_state, index);
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
@@ -205,7 +205,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
- sched_idle_set_state(NULL);
+ sched_idle_set_state(NULL, -1);
if (broadcast) {
if (WARN_ON_ONCE(!irqs_disabled()))
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index de033cc37a15..81801605d6e7 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1132,11 +1132,11 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
goto failed_set_mode;
}
- us = us + 1;
+ us = (us + 1) * 1000;
clear_predict_history();
clear_cl_predict_history();
- do_div(us, USEC_PER_SEC/SCLK_HZ);
+ do_div(us, NSEC_PER_SEC/SCLK_HZ);
msm_mpm_enter_sleep(us, from_idle, cpumask);
}
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ea8189f4b021..b3044219772c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ if (!ctx->authsize)
+ return 0;
+
/* NULL encryption / decryption */
if (!ctx->enckeylen)
return aead_null_set_sh_desc(aead);
@@ -553,7 +556,10 @@ skip_enc:
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (alg->caam.geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -562,6 +568,14 @@ skip_enc:
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
+ if (alg->caam.geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -614,7 +628,7 @@ skip_enc:
keys_fit_inline = true;
/* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
+ desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
@@ -645,13 +659,13 @@ copy_iv:
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -697,7 +711,7 @@ copy_iv:
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
@@ -2147,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
- if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+ if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
@@ -2534,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
return ret;
}
-static int aead_givdecrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- unsigned int ivsize = crypto_aead_ivsize(aead);
-
- if (req->cryptlen < ivsize)
- return -EINVAL;
-
- req->cryptlen -= ivsize;
- req->assoclen += ivsize;
-
- return aead_decrypt(req);
-}
-
/*
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
@@ -3207,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3253,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3299,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3345,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3391,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3437,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -3483,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3531,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3579,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3627,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3675,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3723,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -3769,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3815,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3861,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3907,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3953,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3999,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -4048,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -4099,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -4150,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -4201,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -4252,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -4303,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 49106ea42887..99d5e11db194 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1873,6 +1873,7 @@ caam_hash_alloc(struct caam_hash_template *template,
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 9ef51fafdbff..6e105e87b8ff 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
(unsigned int)ccw,
(unsigned int)be32_to_cpu(crb->ccw));
+ /*
+ * NX842 coprocessor sets 3rd bit in CR register with XER[S0].
+ * XER[S0] is the integer summary overflow bit, which has nothing
+ * to do with NX. Since this bit can be set with other return values,
+ * mask this bit.
+ */
+ ret &= ~ICSWX_XERS0;
+
switch (ret) {
case ICSWX_INITIATED:
ret = wait_for_csb(wmem, csb);
@@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
pr_err_ratelimited("ICSWX rejected\n");
ret = -EPROTO;
break;
- default:
- pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
- ret = -EPROTO;
- break;
}
if (!ret)
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 0794f1cc0018..42f0f229f7f7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -392,7 +392,7 @@ static void nx_of_update_msc(struct device *dev,
((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
i < msc->triplets;
i++) {
- if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
+ if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
dev_err(dev, "unknown function code/mode "
"combo: %d/%d (ignored)\n", msc->fc,
msc->mode);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59e4c3af15ed..367b6661ee04 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1262,8 +1262,8 @@ static struct crypto_alg qat_algs[] = { {
.setkey = qat_alg_ablkcipher_xts_setkey,
.decrypt = qat_alg_ablkcipher_decrypt,
.encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
},
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index f3801b983f42..3f8bb9a40df1 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = {
.cra_init = p8_aes_cbc_init,
.cra_exit = p8_aes_cbc_exit,
.cra_blkcipher = {
- .ivsize = 0,
+ .ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = p8_aes_cbc_setkey,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 404a1b69a3ab..72f138985e18 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = {
.cra_init = p8_aes_ctr_init,
.cra_exit = p8_aes_ctr_exit,
.cra_blkcipher = {
- .ivsize = 0,
+ .ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = p8_aes_ctr_setkey,
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index b9997335f193..b18e67d0e065 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -139,6 +139,26 @@ my $vmr = sub {
" vor $vx,$vy,$vy";
};
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
+my $mtspr = sub {
+ my ($f,$idx,$ra) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " or $ra,$ra,$ra";
+ } else {
+ " mtspr $idx,$ra";
+ }
+};
+my $mfspr = sub {
+ my ($f,$rd,$idx) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " li $rd,-1";
+ } else {
+ " mfspr $rd,$idx";
+ }
+};
+
# PowerISA 2.06 stuff
sub vsxmem_op {
my ($f, $vrt, $ra, $rb, $op) = @_;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 02f9aa4ebe05..e44a1bfb0250 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
u32 mbr_dus; /* Destination Microblock Stride Register */
};
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
struct at_xdmac_lld lld;
enum dma_transfer_direction direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
unsigned int xfer_size;
struct list_head descs_list;
struct list_head xfer_node;
-};
+} __aligned(sizeof(u64));
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
@@ -1388,6 +1388,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
+ bool initd;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE)
@@ -1412,7 +1413,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
residue = desc->xfer_size;
/*
* Flush FIFO: only relevant when the transfer is source peripheral
- * synchronized.
+ * synchronized. Flush is needed before reading CUBC because data in
+ * the FIFO are not reported by CUBC. Reporting a residue of the
+ * transfer length while we have data in the FIFO can cause issues.
+ * Usecase: atmel USART has a timeout which means I have received
+ * characters but there is no more character received for a while. On
+ * timeout, it requests the residue. If the data are in the DMA FIFO,
+ * we will return a residue of the transfer length. It means no data
+ * received. If an application is waiting for these data, it will hang
+ * since we won't have another USART timeout without receiving new
+ * data.
*/
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1423,34 +1433,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
}
/*
- * When processing the residue, we need to read two registers but we
- * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
- * we stand in the descriptor list and AT_XDMAC_CUBC is used
- * to know how many data are remaining for the current descriptor.
- * Since the dma channel is not paused to not loose data, between the
- * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
- * descriptor.
- * For that reason, after reading AT_XDMAC_CUBC, we check if we are
- * still using the same descriptor by reading a second time
- * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
- * read again AT_XDMAC_CUBC.
+ * The easiest way to compute the residue should be to pause the DMA
+ * but doing this can lead to miss some data as some devices don't
+ * have FIFO.
+ * We need to read several registers because:
+ * - DMA is running therefore a descriptor change is possible while
+ * reading these registers
+ * - When the block transfer is done, the value of the CUBC register
+ * is set to its initial value until the fetch of the next descriptor.
+ * This value will corrupt the residue calculation so we have to skip
+ * it.
+ *
+ * INITD -------- ------------
+ * |____________________|
+ * _______________________ _______________
+ * NDA @desc2 \/ @desc3
+ * _______________________/\_______________
+ * __________ ___________ _______________
+ * CUBC 0 \/ MAX desc1 \/ MAX desc2
+ * __________/\___________/\_______________
+ *
+ * Since descriptors are aligned on 64 bits, we can assume that
+ * the update of NDA and CUBC is atomic.
* Memory barriers are used to ensure the read order of the registers.
- * A max number of retries is set because unlikely it can never ends if
- * we are transferring a lot of data with small buffers.
+ * A max number of retries is set because, however unlikely, this loop could otherwise never end.
*/
- cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
- rmb();
- cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
- rmb();
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
- if (likely(cur_nda == check_nda))
- break;
-
- cur_nda = check_nda;
+ rmb();
+ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ rmb();
+ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+
+ if ((check_nda == cur_nda) && initd)
+ break;
}
if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1459,6 +1478,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
}
/*
+ * Flush FIFO: only relevant when the transfer is source peripheral
+ * synchronized. Another flush is needed here because CUBC is updated
+ * when the controller sends the data write command. It can lead to
+ * report data that are not written in the memory or the device. The
+ * FIFO flush ensures that data are really written.
+ */
+ if ((desc->lld.mbr_cfg & mask) == value) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+ cpu_relax();
+ }
+
+ /*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
* microblock.
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index f1bcc2a163b3..b1bc945f008f 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
{
struct usb_dmac_chan *chan = dev;
irqreturn_t ret = IRQ_NONE;
- u32 mask = USB_DMACHCR_TE;
- u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+ u32 mask = 0;
u32 chcr;
+ bool xfer_end = false;
spin_lock(&chan->vc.lock);
chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
- if (chcr & check_bits)
- mask |= USB_DMACHCR_DE | check_bits;
+ if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+ mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+ if (chcr & USB_DMACHCR_DE)
+ xfer_end = true;
+ ret |= IRQ_HANDLED;
+ }
if (chcr & USB_DMACHCR_NULL) {
/* An interruption of TE will happen after we set FTE */
mask |= USB_DMACHCR_NULL;
chcr |= USB_DMACHCR_FTE;
ret |= IRQ_HANDLED;
}
- usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+ if (mask)
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
- if (chcr & check_bits) {
+ if (xfer_end)
usb_dmac_isr_transfer_end(chan);
- ret |= IRQ_HANDLED;
- }
spin_unlock(&chan->vc.lock);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 1b2c2187b347..dc68394da682 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -966,7 +966,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
mci->ue_mc += count;
if (!enable_per_layer_report) {
- mci->ce_noinfo_count += count;
+ mci->ue_noinfo_count += count;
return;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 58aed67b7eba..3c8f19f5ac81 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -313,7 +313,6 @@ static struct device_type csrow_attr_type = {
* possible dynamic channel DIMM Label attribute files
*
*/
-
DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
@@ -326,6 +325,10 @@ DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 5);
+DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 6);
+DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 7);
/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
@@ -335,6 +338,8 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
&dev_attr_legacy_ch3_dimm_label.attr.attr,
&dev_attr_legacy_ch4_dimm_label.attr.attr,
&dev_attr_legacy_ch5_dimm_label.attr.attr,
+ &dev_attr_legacy_ch6_dimm_label.attr.attr,
+ &dev_attr_legacy_ch7_dimm_label.attr.attr,
NULL
};
@@ -351,6 +356,10 @@ DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 5);
+DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 6);
+DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 7);
/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
@@ -360,6 +369,8 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
&dev_attr_legacy_ch3_ce_count.attr.attr,
&dev_attr_legacy_ch4_ce_count.attr.attr,
&dev_attr_legacy_ch5_ce_count.attr.attr,
+ &dev_attr_legacy_ch6_ce_count.attr.attr,
+ &dev_attr_legacy_ch7_ce_count.attr.attr,
NULL
};
@@ -371,9 +382,16 @@ static umode_t csrow_dev_is_visible(struct kobject *kobj,
if (idx >= csrow->nr_channels)
return 0;
+
+ if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
+ WARN_ONCE(1, "idx: %d\n", idx);
+ return 0;
+ }
+
/* Only expose populated DIMMs */
if (!csrow->channels[idx]->dimm->nr_pages)
return 0;
+
return attr->mode;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 438ffcbe6654..008b8babf31e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -50,6 +50,7 @@ config GPIO_DEVRES
config OF_GPIO
def_bool y
depends on OF
+ depends on HAS_IOMEM
config GPIO_ACPI
def_bool y
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index 70097472b02c..c50e930d97d3 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -17,7 +17,6 @@
* Moorestown platform Langwell chip.
* Medfield platform Penwell chip.
* Clovertrail platform Cloverview chip.
- * Merrifield platform Tangier chip.
*/
#include <linux/module.h>
@@ -64,10 +63,6 @@ enum GPIO_REG {
/* intel_mid gpio driver data */
struct intel_mid_gpio_ddata {
u16 ngpio; /* number of gpio pins */
- u32 gplr_offset; /* offset of first GPLR register from base */
- u32 flis_base; /* base address of FLIS registers */
- u32 flis_len; /* length of FLIS registers */
- u32 (*get_flis_offset)(int gpio);
u32 chip_irq_type; /* chip interrupt type */
};
@@ -257,15 +252,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
};
-static const struct intel_mid_gpio_ddata gpio_tangier = {
- .ngpio = 192,
- .gplr_offset = 4,
- .flis_base = 0xff0c0000,
- .flis_len = 0x8000,
- .get_flis_offset = NULL,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
-};
-
static const struct pci_device_id intel_gpio_ids[] = {
{
/* Lincroft */
@@ -292,11 +278,6 @@ static const struct pci_device_id intel_gpio_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
.driver_data = (kernel_ulong_t)&gpio_cloverview_core,
},
- {
- /* Tangier */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
- .driver_data = (kernel_ulong_t)&gpio_tangier,
- },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 2d4892cc70fb..c844d7eccb6c 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
#define MAX_BANK 5
#define BANK_SZ 8
-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
+#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
struct pca953x_chip {
unsigned gpio_start;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 053fc2f465df..ff5566c69f7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -710,9 +710,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9416e0f5c1db..51a9942cdb40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -331,6 +331,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
(le16_to_cpu(path->usConnObjectId) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+ /* Skip TV/CV support */
+ if ((le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_TV1_SUPPORT) ||
+ (le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_CV_SUPPORT))
+ continue;
+
+ if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+ DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+ con_obj_id, le16_to_cpu(path->usDeviceTag));
+ continue;
+ }
+
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
@@ -566,28 +579,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
le16_to_cpu(firmware_info->info.usReferenceClock);
ppll->reference_div = 0;
- if (crev < 2)
- ppll->pll_out_min =
- le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
- else
- ppll->pll_out_min =
- le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+ ppll->pll_out_min =
+ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
ppll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
- ppll->lcd_pll_out_min =
- le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_min == 0)
- ppll->lcd_pll_out_min = ppll->pll_out_min;
- ppll->lcd_pll_out_max =
- le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_max == 0)
- ppll->lcd_pll_out_max = ppll->pll_out_max;
- } else {
+ ppll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_min == 0)
ppll->lcd_pll_out_min = ppll->pll_out_min;
+ ppll->lcd_pll_out_max =
+ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_max == 0)
ppll->lcd_pll_out_max = ppll->pll_out_max;
- }
if (ppll->pll_out_min == 0)
ppll->pll_out_min = 64800;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 5a8fbadbd27b..29adbbe225c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "amdgpu_acpi.h"
@@ -256,6 +257,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 7ef2c13921b4..930083336968 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c961fe093e12..16302f7d59f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1793,7 +1793,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
+
+ /*
+ * Most of the connector probing functions try to acquire runtime pm
+ * refs to ensure that the GPU is powered on when connector polling is
+ * performed. Since we're calling this from a runtime PM callback,
+ * trying to acquire rpm refs will cause us to deadlock.
+ *
+ * Since we're guaranteed to be holding the rpm lock, it's safe to
+ * temporarily disable the rpm helpers so this doesn't deadlock us.
+ */
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth++;
+#endif
drm_helper_hpd_irq_event(dev);
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth--;
+#endif
if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 7312d729d300..22a613a95bf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
@@ -269,7 +269,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 9e25edafa721..c77a1ebfc632 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -288,7 +288,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
- int r;
+ int r, ret = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -309,10 +309,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
} else {
/* still not good, but we can live with it */
DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+ ret = r;
}
}
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1cbb16e15307..475c38fe9245 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -233,8 +233,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 92b6acadfc52..21aacc1f45c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
-static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
+static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
if (bpc == 0)
return 24;
@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
return bpc * 3;
}
-/* get the max pix clock supported by the link rate and lane num */
-static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
- int lane_num,
- int bpp)
-{
- return (link_rate * lane_num * 8) / bpp;
-}
-
/***** amdgpu specific DP functions *****/
-/* First get the min lane# when low rate is used according to pixel clock
- * (prefer low rate), second check max lane# supported by DP panel,
- * if the max lane# < low rate lane# then use max lane# instead.
- */
-static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
+static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
-{
- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
- int max_link_rate = drm_dp_max_link_rate(dpcd);
- int max_lane_num = drm_dp_max_lane_count(dpcd);
- int lane_num;
- int max_dp_pix_clock;
-
- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
- max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
- if (pix_clock <= max_dp_pix_clock)
- break;
- }
-
- return lane_num;
-}
-
-static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
- int lane_num, max_pix_clock;
-
- if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- return 270000;
-
- lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 162000;
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 270000;
- if (amdgpu_connector_is_dp12_capable(connector)) {
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 540000;
+ unsigned bpp =
+ amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
+ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ unsigned lane_num, i, max_pix_clock;
+
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
}
- return drm_dp_max_link_rate(dpcd);
+ return -EINVAL;
}
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
+ int ret;
if (!amdgpu_connector->con_priv)
return;
@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- dig_connector->dp_clock =
- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
- dig_connector->dp_lane_count =
- amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dig_connector->dp_lane_count,
+ &dig_connector->dp_clock);
+ if (ret) {
+ dig_connector->dp_clock = 0;
+ dig_connector->dp_lane_count = 0;
+ }
}
}
@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
- int dp_clock;
+ unsigned dp_lanes, dp_clock;
+ int ret;
if (!amdgpu_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = amdgpu_connector->con_priv;
- dp_clock =
- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock, &dp_lanes, &dp_clock);
+ if (ret)
+ return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!amdgpu_connector_is_dp12_capable(connector)))
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1cd6de575305..542517d4e584 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5f712ceddf08..c568293cb6c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1030,6 +1031,8 @@ static int cik_sdma_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_sdma_soft_reset(handle);
+
return cik_sdma_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 8035d4d6a4f5..653917a3bcc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1955,10 +1955,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
} else { /*pi->caps_vce_pg*/
cz_update_vce_dpm(adev);
- cz_enable_vce_dpm(adev, true);
+ cz_enable_vce_dpm(adev, !gate);
}
-
- return;
}
const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ea87033bfaf6..df17fababbd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -167,6 +167,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
return 0;
default: BUG();
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index e5aec45bf985..1ac29d703c12 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -108,7 +108,6 @@ steal_encoder(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int ret;
/*
* We can only steal an encoder coming from a connector, which means we
@@ -139,9 +138,6 @@ steal_encoder(struct drm_atomic_state *state,
if (IS_ERR(connector_state))
return PTR_ERR(connector_state);
- ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
- if (ret)
- return ret;
connector_state->best_encoder = NULL;
}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 6743ff7dccfa..7f4a6c550319 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
mb();
for (; addr < end; addr += size)
clflushopt(addr);
+ clflushopt(end - 1); /* force serialisation */
mb();
return;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index dc84003f694e..5e4bb4837bae 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5231,6 +5231,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
unsigned long flags;
int ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
page_flip->reserved != 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d5d2c03fd136..8c9ac021608f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -73,6 +73,8 @@
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC (1 << 10)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -99,6 +101,9 @@ static struct edid_quirk {
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+ /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+ { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3820,6 +3825,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info, connector);
+ if (quirks & EDID_QUIRK_FORCE_6BPC)
+ connector->display_info.bpc = 6;
+
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c7de454e8e88..b205224f1a44 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -338,27 +338,32 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
spin_unlock(&file_priv->table_lock);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
- if (ret < 0) {
- drm_gem_object_handle_unreference_unlocked(obj);
- return ret;
- }
+ if (ret < 0)
+ goto err_unref;
+
*handlep = ret;
ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
+ if (ret)
+ goto err_remove;
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
+ if (ret)
+ goto err_revoke;
}
return 0;
+
+err_revoke:
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+err_remove:
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, *handlep);
+ spin_unlock(&file_priv->table_lock);
+err_unref:
+ drm_gem_object_handle_unreference_unlocked(obj);
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d3ce4da6a6ad..d400d6773bbb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3313,6 +3313,9 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
}
extern void intel_i2c_reset(struct drm_device *dev);
+/* intel_bios.c */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+
/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6ed7d63a0688..201947b4377c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -513,9 +513,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
return ret;
if (r->presumed_offset != offset &&
- __copy_to_user_inatomic(&user_relocs->presumed_offset,
- &r->presumed_offset,
- sizeof(r->presumed_offset))) {
+ __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 86c7500454b4..b37fe0df743e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2747,6 +2747,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->base.cleanup(&ppgtt->base);
+ kfree(ppgtt);
}
if (drm_mm_initialized(&vm->mm)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9ed9f6dde86f..cace154bbdc0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3240,19 +3240,20 @@ enum skl_disp_power_wells {
#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
/*
- * HDMI/DP bits are gen4+
+ * HDMI/DP bits are g4x+
*
* WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
* Please check the detailed lore in the commit message for for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
+#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
+/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-/* VLV DP/HDMI bits again match Bspec */
-#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index ce82f9c7df24..d14bdc537587 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1351,3 +1351,42 @@ intel_parse_bios(struct drm_device *dev)
return 0;
}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ /* FIXME maybe deal with port A as well? */
+ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c41bc42b6fa7..a3254c3bcc7c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11952,21 +11952,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
- /* Clamp bpp to default limit on screens without EDID 1.4 */
- if (connector->base.display_info.bpc == 0) {
- int type = connector->base.connector_type;
- int clamp_bpp = 24;
-
- /* Fall back to 18 bpp when DP sink capability is unknown. */
- if (type == DRM_MODE_CONNECTOR_DisplayPort ||
- type == DRM_MODE_CONNECTOR_eDP)
- clamp_bpp = 18;
-
- if (bpp > clamp_bpp) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
- bpp, clamp_bpp);
- pipe_config->pipe_bpp = clamp_bpp;
- }
+ /* Clamp bpp to 8 on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0 && bpp > 24) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+ bpp);
+ pipe_config->pipe_bpp = 24;
}
}
@@ -14170,6 +14160,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
+ bool has_edp, has_port;
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@@ -14178,27 +14170,37 @@ static void intel_setup_outputs(struct drm_device *dev)
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
+ *
+ * Sadly the straps seem to be missing sometimes even for HDMI
+ * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
+ * and VBT for the presence of the port. Additionally we can't
+ * trust the port type the VBT declares as we've seen at least
+ * HDMI ports that the VBT claim are DP or eDP.
*/
- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_B))
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_B))
- intel_dp_init(dev, VLV_DP_B, PORT_B);
- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_C))
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_C))
- intel_dp_init(dev, VLV_DP_C, PORT_C);
if (IS_CHERRYVIEW(dev)) {
- /* eDP not supported on port D, so don't check VBT */
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED)
+ /*
+ * eDP not supported on port D,
+ * so no need to worry about it
+ */
+ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev, CHV_DP_D, PORT_D);
+ if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}
intel_dsi_init(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8e1d6d74c203..ebbd23407a80 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4592,20 +4592,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
-static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
{
u32 bit;
switch (port->port) {
case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
MISSING_CASE(port->port);
@@ -4657,8 +4657,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
- else if (IS_VALLEYVIEW(dev_priv))
- return vlv_digital_port_connected(dev_priv, port);
+ else if (IS_GM45(dev_priv))
+ return gm45_digital_port_connected(dev_priv, port);
else
return g4x_digital_port_connected(dev_priv, port);
}
@@ -6113,8 +6113,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return true;
}
-void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+ int output_reg,
+ enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
@@ -6124,7 +6125,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
- return;
+ return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
@@ -6179,15 +6180,14 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
- return;
+ return true;
err_init_connector:
drm_encoder_cleanup(encoder);
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
-
- return;
+ return false;
}
void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c5f11e0c5d5b..67f72a7ee7cb 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1195,7 +1195,7 @@ void intel_csr_ucode_fini(struct drm_device *dev);
void assert_csr_loaded(struct drm_i915_private *dev_priv);
/* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4b8ed9f2dabc..dff69fef47e0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2030,6 +2030,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 6dc13c02c28e..e362a30776fa 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- DRM_ERROR("No ACPI video bus found\n");
+ DRM_DEBUG_KMS("No ACPI video bus found\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 62284e45d531..1e851e037c29 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1789,16 +1789,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
- int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+ /*
+ * We treat the cursor plane as always-on for the purposes of watermark
+ * calculation. Until we have two-stage watermark programming merged,
+ * this is necessary to avoid flickering.
+ */
+ int cpp = 4;
+ int width = pstate->visible ? pstate->base.crtc_w : 64;
- if (!cstate->base.active || !pstate->visible)
+ if (!cstate->base.active)
return 0;
return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->dst),
- bpp,
- mem_value);
+ width, cpp, mem_value);
}
/* Only for WM_LP. */
@@ -4522,7 +4526,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6d7cd3fe21e7..1847f83b1e33 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return submit;
}
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_from_user_inatomic(to, from, n);
+ return -EFAULT;
+}
+
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
@@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
int ret = 0;
spin_lock(&file->table_lock);
+ pagefault_disable();
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
@@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
to_user_ptr(args->bos + (i * sizeof(submit_bo)));
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret) {
- ret = -EFAULT;
- goto out_unlock;
+ ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+ if (unlikely(ret)) {
+ pagefault_enable();
+ spin_unlock(&file->table_lock);
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret)
+ goto out;
+ spin_lock(&file->table_lock);
+ pagefault_disable();
}
if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
out_unlock:
- submit->nr_bos = i;
+ pagefault_enable();
spin_unlock(&file->table_lock);
+out:
+ submit->nr_bos = i;
+
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1d3ee5179ab8..d236fc7c425b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -308,7 +308,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
bool boot = false;
int ret;
- /* remove conflicting drivers (vesafb, efifb etc) */
+ /* We need to check that the chipset is supported before booting
+ * fbdev off the hardware, as there's no way to put it back.
+ */
+ ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ if (ret)
+ return ret;
+
+ nvkm_device_del(&device);
+
+ /* Remove conflicting drivers (vesafb, efifb etc). */
aper = alloc_apertures(3);
if (!aper)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 8f715feadf56..f90568327468 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | image->width);
+ OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
- dsize = ALIGN(image->width * image->height, 32) >> 5;
+ dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a4e259a00430..c8e096533f60 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
- dwords = ALIGN(image->width * image->height, 32) >> 5;
+ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index f28315e865a5..22d32578dafd 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
- dwords = ALIGN(image->width * image->height, 32) >> 5;
+ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 69de8c6259fe..f1e15a4d4f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
- nvkm_wo32(chan->inst, i + 1, 0x0436086c);
- nvkm_wo32(chan->inst, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 2207dac23981..300f5ed5de0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
- nvkm_wo32(chan->inst, i + 1, 0x0436086c);
- nvkm_wo32(chan->inst, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index bd73b4069069..44ee72e04df9 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -302,77 +302,31 @@ static int convert_bpc_to_bpp(int bpc)
return bpc * 3;
}
-/* get the max pix clock supported by the link rate and lane num */
-static int dp_get_max_dp_pix_clock(int link_rate,
- int lane_num,
- int bpp)
-{
- return (link_rate * lane_num * 8) / bpp;
-}
-
/***** radeon specific DP functions *****/
-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE])
-{
- int max_link_rate;
-
- if (radeon_connector_is_dp12_capable(connector))
- max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
- else
- max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
-
- return max_link_rate;
-}
-
-/* First get the min lane# when low rate is used according to pixel clock
- * (prefer low rate), second check max lane# supported by DP panel,
- * if the max lane# < low rate lane# then use max lane# instead.
- */
-static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
-{
- int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
- int max_lane_num = drm_dp_max_lane_count(dpcd);
- int lane_num;
- int max_dp_pix_clock;
-
- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
- max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
- if (pix_clock <= max_dp_pix_clock)
- break;
- }
-
- return lane_num;
-}
-
-static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
+int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 dpcd[DP_DPCD_SIZE],
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int lane_num, max_pix_clock;
-
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- return 270000;
-
- lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
- max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 162000;
- max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 270000;
- if (radeon_connector_is_dp12_capable(connector)) {
- max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 540000;
+ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ unsigned lane_num, i, max_pix_clock;
+
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
}
- return radeon_dp_get_max_link_rate(connector, dpcd);
+ return -EINVAL;
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -491,6 +445,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
+ int ret;
if (!radeon_connector->con_priv)
return;
@@ -498,10 +453,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- dig_connector->dp_clock =
- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
- dig_connector->dp_lane_count =
- radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dig_connector->dp_lane_count,
+ &dig_connector->dp_clock);
+ if (ret) {
+ dig_connector->dp_clock = 0;
+ dig_connector->dp_lane_count = 0;
+ }
}
}
@@ -510,7 +469,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
- int dp_clock;
+ unsigned dp_clock, dp_lanes;
+ int ret;
if ((mode->clock > 340000) &&
(!radeon_connector_is_dp12_capable(connector)))
@@ -520,8 +480,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
- dp_clock =
- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dp_lanes,
+ &dp_clock);
+ if (ret)
+ return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!radeon_connector_is_dp12_capable(connector)))
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 0b04b9282f56..d4ac8c837314 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
else {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index de9a2ffcf5f7..0c5b3eeff82d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;
- if (crev < 2)
+ if ((frev < 2) && (crev < 2))
p1pll->pll_out_min =
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
else
@@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
+ if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f298a283..69ce95571136 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "radeon_acpi.h"
@@ -255,6 +256,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 9cfc1c3e1965..30f00748ed37 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2058,7 +2058,6 @@ radeon_add_atom_connector(struct drm_device *dev,
RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2308,8 +2307,10 @@ radeon_add_atom_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2385,7 +2386,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2470,10 +2470,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 744f5c49c664..6dd39bdedb97 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -525,11 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
{
struct radeon_connector_atom_dig *dig_connector;
-
dig_connector = mst_enc->connector->con_priv;
dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
- dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
- dig_connector->dpcd);
+ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
dig_connector->dp_lane_count, dig_connector->dp_clock);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index bba112628b47..7a0666ac4e23 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -757,8 +757,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- const u8 *dpcd);
+extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 *dpcd,
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f342aad79cc6..35310336dd0a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index cb4108b4e1f9..18c05e930216 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -359,6 +359,13 @@ static inline void _pop_drawobj(struct adreno_context *drawctxt)
drawctxt->queued--;
}
+static void _retire_sparseobj(struct kgsl_drawobj_sparse *sparseobj,
+ struct adreno_context *drawctxt)
+{
+ kgsl_sparse_bind(drawctxt->base.proc_priv, sparseobj);
+ _retire_timestamp(DRAWOBJ(sparseobj));
+}
+
static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj,
struct adreno_context *drawctxt)
{
@@ -436,6 +443,8 @@ static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj(
return drawobj;
} else if (drawobj->type == SYNCOBJ_TYPE)
ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+ else
+ return ERR_PTR(-EINVAL);
if (ret == -EAGAIN)
return ERR_PTR(-EAGAIN);
@@ -670,6 +679,76 @@ static int sendcmd(struct adreno_device *adreno_dev,
return 0;
}
+
+/*
+ * Retires all sync objs from the sparse context
+ * queue and returns one of the following:
+ * a) next sparseobj
+ * b) -EAGAIN for syncobj with syncpoints pending
+ * c) -EINVAL for unexpected drawobj
+ * d) NULL for no sparseobj
+ */
+static struct kgsl_drawobj_sparse *_get_next_sparseobj(
+ struct adreno_context *drawctxt)
+{
+ struct kgsl_drawobj *drawobj;
+ unsigned int i = drawctxt->drawqueue_head;
+ int ret = 0;
+
+ if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail)
+ return NULL;
+
+ for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) {
+
+ drawobj = drawctxt->drawqueue[i];
+
+ if (drawobj == NULL)
+ return NULL;
+
+ if (drawobj->type == SYNCOBJ_TYPE)
+ ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+ else if (drawobj->type == SPARSEOBJ_TYPE)
+ return SPARSEOBJ(drawobj);
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (ret == -EAGAIN)
+ return ERR_PTR(-EAGAIN);
+
+ continue;
+ }
+
+ return NULL;
+}
+
+static int _process_drawqueue_sparse(
+ struct adreno_context *drawctxt)
+{
+ struct kgsl_drawobj_sparse *sparseobj;
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < ADRENO_CONTEXT_DRAWQUEUE_SIZE; i++) {
+
+ spin_lock(&drawctxt->lock);
+ sparseobj = _get_next_sparseobj(drawctxt);
+ if (IS_ERR_OR_NULL(sparseobj)) {
+ if (IS_ERR(sparseobj))
+ ret = PTR_ERR(sparseobj);
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+
+ _pop_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
+
+ _retire_sparseobj(sparseobj, drawctxt);
+ }
+
+ return 0;
+}
+
/**
* dispatcher_context_sendcmds() - Send commands from a context to the GPU
* @adreno_dev: Pointer to the adreno device struct
@@ -689,6 +768,9 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
int inflight = _drawqueue_inflight(dispatch_q);
unsigned int timestamp;
+ if (drawctxt->base.flags & KGSL_CONTEXT_SPARSE)
+ return _process_drawqueue_sparse(drawctxt);
+
if (dispatch_q->inflight >= inflight) {
spin_lock(&drawctxt->lock);
_process_drawqueue_get_next_drawobj(drawctxt);
@@ -1124,6 +1206,31 @@ static void _queue_drawobj(struct adreno_context *drawctxt,
trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
}
+static int _queue_sparseobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_sparse *sparseobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(sparseobj);
+ int ret;
+
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
+ return ret;
+
+ /*
+ * See if we can fastpath this thing - if nothing is
+ * queued, bind/unbind without queueing the context
+ */
+ if (!drawctxt->queued)
+ return 1;
+
+ drawctxt->queued_timestamp = *timestamp;
+ _queue_drawobj(drawctxt, drawobj);
+
+ return 0;
+}
+
+
static int _queue_markerobj(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj,
uint32_t *timestamp, unsigned int user_ts)
@@ -1141,7 +1248,6 @@ static int _queue_markerobj(struct adreno_device *adreno_dev,
*/
if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
drawobj->context, drawctxt->queued_timestamp)) {
- trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
_retire_timestamp(drawobj);
return 1;
}
@@ -1212,7 +1318,7 @@ static void _queue_syncobj(struct adreno_context *drawctxt,
}
/**
- * adreno_dispactcher_queue_drawobj() - Queue a new draw object in the context
+ * adreno_dispatcher_queue_cmds() - Queue a new draw object in the context
* @dev_priv: Pointer to the device private struct
* @context: Pointer to the kgsl draw context
* @drawobj: Pointer to the array of drawobj's being submitted
@@ -1234,6 +1340,9 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
int ret;
unsigned int i, user_ts;
+ if (!count)
+ return -EINVAL;
+
ret = _check_context_state(&drawctxt->base);
if (ret)
return ret;
@@ -1283,6 +1392,20 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
_queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]),
timestamp);
break;
+ case SPARSEOBJ_TYPE:
+ ret = _queue_sparseobj(adreno_dev, drawctxt,
+ SPARSEOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret == 1) {
+ spin_unlock(&drawctxt->lock);
+ _retire_sparseobj(SPARSEOBJ(drawobj[i]),
+ drawctxt);
+ return 0;
+ } else if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
default:
spin_unlock(&drawctxt->lock);
return -EINVAL;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 3a110ed221a8..1cbd2ef4b6b4 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -351,7 +351,8 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
KGSL_CONTEXT_IFH_NOP |
KGSL_CONTEXT_SECURE |
KGSL_CONTEXT_PREEMPT_STYLE_MASK |
- KGSL_CONTEXT_NO_SNAPSHOT);
+ KGSL_CONTEXT_NO_SNAPSHOT |
+ KGSL_CONTEXT_SPARSE);
/* Check for errors before trying to initialize */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 993f22b26294..bae3884aa277 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1439,6 +1439,17 @@ long kgsl_ioctl_device_waittimestamp_ctxtid(
return result;
}
+static inline bool _check_context_is_sparse(struct kgsl_context *context,
+ uint64_t flags)
+{
+ if ((context->flags & KGSL_CONTEXT_SPARSE) ||
+ (flags & KGSL_DRAWOBJ_SPARSE))
+ return true;
+
+ return false;
+}
+
+
long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
@@ -1463,6 +1474,11 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
+ if (_check_context_is_sparse(context, param->flags)) {
+ kgsl_context_put(context);
+ return -EINVAL;
+ }
+
cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
CMDOBJ_TYPE);
if (IS_ERR(cmdobj)) {
@@ -1558,6 +1574,11 @@ long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
+ if (_check_context_is_sparse(context, param->flags)) {
+ kgsl_context_put(context);
+ return -EINVAL;
+ }
+
if (type & SYNCOBJ_TYPE) {
struct kgsl_drawobj_sync *syncobj =
kgsl_drawobj_sync_create(device, context);
@@ -1632,6 +1653,11 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
+ if (_check_context_is_sparse(context, param->flags)) {
+ kgsl_context_put(context);
+ return -EINVAL;
+ }
+
if (type & SYNCOBJ_TYPE) {
struct kgsl_drawobj_sync *syncobj =
kgsl_drawobj_sync_create(device, context);
@@ -3742,6 +3768,128 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
return ret;
}
+long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_gpu_sparse_command *param = data;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_context *context;
+ struct kgsl_drawobj *drawobj[2];
+ struct kgsl_drawobj_sparse *sparseobj;
+ long result;
+ unsigned int i = 0;
+
+ /* Make sure sparse and syncpoint count isn't too big */
+ if (param->numsparse > KGSL_MAX_SPARSE ||
+ param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ return -EINVAL;
+
+ /* Make sure there is at least one sparse or sync */
+ if (param->numsparse == 0 && param->numsyncs == 0)
+ return -EINVAL;
+
+ /* Only Sparse commands are supported in this ioctl */
+ if (!(param->flags & KGSL_DRAWOBJ_SPARSE) || (param->flags &
+ (KGSL_DRAWOBJ_SUBMIT_IB_LIST | KGSL_DRAWOBJ_MARKER
+ | KGSL_DRAWOBJ_SYNC)))
+ return -EINVAL;
+
+ context = kgsl_context_get_owner(dev_priv, param->context_id);
+ if (context == NULL)
+ return -EINVAL;
+
+ /* Restrict bind commands to bind context */
+ if (!(context->flags & KGSL_CONTEXT_SPARSE)) {
+ kgsl_context_put(context);
+ return -EINVAL;
+ }
+
+ if (param->numsyncs) {
+ struct kgsl_drawobj_sync *syncobj = kgsl_drawobj_sync_create(
+ device, context);
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+ result = kgsl_drawobj_sync_add_synclist(device, syncobj,
+ to_user_ptr(param->synclist),
+ param->syncsize, param->numsyncs);
+ if (result)
+ goto done;
+ }
+
+ if (param->numsparse) {
+ sparseobj = kgsl_drawobj_sparse_create(device, context,
+ param->flags);
+ if (IS_ERR(sparseobj)) {
+ result = PTR_ERR(sparseobj);
+ goto done;
+ }
+
+ sparseobj->id = param->id;
+ drawobj[i++] = DRAWOBJ(sparseobj);
+ result = kgsl_drawobj_sparse_add_sparselist(device, sparseobj,
+ param->id, to_user_ptr(param->sparselist),
+ param->sparsesize, param->numsparse);
+ if (result)
+ goto done;
+ }
+
+ result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
+ drawobj, i, &param->timestamp);
+
+done:
+ /*
+ * -EPROTO is a "success" error - it just tells the user that the
+ * context had previously faulted
+ */
+ if (result && result != -EPROTO)
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
+
+ kgsl_context_put(context);
+ return result;
+}
+
+void kgsl_sparse_bind(struct kgsl_process_private *private,
+ struct kgsl_drawobj_sparse *sparseobj)
+{
+ struct kgsl_sparseobj_node *sparse_node;
+ struct kgsl_mem_entry *virt_entry = NULL;
+ long ret = 0;
+ char *name;
+
+ virt_entry = kgsl_sharedmem_find_id_flags(private, sparseobj->id,
+ KGSL_MEMFLAGS_SPARSE_VIRT);
+ if (virt_entry == NULL)
+ return;
+
+ list_for_each_entry(sparse_node, &sparseobj->sparselist, node) {
+ if (sparse_node->obj.flags & KGSL_SPARSE_BIND) {
+ ret = sparse_bind_range(private, &sparse_node->obj,
+ virt_entry);
+ name = "bind";
+ } else {
+ ret = sparse_unbind_range(&sparse_node->obj,
+ virt_entry);
+ name = "unbind";
+ }
+
+ if (ret)
+ KGSL_CORE_ERR("kgsl: Unable to '%s' ret %ld virt_id %d, phys_id %d, virt_offset %16.16llX, phys_offset %16.16llX, size %16.16llX, flags %16.16llX\n",
+ name, ret, sparse_node->virt_id,
+ sparse_node->obj.id,
+ sparse_node->obj.virtoffset,
+ sparse_node->obj.physoffset,
+ sparse_node->obj.size, sparse_node->obj.flags);
+ }
+
+ kgsl_mem_entry_put(virt_entry);
+}
+EXPORT_SYMBOL(kgsl_sparse_bind);
+
long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
@@ -4656,7 +4804,7 @@ static void kgsl_core_exit(void)
kgsl_driver.class = NULL;
}
- kgsl_drawobj_exit();
+ kgsl_drawobjs_cache_exit();
kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
@@ -4732,7 +4880,7 @@ static int __init kgsl_core_init(void)
kgsl_events_init();
- result = kgsl_drawobj_init();
+ result = kgsl_drawobjs_cache_init();
if (result)
goto err;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index fbf9197b6d1b..2a9ac899725c 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -100,6 +100,7 @@ static inline void KGSL_STATS_ADD(uint64_t size, atomic_long_t *stat,
#define KGSL_MAX_NUMIBS 100000
#define KGSL_MAX_SYNCPOINTS 32
+#define KGSL_MAX_SPARSE 1000
struct kgsl_device;
struct kgsl_context;
@@ -432,6 +433,8 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
+long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data);
void kgsl_mem_entry_destroy(struct kref *kref);
diff --git a/drivers/gpu/msm/kgsl_compat.c b/drivers/gpu/msm/kgsl_compat.c
index 028a9566fa14..6b8d8d34a988 100644
--- a/drivers/gpu/msm/kgsl_compat.c
+++ b/drivers/gpu/msm/kgsl_compat.c
@@ -382,6 +382,8 @@ static const struct kgsl_ioctl kgsl_compat_ioctl_funcs[] = {
kgsl_ioctl_sparse_virt_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
kgsl_ioctl_sparse_bind),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_SPARSE_COMMAND,
+ kgsl_ioctl_gpu_sparse_command),
};
long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 7d68c23ad5b7..cb7ffd51460a 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -203,6 +203,18 @@ struct kgsl_memobj_node {
unsigned long priv;
};
+/**
+ * struct kgsl_sparseobj_node - Sparse object descriptor
+ * @node: Local list node for the sparse cmdbatch
+ * @virt_id: Virtual ID to bind/unbind
+ * @obj: struct kgsl_sparse_binding_object
+ */
+struct kgsl_sparseobj_node {
+ struct list_head node;
+ unsigned int virt_id;
+ struct kgsl_sparse_binding_object obj;
+};
+
struct kgsl_device {
struct device *dev;
const char *name;
@@ -639,6 +651,9 @@ long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,
long kgsl_ioctl_copy_out(unsigned int kernel_cmd, unsigned int user_cmd,
unsigned long, unsigned char *ptr);
+void kgsl_sparse_bind(struct kgsl_process_private *private,
+ struct kgsl_drawobj_sparse *sparse);
+
/**
* kgsl_context_put() - Release context reference count
* @context: Pointer to the KGSL context to be released
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 7840daa6a3e2..f8f0e7ccb0d3 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -37,10 +37,12 @@
#include "kgsl_compat.h"
/*
- * Define an kmem cache for the memobj structures since we allocate and free
- * them so frequently
+ * Define a kmem cache for the memobj &amp; sparseobj structures since we
+ * allocate and free them so frequently
*/
static struct kmem_cache *memobjs_cache;
+static struct kmem_cache *sparseobjs_cache;
+
static void drawobj_destroy_object(struct kref *kref)
{
@@ -60,6 +62,9 @@ static void drawobj_destroy_object(struct kref *kref)
case MARKEROBJ_TYPE:
kfree(CMDOBJ(drawobj));
break;
+ case SPARSEOBJ_TYPE:
+ kfree(SPARSEOBJ(drawobj));
+ break;
}
}
@@ -211,6 +216,18 @@ static inline void memobj_list_free(struct list_head *list)
}
}
+static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
+{
+ struct kgsl_sparseobj_node *mem, *tmpmem;
+ struct list_head *list = &SPARSEOBJ(drawobj)->sparselist;
+
+ /* Free the sparse mem here */
+ list_for_each_entry_safe(mem, tmpmem, list, node) {
+ list_del_init(&mem->node);
+ kmem_cache_free(sparseobjs_cache, mem);
+ }
+}
+
static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
@@ -297,6 +314,8 @@ void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
drawobj_destroy_sync(drawobj);
else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
drawobj_destroy_cmd(drawobj);
+ else if (drawobj->type == SPARSEOBJ_TYPE)
+ drawobj_destroy_sparse(drawobj);
else
return;
@@ -610,16 +629,26 @@ int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
return 0;
}
-static inline int drawobj_init(struct kgsl_device *device,
- struct kgsl_context *context, struct kgsl_drawobj *drawobj,
+static void *_drawobj_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int size,
unsigned int type)
{
+ void *obj = kzalloc(size, GFP_KERNEL);
+ struct kgsl_drawobj *drawobj;
+
+ if (obj == NULL)
+ return ERR_PTR(-ENOMEM);
+
/*
* Increase the reference count on the context so it doesn't disappear
* during the lifetime of this object
*/
- if (!_kgsl_context_get(context))
- return -ENOENT;
+ if (!_kgsl_context_get(context)) {
+ kfree(obj);
+ return ERR_PTR(-ENOENT);
+ }
+
+ drawobj = obj;
kref_init(&drawobj->refcount);
@@ -627,7 +656,28 @@ static inline int drawobj_init(struct kgsl_device *device,
drawobj->context = context;
drawobj->type = type;
- return 0;
+ return obj;
+}
+
+/**
+ * kgsl_drawobj_sparse_create() - Create a new sparse obj structure
+ * @device: Pointer to a KGSL device struct
+ * @context: Pointer to a KGSL context struct
+ * @flags: Flags for the sparse obj
+ *
+ * Allocate a new kgsl_drawobj_sparse structure
+ */
+struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create(
+ struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags)
+{
+ struct kgsl_drawobj_sparse *sparseobj = _drawobj_create(device,
+ context, sizeof(*sparseobj), SPARSEOBJ_TYPE);
+
+ if (!IS_ERR(sparseobj))
+ INIT_LIST_HEAD(&sparseobj->sparselist);
+
+ return sparseobj;
}
/**
@@ -641,18 +691,13 @@ static inline int drawobj_init(struct kgsl_device *device,
struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
struct kgsl_context *context)
{
- struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj),
- GFP_KERNEL);
- if (syncobj == NULL)
- return ERR_PTR(-ENOMEM);
-
- if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) {
- kfree(syncobj);
- return ERR_PTR(-ENOENT);
- }
+ struct kgsl_drawobj_sync *syncobj = _drawobj_create(device,
+ context, sizeof(*syncobj), SYNCOBJ_TYPE);
/* Add a timer to help debug sync deadlocks */
- setup_timer(&syncobj->timer, syncobj_timer, (unsigned long) syncobj);
+ if (!IS_ERR(syncobj))
+ setup_timer(&syncobj->timer, syncobj_timer,
+ (unsigned long) syncobj);
return syncobj;
}
@@ -671,27 +716,13 @@ struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
struct kgsl_context *context, unsigned int flags,
unsigned int type)
{
- struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
- struct kgsl_drawobj *drawobj;
-
- if (cmdobj == NULL)
- return ERR_PTR(-ENOMEM);
-
- type &= CMDOBJ_TYPE | MARKEROBJ_TYPE;
- if (type == 0) {
- kfree(cmdobj);
- return ERR_PTR(-EINVAL);
- }
-
- drawobj = DRAWOBJ(cmdobj);
-
- if (drawobj_init(device, context, drawobj, type)) {
- kfree(cmdobj);
- return ERR_PTR(-ENOENT);
- }
+ struct kgsl_drawobj_cmd *cmdobj = _drawobj_create(device,
+ context, sizeof(*cmdobj),
+ (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)));
- /* sanitize our flags for drawobj's */
- drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
+ if (!IS_ERR(cmdobj)) {
+ /* sanitize our flags for drawobj's */
+ cmdobj->base.flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
| KGSL_DRAWOBJ_MARKER
| KGSL_DRAWOBJ_END_OF_FRAME
| KGSL_DRAWOBJ_PWR_CONSTRAINT
@@ -699,8 +730,9 @@ struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
| KGSL_DRAWOBJ_PROFILING
| KGSL_DRAWOBJ_PROFILING_KTIME);
- INIT_LIST_HEAD(&cmdobj->cmdlist);
- INIT_LIST_HEAD(&cmdobj->memlist);
+ INIT_LIST_HEAD(&cmdobj->cmdlist);
+ INIT_LIST_HEAD(&cmdobj->memlist);
+ }
return cmdobj;
}
@@ -864,7 +896,7 @@ int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
return 0;
}
-static int drawobj_add_object(struct list_head *head,
+static int kgsl_drawobj_add_memobject(struct list_head *head,
struct kgsl_command_object *obj)
{
struct kgsl_memobj_node *mem;
@@ -884,6 +916,62 @@ static int drawobj_add_object(struct list_head *head,
return 0;
}
+static int kgsl_drawobj_add_sparseobject(struct list_head *head,
+ struct kgsl_sparse_binding_object *obj, unsigned int virt_id)
+{
+ struct kgsl_sparseobj_node *mem;
+
+ mem = kmem_cache_alloc(sparseobjs_cache, GFP_KERNEL);
+ if (mem == NULL)
+ return -ENOMEM;
+
+ mem->virt_id = virt_id;
+ mem->obj.id = obj->id;
+ mem->obj.virtoffset = obj->virtoffset;
+ mem->obj.physoffset = obj->physoffset;
+ mem->obj.size = obj->size;
+ mem->obj.flags = obj->flags;
+
+ list_add_tail(&mem->node, head);
+ return 0;
+}
+
+int kgsl_drawobj_sparse_add_sparselist(struct kgsl_device *device,
+ struct kgsl_drawobj_sparse *sparseobj, unsigned int id,
+ void __user *ptr, unsigned int size, unsigned int count)
+{
+ struct kgsl_sparse_binding_object obj;
+ int i, ret = 0;
+
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ memset(&obj, 0, sizeof(obj));
+
+ ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
+ if (ret)
+ return ret;
+
+ if (!(obj.flags & (KGSL_SPARSE_BIND | KGSL_SPARSE_UNBIND)))
+ return -EINVAL;
+
+ ret = kgsl_drawobj_add_sparseobject(&sparseobj->sparselist,
+ &obj, id);
+ if (ret)
+ return ret;
+
+ ptr += sizeof(obj);
+ }
+
+ sparseobj->size = size;
+ sparseobj->count = count;
+
+ return 0;
+}
+
+
#define CMDLIST_FLAGS \
(KGSL_CMDLIST_IB | \
KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
@@ -922,7 +1010,7 @@ int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
return -EINVAL;
}
- ret = drawobj_add_object(&cmdobj->cmdlist, &obj);
+ ret = kgsl_drawobj_add_memobject(&cmdobj->cmdlist, &obj);
if (ret)
return ret;
@@ -967,7 +1055,8 @@ int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
add_profiling_buffer(device, cmdobj, obj.gpuaddr,
obj.size, obj.id, obj.offset);
else {
- ret = drawobj_add_object(&cmdobj->memlist, &obj);
+ ret = kgsl_drawobj_add_memobject(&cmdobj->memlist,
+ &obj);
if (ret)
return ret;
}
@@ -1018,19 +1107,19 @@ int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
return 0;
}
-void kgsl_drawobj_exit(void)
+void kgsl_drawobjs_cache_exit(void)
{
- if (memobjs_cache != NULL)
- kmem_cache_destroy(memobjs_cache);
+ kmem_cache_destroy(memobjs_cache);
+ kmem_cache_destroy(sparseobjs_cache);
}
-int kgsl_drawobj_init(void)
+int kgsl_drawobjs_cache_init(void)
{
memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
- if (memobjs_cache == NULL) {
- KGSL_CORE_ERR("failed to create memobjs_cache");
+ sparseobjs_cache = KMEM_CACHE(kgsl_sparseobj_node, 0);
+
+ if (!memobjs_cache || !sparseobjs_cache)
return -ENOMEM;
- }
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index 89ed944c539a..fd9d2bc93f41 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -18,10 +18,13 @@
container_of(obj, struct kgsl_drawobj_sync, base)
#define CMDOBJ(obj) \
container_of(obj, struct kgsl_drawobj_cmd, base)
+#define SPARSEOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_sparse, base)
#define CMDOBJ_TYPE BIT(0)
#define MARKEROBJ_TYPE BIT(1)
#define SYNCOBJ_TYPE BIT(2)
+#define SPARSEOBJ_TYPE BIT(3)
/**
* struct kgsl_drawobj - KGSL drawobj descriptor
@@ -45,7 +48,7 @@ struct kgsl_drawobj {
* struct kgsl_drawobj_cmd - KGSL command obj, This covers marker
* cmds also since markers are special form of cmds that do not
* need their cmds to be executed.
- * @base: Base kgsl_drawobj
+ * @base: Base kgsl_drawobj, this needs to be the first entry
* @priv: Internal flags
* @global_ts: The ringbuffer timestamp corresponding to this
* command obj
@@ -123,6 +126,22 @@ struct kgsl_drawobj_sync_event {
struct kgsl_device *device;
};
+/**
+ * struct kgsl_drawobj_sparse - KGSL sparse obj descriptor
+ * @base: Base kgsl_obj, this needs to be the first entry
+ * @id: virtual id of the bind/unbind
+ * @sparselist: list of binds/unbinds
+ * @size: Size of each kgsl_sparse_binding_object
+ * @count: Number of elements in list
+ */
+struct kgsl_drawobj_sparse {
+ struct kgsl_drawobj base;
+ unsigned int id;
+ struct list_head sparselist;
+ unsigned int size;
+ unsigned int count;
+};
+
#define KGSL_DRAWOBJ_FLAGS \
{ KGSL_DRAWOBJ_MARKER, "MARKER" }, \
{ KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
@@ -172,9 +191,15 @@ int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
struct kgsl_drawobj_sync *syncobj,
struct kgsl_cmd_syncpoint *sync);
-
-int kgsl_drawobj_init(void);
-void kgsl_drawobj_exit(void);
+struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create(
+ struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags);
+int kgsl_drawobj_sparse_add_sparselist(struct kgsl_device *device,
+ struct kgsl_drawobj_sparse *sparseobj, unsigned int id,
+ void __user *ptr, unsigned int size, unsigned int count);
+
+int kgsl_drawobjs_cache_init(void);
+void kgsl_drawobjs_cache_exit(void);
void kgsl_dump_syncpoints(struct kgsl_device *device,
struct kgsl_drawobj_sync *syncobj);
diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c
index 894e6a4a146b..f7876335d95e 100644
--- a/drivers/gpu/msm/kgsl_ioctl.c
+++ b/drivers/gpu/msm/kgsl_ioctl.c
@@ -100,6 +100,8 @@ static const struct kgsl_ioctl kgsl_ioctl_funcs[] = {
kgsl_ioctl_sparse_virt_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
kgsl_ioctl_sparse_bind),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_SPARSE_COMMAND,
+ kgsl_ioctl_gpu_sparse_command),
};
long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 774cd2210566..21febbb0d84e 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1418,8 +1418,10 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
}
ret = hid_hw_output_report(hdev, buf, 1);
- if (ret < 0)
- hid_err(hdev, "can't set operational mode: step 3\n");
+ if (ret < 0) {
+ hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+ ret = 0;
+ }
out:
kfree(buf);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 022176189346..54dc4ce09f35 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -51,10 +51,26 @@ struct uhid_device {
u32 report_id;
u32 report_type;
struct uhid_event report_buf;
+ struct work_struct worker;
};
static struct miscdevice uhid_misc;
+static void uhid_device_add_worker(struct work_struct *work)
+{
+ struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
+ int ret;
+
+ ret = hid_add_device(uhid->hid);
+ if (ret) {
+ hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);
+
+ hid_destroy_device(uhid->hid);
+ uhid->hid = NULL;
+ uhid->running = false;
+ }
+}
+
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
__u8 newhead;
@@ -516,18 +532,14 @@ static int uhid_dev_create2(struct uhid_device *uhid,
uhid->hid = hid;
uhid->running = true;
- ret = hid_add_device(hid);
- if (ret) {
- hid_err(hid, "Cannot register HID device\n");
- goto err_hid;
- }
+ /* Adding of a HID device is done through a worker, to allow HID drivers
+ * which use feature requests during .probe to work; without this they
+ * would be blocked on devlock, which is held by uhid_char_write.
+ */
+ schedule_work(&uhid->worker);
return 0;
-err_hid:
- hid_destroy_device(hid);
- uhid->hid = NULL;
- uhid->running = false;
err_free:
kfree(uhid->rd_data);
uhid->rd_data = NULL;
@@ -568,6 +580,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid)
uhid->running = false;
wake_up_interruptible(&uhid->report_wait);
+ cancel_work_sync(&uhid->worker);
+
hid_destroy_device(uhid->hid);
kfree(uhid->rd_data);
@@ -630,6 +644,7 @@ static int uhid_char_open(struct inode *inode, struct file *file)
init_waitqueue_head(&uhid->waitq);
init_waitqueue_head(&uhid->report_wait);
uhid->running = false;
+ INIT_WORK(&uhid->worker, uhid_device_add_worker);
file->private_data = uhid;
nonseekable_open(inode, file);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 9098f13f2f44..1ef37c727572 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
+#include <linux/interrupt.h>
#include "hyperv_vmbus.h"
@@ -496,8 +497,21 @@ static void reset_channel_cb(void *arg)
static int vmbus_close_internal(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
+ struct tasklet_struct *tasklet;
int ret;
+ /*
+ * process_chn_event(), running in the tasklet, can race
+ * with vmbus_close_internal() in the case of SMP guest, e.g., when
+ * the former is accessing channel->inbound.ring_buffer, the latter
+ * could be freeing the ring_buffer pages.
+ *
+ * To resolve the race, we can serialize them by disabling the
+ * tasklet when the latter is running here.
+ */
+ tasklet = hv_context.event_dpc[channel->target_cpu];
+ tasklet_disable(tasklet);
+
channel->state = CHANNEL_OPEN_STATE;
channel->sc_creation_callback = NULL;
/* Stop callback and cancel the timer asap */
@@ -525,7 +539,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to post the close msg,
* it is perhaps better to leak memory.
*/
- return ret;
+ goto out;
}
/* Tear down the gpadl for the channel's ring buffer */
@@ -538,7 +552,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to teardown gpadl,
* it is perhaps better to leak memory.
*/
- return ret;
+ goto out;
}
}
@@ -549,12 +563,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
free_pages((unsigned long)channel->ringbuffer_pages,
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
- /*
- * If the channel has been rescinded; process device removal.
- */
- if (channel->rescind)
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
+out:
+ tasklet_enable(tasklet);
+
return ret;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 652afd11a9ef..37238dffd947 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -28,6 +28,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/hyperv.h>
#include "hyperv_vmbus.h"
@@ -191,6 +192,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
if (channel == NULL)
return;
+ BUG_ON(!channel->rescind);
+
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -230,9 +233,7 @@ void vmbus_free_channels(void)
list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
listentry) {
- /* if we don't set rescind to true, vmbus_close_internal()
- * won't invoke hv_process_channel_removal().
- */
+ /* hv_process_channel_removal() needs this */
channel->rescind = true;
vmbus_device_unregister(channel->device_obj);
@@ -459,6 +460,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
cpumask_of_node(primary->numa_node));
cur_cpu = -1;
+
+ /*
+ * Normally Hyper-V host doesn't create more subchannels than there
+ * are VCPUs on the node but it is possible when not all present VCPUs
+ * on the node are initialized by guest. Clear the alloced_cpus_in_node
+ * to start over.
+ */
+ if (cpumask_equal(&primary->alloced_cpus_in_node,
+ cpumask_of_node(primary->numa_node)))
+ cpumask_clear(&primary->alloced_cpus_in_node);
+
while (true) {
cur_cpu = cpumask_next(cur_cpu, &available_mask);
if (cur_cpu >= nr_cpu_ids) {
@@ -488,6 +500,40 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
channel->target_vp = hv_context.vp_index[cur_cpu];
}
+static void vmbus_wait_for_unload(void)
+{
+ int cpu = smp_processor_id();
+ void *page_addr = hv_context.synic_message_page[cpu];
+ struct hv_message *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct vmbus_channel_message_header *hdr;
+ bool unloaded = false;
+
+ while (1) {
+ if (msg->header.message_type == HVMSG_NONE) {
+ mdelay(10);
+ continue;
+ }
+
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+ unloaded = true;
+
+ msg->header.message_type = HVMSG_NONE;
+ /*
+ * header.message_type needs to be written before we do
+ * wrmsrl() below.
+ */
+ mb();
+
+ if (msg->header.message_flags.msg_pending)
+ wrmsrl(HV_X64_MSR_EOM, 0);
+
+ if (unloaded)
+ break;
+ }
+}
+
/*
* vmbus_unload_response - Handler for the unload response.
*/
@@ -513,7 +559,14 @@ void vmbus_initiate_unload(void)
hdr.msgtype = CHANNELMSG_UNLOAD;
vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
- wait_for_completion(&vmbus_connection.unload_event);
+ /*
+ * vmbus_initiate_unload() is also called on crash and the crash can be
+ * happening in an interrupt context, where scheduling is impossible.
+ */
+ if (!in_interrupt())
+ wait_for_completion(&vmbus_connection.unload_event);
+ else
+ vmbus_wait_for_unload();
}
/*
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 6341be8739ae..63194a9a7189 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -293,8 +293,14 @@ void hv_cleanup(void)
* Cleanup the TSC page based CS.
*/
if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
- clocksource_change_rating(&hyperv_cs_tsc, 10);
- clocksource_unregister(&hyperv_cs_tsc);
+ /*
+ * Crash can happen in an interrupt context and unregistering
+ * a clocksource is impossible and redundant in this case.
+ */
+ if (!oops_in_progress) {
+ clocksource_change_rating(&hyperv_cs_tsc, 10);
+ clocksource_unregister(&hyperv_cs_tsc);
+ }
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index db4b887b889d..c37a71e13de0 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -51,7 +51,6 @@ static struct {
struct hv_fcopy_hdr *fcopy_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
- void *fcopy_context; /* for the channel callback */
} fcopy_transaction;
static void fcopy_respond_to_host(int error);
@@ -67,6 +66,13 @@ static struct hvutil_transport *hvt;
*/
static int dm_reg_value;
+static void fcopy_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ fcopy_transaction.state = HVUTIL_READY;
+ hv_fcopy_onchannelcallback(channel);
+}
+
static void fcopy_timeout_func(struct work_struct *dummy)
{
/*
@@ -74,13 +80,7 @@ static void fcopy_timeout_func(struct work_struct *dummy)
* process the pending transaction.
*/
fcopy_respond_to_host(HV_E_FAIL);
-
- /* Transaction is finished, reset the state. */
- if (fcopy_transaction.state > HVUTIL_READY)
- fcopy_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}
static int fcopy_handle_handshake(u32 version)
@@ -108,9 +108,7 @@ static int fcopy_handle_handshake(u32 version)
return -EINVAL;
}
pr_debug("FCP: userspace daemon ver. %d registered\n", version);
- fcopy_transaction.state = HVUTIL_READY;
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
return 0;
}
@@ -227,15 +225,8 @@ void hv_fcopy_onchannelcallback(void *context)
int util_fw_version;
int fcopy_srv_version;
- if (fcopy_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- fcopy_transaction.fcopy_context = context;
+ if (fcopy_transaction.state > HVUTIL_READY)
return;
- }
- fcopy_transaction.fcopy_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
&requestid);
@@ -275,7 +266,8 @@ void hv_fcopy_onchannelcallback(void *context)
* Send the information to the user-level daemon.
*/
schedule_work(&fcopy_send_work);
- schedule_delayed_work(&fcopy_timeout_work, 5*HZ);
+ schedule_delayed_work(&fcopy_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
@@ -304,9 +296,8 @@ static int fcopy_on_msg(void *msg, int len)
if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
fcopy_respond_to_host(*val);
- fcopy_transaction.state = HVUTIL_READY;
- hv_poll_channel(fcopy_transaction.fcopy_context,
- hv_fcopy_onchannelcallback);
+ hv_poll_channel(fcopy_transaction.recv_channel,
+ fcopy_poll_wrapper);
}
return 0;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 74c38a9f34a6..2a3420c4ca59 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -66,7 +66,6 @@ static struct {
struct hv_kvp_msg *kvp_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
- void *kvp_context; /* for the channel callback */
} kvp_transaction;
/*
@@ -94,6 +93,13 @@ static struct hvutil_transport *hvt;
*/
#define HV_DRV_VERSION "3.1"
+static void kvp_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ kvp_transaction.state = HVUTIL_READY;
+ hv_kvp_onchannelcallback(channel);
+}
+
static void
kvp_register(int reg_value)
{
@@ -121,12 +127,7 @@ static void kvp_timeout_func(struct work_struct *dummy)
*/
kvp_respond_to_host(NULL, HV_E_FAIL);
- /* Transaction is finished, reset the state. */
- if (kvp_transaction.state > HVUTIL_READY)
- kvp_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(kvp_transaction.kvp_context,
- hv_kvp_onchannelcallback);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
@@ -218,9 +219,7 @@ static int kvp_on_msg(void *msg, int len)
*/
if (cancel_delayed_work_sync(&kvp_timeout_work)) {
kvp_respond_to_host(message, error);
- kvp_transaction.state = HVUTIL_READY;
- hv_poll_channel(kvp_transaction.kvp_context,
- hv_kvp_onchannelcallback);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
return 0;
@@ -596,15 +595,8 @@ void hv_kvp_onchannelcallback(void *context)
int util_fw_version;
int kvp_srv_version;
- if (kvp_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- kvp_transaction.kvp_context = context;
+ if (kvp_transaction.state > HVUTIL_READY)
return;
- }
- kvp_transaction.kvp_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
@@ -668,7 +660,8 @@ void hv_kvp_onchannelcallback(void *context)
* user-mode not responding.
*/
schedule_work(&kvp_sendkey_work);
- schedule_delayed_work(&kvp_timeout_work, 5*HZ);
+ schedule_delayed_work(&kvp_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
return;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 815405f2e777..81882d4848bd 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -53,7 +53,6 @@ static struct {
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
struct hv_vss_msg *msg; /* current message */
- void *vss_context; /* for the channel callback */
} vss_transaction;
@@ -74,6 +73,13 @@ static void vss_timeout_func(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_send_op_work, vss_send_op);
+static void vss_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ vss_transaction.state = HVUTIL_READY;
+ hv_vss_onchannelcallback(channel);
+}
+
/*
* Callback when data is received from user mode.
*/
@@ -86,12 +92,7 @@ static void vss_timeout_func(struct work_struct *dummy)
pr_warn("VSS: timeout waiting for daemon to reply\n");
vss_respond_to_host(HV_E_FAIL);
- /* Transaction is finished, reset the state. */
- if (vss_transaction.state > HVUTIL_READY)
- vss_transaction.state = HVUTIL_READY;
-
- hv_poll_channel(vss_transaction.vss_context,
- hv_vss_onchannelcallback);
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}
static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
@@ -138,9 +139,8 @@ static int vss_on_msg(void *msg, int len)
if (cancel_delayed_work_sync(&vss_timeout_work)) {
vss_respond_to_host(vss_msg->error);
/* Transaction is finished, reset the state. */
- vss_transaction.state = HVUTIL_READY;
- hv_poll_channel(vss_transaction.vss_context,
- hv_vss_onchannelcallback);
+ hv_poll_channel(vss_transaction.recv_channel,
+ vss_poll_wrapper);
}
} else {
/* This is a spurious call! */
@@ -238,15 +238,8 @@ void hv_vss_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;
- if (vss_transaction.state > HVUTIL_READY) {
- /*
- * We will defer processing this callback once
- * the current transaction is complete.
- */
- vss_transaction.vss_context = context;
+ if (vss_transaction.state > HVUTIL_READY)
return;
- }
- vss_transaction.vss_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
&requestid);
@@ -338,6 +331,11 @@ static void vss_on_reset(void)
int
hv_vss_init(struct hv_util_service *srv)
{
+ if (vmbus_proto_version < VERSION_WIN8_1) {
+ pr_warn("Integration service 'Backup (volume snapshot)'"
+ " not supported on this host version.\n");
+ return -ENOTSUPP;
+ }
recv_buffer = srv->recv_buffer;
/*
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 6a9d80a5332d..1505ee6e6605 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -204,9 +204,12 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
goto out_unlock;
}
hvt->outmsg = kzalloc(len, GFP_KERNEL);
- memcpy(hvt->outmsg, msg, len);
- hvt->outmsg_len = len;
- wake_up_interruptible(&hvt->outmsg_q);
+ if (hvt->outmsg) {
+ memcpy(hvt->outmsg, msg, len);
+ hvt->outmsg_len = len;
+ wake_up_interruptible(&hvt->outmsg_q);
+ } else
+ ret = -ENOMEM;
out_unlock:
mutex_unlock(&hvt->outmsg_lock);
return ret;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 3782636562a1..12156db2e88e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,11 @@
#include <linux/hyperv.h>
/*
+ * Timeout for services such as KVP and fcopy.
+ */
+#define HV_UTIL_TIMEOUT 30
+
+/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
* is set by CPUID(HVCPUID_VERSION_FEATURES).
*/
@@ -759,11 +764,7 @@ static inline void hv_poll_channel(struct vmbus_channel *channel,
if (!channel)
return;
- if (channel->target_cpu != smp_processor_id())
- smp_call_function_single(channel->target_cpu,
- cb, channel, true);
- else
- cb(channel);
+ smp_call_function_single(channel->target_cpu, cb, channel, true);
}
enum hvutil_device_state {
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index f19b6f7a467a..509ed9731630 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -41,6 +41,7 @@
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
+#include <linux/random.h>
#include "hyperv_vmbus.h"
static struct acpi_device *hv_acpi_dev;
@@ -104,6 +105,7 @@ static struct notifier_block hyperv_panic_block = {
};
struct resource *hyperv_mmio;
+DEFINE_SEMAPHORE(hyperv_mmio_lock);
static int vmbus_exists(void)
{
@@ -602,23 +604,11 @@ static int vmbus_remove(struct device *child_device)
{
struct hv_driver *drv;
struct hv_device *dev = device_to_hv_device(child_device);
- u32 relid = dev->channel->offermsg.child_relid;
if (child_device->driver) {
drv = drv_to_hv_drv(child_device->driver);
if (drv->remove)
drv->remove(dev);
- else {
- hv_process_channel_removal(dev->channel, relid);
- pr_err("remove not set for driver %s\n",
- dev_name(child_device));
- }
- } else {
- /*
- * We don't have a driver for this device; deal with the
- * rescind message by removing the channel.
- */
- hv_process_channel_removal(dev->channel, relid);
}
return 0;
@@ -653,7 +643,10 @@ static void vmbus_shutdown(struct device *child_device)
static void vmbus_device_release(struct device *device)
{
struct hv_device *hv_dev = device_to_hv_device(device);
+ struct vmbus_channel *channel = hv_dev->channel;
+ hv_process_channel_removal(channel,
+ channel->offermsg.child_relid);
kfree(hv_dev);
}
@@ -826,6 +819,8 @@ static void vmbus_isr(void)
else
tasklet_schedule(&msg_dpc);
}
+
+ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
@@ -867,7 +862,7 @@ static int vmbus_bus_init(int irq)
on_each_cpu(hv_synic_init, NULL, 1);
ret = vmbus_connect();
if (ret)
- goto err_alloc;
+ goto err_connect;
if (vmbus_proto_version > VERSION_WIN7)
cpu_hotplug_disable();
@@ -885,6 +880,8 @@ static int vmbus_bus_init(int irq)
return 0;
+err_connect:
+ on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
hv_synic_free();
hv_remove_vmbus_irq();
@@ -1144,7 +1141,10 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t range_min, range_max, start, local_min, local_max;
const char *dev_n = dev_name(&device_obj->device);
u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
- int i;
+ int i, retval;
+
+ retval = -ENXIO;
+ down(&hyperv_mmio_lock);
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= max) || (iter->end <= min))
@@ -1181,13 +1181,17 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
for (; start + size - 1 <= local_max; start += align) {
*new = request_mem_region_exclusive(start, size,
dev_n);
- if (*new)
- return 0;
+ if (*new) {
+ retval = 0;
+ goto exit;
+ }
}
}
}
- return -ENXIO;
+exit:
+ up(&hyperv_mmio_lock);
+ return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 17ae2eb26ce2..d5c06f2764f4 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -109,24 +109,24 @@ static int iio_hwmon_probe(struct platform_device *pdev)
switch (type) {
case IIO_VOLTAGE:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "in%d_input",
- in_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "in%d_input",
+ in_i++);
break;
case IIO_TEMP:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "temp%d_input",
- temp_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "temp%d_input",
+ temp_i++);
break;
case IIO_CURRENT:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "curr%d_input",
- curr_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "curr%d_input",
+ curr_i++);
break;
case IIO_HUMIDITYRELATIVE:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "humidity%d_input",
- humidity_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "humidity%d_input",
+ humidity_i++);
break;
default:
ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 8c92a564299d..ed70a980d9ac 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -79,7 +79,6 @@ config CORESIGHT_SOURCE_ETM3X
config CORESIGHT_SOURCE_ETM4X
bool "CoreSight Embedded Trace Macrocell 4.x driver"
- depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
help
This driver provides support for the ETM4.x tracer module, tracing the
@@ -113,7 +112,7 @@ config CORESIGHT_QCOM_REPLICATOR
config CORESIGHT_STM
bool "CoreSight System Trace Macrocell driver"
- depends on CORESIGHT_LINKS_AND_SINKS
+ select CORESIGHT_LINKS_AND_SINKS
help
This driver provides support for hardware assisted software
instrumentation based tracing. This is primarily useful for
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 165d3001c301..c6ec5c62b7a9 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -419,6 +419,38 @@ static struct intel_th_subdevice {
},
};
+#ifdef CONFIG_MODULES
+static void __intel_th_request_hub_module(struct work_struct *work)
+{
+ struct intel_th *th = container_of(work, struct intel_th,
+ request_module_work);
+
+ request_module("intel_th_%s", th->hub->name);
+}
+
+static int intel_th_request_hub_module(struct intel_th *th)
+{
+ INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
+ schedule_work(&th->request_module_work);
+
+ return 0;
+}
+
+static void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+ flush_work(&th->request_module_work);
+}
+#else
+static inline int intel_th_request_hub_module(struct intel_th *th)
+{
+ return -EINVAL;
+}
+
+static inline void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+}
+#endif /* CONFIG_MODULES */
+
static int intel_th_populate(struct intel_th *th, struct resource *devres,
unsigned int ndevres, int irq)
{
@@ -488,7 +520,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
th->hub = thdev;
- err = request_module("intel_th_%s", subdev->name);
+ err = intel_th_request_hub_module(th);
if (!err)
req++;
}
@@ -603,6 +635,7 @@ void intel_th_free(struct intel_th *th)
{
int i;
+ intel_th_request_hub_module_flush(th);
for (i = 0; i < TH_SUBDEVICE_MAX; i++)
if (th->thdev[i] != th->hub)
intel_th_device_remove(th->thdev[i]);
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 57fd72b20fae..d03a6cd1c65d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -197,6 +197,9 @@ struct intel_th {
int id;
int major;
+#ifdef CONFIG_MODULES
+ struct work_struct request_module_work;
+#endif /* CONFIG_MODULES */
#ifdef CONFIG_INTEL_TH_DEBUG
struct dentry *dbg;
#endif
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 641e87936064..d57a2f75dccf 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -67,6 +67,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Kaby Lake PCH-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index a0d95ff682ae..2d5ff86398d0 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
msg->outsize = request_len;
msg->insize = response_len;
- result = cros_ec_cmd_xfer(bus->ec, msg);
+ result = cros_ec_cmd_xfer_status(bus->ec, msg);
if (result < 0) {
dev_err(dev, "Error transferring EC i2c message %d\n", result);
goto exit;
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index 8eff62738877..e253598d764c 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -433,7 +433,7 @@ static int efm32_i2c_probe(struct platform_device *pdev)
ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request irq (%d)\n", ret);
- return ret;
+ goto err_disable_clk;
}
ret = i2c_add_adapter(&ddata->adapter);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 27fa0cb09538..85f39cc3e276 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -244,6 +244,13 @@ struct i801_priv {
struct platform_device *mux_pdev;
#endif
struct platform_device *tco_pdev;
+
+ /*
+ * If set to true the host controller registers are reserved for
+ * ACPI AML use. Protected by acpi_lock.
+ */
+ bool acpi_reserved;
+ struct mutex acpi_lock;
};
#define FEATURE_SMBUS_PEC (1 << 0)
@@ -714,9 +721,15 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
{
int hwpec;
int block = 0;
- int ret, xact = 0;
+ int ret = 0, xact = 0;
struct i801_priv *priv = i2c_get_adapdata(adap);
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved) {
+ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
+ }
+
hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
&& size != I2C_SMBUS_QUICK
&& size != I2C_SMBUS_I2C_BLOCK_DATA;
@@ -773,7 +786,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
default:
dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
size);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (hwpec) /* enable/disable hardware PEC */
@@ -796,11 +810,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
if (block)
- return ret;
+ goto out;
if (ret)
- return ret;
+ goto out;
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
- return 0;
+ goto out;
switch (xact & 0x7f) {
case I801_BYTE: /* Result put in SMBHSTDAT0 */
@@ -812,7 +826,10 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
(inb_p(SMBHSTDAT1(priv)) << 8);
break;
}
- return 0;
+
+out:
+ mutex_unlock(&priv->acpi_lock);
+ return ret;
}
@@ -1249,6 +1266,72 @@ static void i801_add_tco(struct i801_priv *priv)
priv->tco_pdev = pdev;
}
+#ifdef CONFIG_ACPI
+static acpi_status
+i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context, void *region_context)
+{
+ struct i801_priv *priv = handler_context;
+ struct pci_dev *pdev = priv->pci_dev;
+ acpi_status status;
+
+ /*
+ * Once BIOS AML code touches the OpRegion we warn and inhibit any
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved) {
+ priv->acpi_reserved = true;
+
+ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
+ dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+ }
+
+ if ((function & ACPI_IO_MASK) == ACPI_READ)
+ status = acpi_os_read_port(address, (u32 *)value, bits);
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (adev) {
+ status = acpi_install_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+ NULL, priv);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ }
+
+ return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (!adev)
+ return;
+
+ acpi_remove_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
@@ -1266,6 +1349,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->adapter.dev.parent = &dev->dev;
ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
priv->adapter.retries = 3;
+ mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
switch (dev->device) {
@@ -1328,10 +1412,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
- err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
- if (err) {
+ if (i801_acpi_probe(priv))
return -ENODEV;
- }
err = pcim_iomap_regions(dev, 1 << SMBBAR,
dev_driver_string(&dev->dev));
@@ -1340,6 +1422,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"Failed to request SMBus region 0x%lx-0x%Lx\n",
priv->smba,
(unsigned long long)pci_resource_end(dev, SMBBAR));
+ i801_acpi_remove(priv);
return err;
}
@@ -1404,6 +1487,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+ i801_acpi_remove(priv);
return err;
}
@@ -1422,6 +1506,7 @@ static void i801_remove(struct pci_dev *dev)
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
+ i801_acpi_remove(priv);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
platform_device_unregister(priv->tco_pdev);
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 5fbd5bd0878f..49fc2c7e560a 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -150,7 +150,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
mux->data.idle_in_use = true;
/* map address from "reg" if exists */
- if (of_address_to_resource(np, 0, &res)) {
+ if (of_address_to_resource(np, 0, &res) == 0) {
mux->data.reg_size = resource_size(&res);
mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(mux->data.reg))
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 146eed70bdf4..ba947df5a8c7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -716,6 +716,26 @@ static struct cpuidle_state avn_cstates[] = {
{
.enter = NULL }
};
+static struct cpuidle_state knl_cstates[] = {
+ {
+ .name = "C1-KNL",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .name = "C6-KNL",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 120,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .enter = NULL }
+};
/**
* intel_idle
@@ -890,6 +910,10 @@ static const struct idle_cpu idle_cpu_avn = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_knl = {
+ .state_table = knl_cstates,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -921,6 +945,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x56, idle_cpu_bdw),
ICPU(0x4e, idle_cpu_skl),
ICPU(0x5e, idle_cpu_skl),
+ ICPU(0x57, idle_cpu_knl),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 0e8fda8b9080..ec774917f4a4 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -28,9 +28,8 @@
#define FG_ADC_RR_SKIN_TEMP_MSB 0x51
#define FG_ADC_RR_RR_ADC_CTL 0x52
#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK 0x8
-#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL BIT(3)
+#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL BIT(3)
#define FG_ADC_RR_ADC_LOG 0x53
-#define FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK 0xFE
#define FG_ADC_RR_ADC_LOG_CLR_CTRL BIT(0)
#define FG_ADC_RR_FAKE_BATT_LOW_LSB 0x58
@@ -40,6 +39,7 @@
#define FG_ADC_RR_BATT_ID_CTRL 0x60
#define FG_ADC_RR_BATT_ID_TRIGGER 0x61
+#define FG_ADC_RR_BATT_ID_TRIGGER_CTL BIT(0)
#define FG_ADC_RR_BATT_ID_STS 0x62
#define FG_ADC_RR_BATT_ID_CFG 0x63
#define FG_ADC_RR_BATT_ID_5_LSB 0x66
@@ -182,9 +182,11 @@
#define FG_RR_ADC_COHERENT_CHECK_RETRY 5
#define FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN 16
#define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3
+#define FG_RR_ADC_STS_CHANNEL_STS 0x2
-#define FG_RR_CONV_CONTINUOUS_TIME_MIN 80000
-#define FG_RR_CONV_CONTINUOUS_TIME_MAX 81000
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US 50000
+#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US 51000
+#define FG_RR_CONV_MAX_RETRY_CNT 50
/*
* The channel number is not a physical index in hardware,
@@ -570,40 +572,157 @@ static const struct rradc_channels rradc_chans[] = {
FG_ADC_RR_AUX_THERM_STS)
};
+static int rradc_enable_continuous_mode(struct rradc_chip *chip)
+{
+ int rc = 0;
+
+ /* Clear channel log */
+ rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
+ FG_ADC_RR_ADC_LOG_CLR_CTRL,
+ FG_ADC_RR_ADC_LOG_CLR_CTRL);
+ if (rc < 0) {
+ pr_err("log ctrl update to clear failed:%d\n", rc);
+ return rc;
+ }
+
+ rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
+ FG_ADC_RR_ADC_LOG_CLR_CTRL, 0);
+ if (rc < 0) {
+ pr_err("log ctrl update to not clear failed:%d\n", rc);
+ return rc;
+ }
+
+ /* Switch to continuous mode */
+ rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
+ FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK,
+ FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL);
+ if (rc < 0) {
+ pr_err("Update to continuous mode failed:%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int rradc_disable_continuous_mode(struct rradc_chip *chip)
+{
+ int rc = 0;
+
+ /* Switch to non continuous mode */
+ rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
+ FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK, 0);
+ if (rc < 0) {
+ pr_err("Update to non-continuous mode failed:%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u8 *buf, u16 status)
+{
+ int rc = 0, retry_cnt = 0, mask = 0;
+
+ switch (prop->channel) {
+ case RR_ADC_BATT_ID:
+ /* BATT_ID STS bit does not get set initially */
+ mask = FG_RR_ADC_STS_CHANNEL_STS;
+ break;
+ default:
+ mask = FG_RR_ADC_STS_CHANNEL_READING_MASK;
+ break;
+ }
+
+ while (((buf[0] & mask) != mask) &&
+ (retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
+ pr_debug("%s is not ready; nothing to read:0x%x\n",
+ rradc_chans[prop->channel].datasheet_name, buf[0]);
+ usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US,
+ FG_RR_CONV_CONTINUOUS_TIME_MAX_US);
+ retry_cnt++;
+ rc = rradc_read(chip, status, buf, 1);
+ if (rc < 0) {
+ pr_err("status read failed:%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (retry_cnt >= FG_RR_CONV_MAX_RETRY_CNT)
+ rc = -ENODATA;
+
+ return rc;
+}
+
+static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u8 *buf)
+{
+ int rc = 0;
+ u16 status = 0;
+
+ rc = rradc_enable_continuous_mode(chip);
+ if (rc < 0) {
+ pr_err("Failed to switch to continuous mode\n");
+ return rc;
+ }
+
+ status = rradc_chans[prop->channel].sts;
+ rc = rradc_read(chip, status, buf, 1);
+ if (rc < 0) {
+ pr_err("status read failed:%d\n", rc);
+ return rc;
+ }
+
+ rc = rradc_check_status_ready_with_retry(chip, prop,
+ buf, status);
+ if (rc < 0) {
+ pr_err("Status read failed:%d\n", rc);
+ return rc;
+ }
+
+ rc = rradc_disable_continuous_mode(chip);
+ if (rc < 0) {
+ pr_err("Failed to switch to non continuous mode\n");
+ return rc;
+ }
+
+ return rc;
+}
+
static int rradc_do_conversion(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 *data)
{
- int rc = 0, bytes_to_read = 0, retry = 0;
+ int rc = 0, bytes_to_read = 0;
u8 buf[6];
u16 offset = 0, batt_id_5 = 0, batt_id_15 = 0, batt_id_150 = 0;
u16 status = 0;
mutex_lock(&chip->lock);
- if ((prop->channel != RR_ADC_BATT_ID) &&
- (prop->channel != RR_ADC_CHG_HOT_TEMP) &&
- (prop->channel != RR_ADC_CHG_TOO_HOT_TEMP) &&
- (prop->channel != RR_ADC_SKIN_HOT_TEMP) &&
- (prop->channel != RR_ADC_SKIN_TOO_HOT_TEMP) &&
- (prop->channel != RR_ADC_USBIN_V)) {
- /* BATT_ID STS bit does not get set initially */
- status = rradc_chans[prop->channel].sts;
- rc = rradc_read(chip, status, buf, 1);
+ switch (prop->channel) {
+ case RR_ADC_BATT_ID:
+ rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL);
if (rc < 0) {
- pr_err("status read failed:%d\n", rc);
+ pr_err("BATT_ID trigger set failed:%d\n", rc);
goto fail;
}
- buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
- if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) {
- pr_debug("%s is not ready; nothing to read\n",
- rradc_chans[prop->channel].datasheet_name);
- rc = -ENODATA;
+ rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+ if (rc < 0) {
+ pr_err("Error reading in continuous mode:%d\n", rc);
goto fail;
}
- }
- if (prop->channel == RR_ADC_USBIN_V) {
+ rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
+ if (rc < 0) {
+ pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+ goto fail;
+ }
+ break;
+ case RR_ADC_USBIN_V:
/* Force conversion every cycle */
rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK,
@@ -613,31 +732,27 @@ static int rradc_do_conversion(struct rradc_chip *chip,
goto fail;
}
- /* Clear channel log */
- rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
- FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK,
- FG_ADC_RR_ADC_LOG_CLR_CTRL);
+ rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
if (rc < 0) {
- pr_err("log ctrl update to clear failed:%d\n", rc);
+ pr_err("Error reading in continuous mode:%d\n", rc);
goto fail;
}
- rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
- FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK, 0);
- if (rc < 0) {
- pr_err("log ctrl update to not clear failed:%d\n", rc);
- goto fail;
- }
-
- /* Switch to continuous mode */
- rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
- FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK,
- FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL);
+ /* Restore usb_in trigger */
+ rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
+ FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK, 0);
if (rc < 0) {
- pr_err("Update to continuous mode failed:%d\n", rc);
+ pr_err("Restore every cycle update failed:%d\n", rc);
goto fail;
}
-
+ break;
+ case RR_ADC_CHG_HOT_TEMP:
+ case RR_ADC_CHG_TOO_HOT_TEMP:
+ case RR_ADC_SKIN_HOT_TEMP:
+ case RR_ADC_SKIN_TOO_HOT_TEMP:
+ pr_debug("Read only the data registers\n");
+ break;
+ default:
status = rradc_chans[prop->channel].sts;
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
@@ -645,41 +760,14 @@ static int rradc_do_conversion(struct rradc_chip *chip,
goto fail;
}
- buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
- while ((buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) &&
- (retry < 2)) {
- pr_debug("%s is not ready; nothing to read\n",
- rradc_chans[prop->channel].datasheet_name);
- usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN,
- FG_RR_CONV_CONTINUOUS_TIME_MAX);
- retry++;
- rc = rradc_read(chip, status, buf, 1);
- if (rc < 0) {
- pr_err("status read failed:%d\n", rc);
- goto fail;
- }
- }
-
- /* Switch to non continuous mode */
- rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
- FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK, 0);
- if (rc < 0) {
- pr_err("Update to continuous mode failed:%d\n", rc);
- goto fail;
- }
-
- /* Restore usb_in trigger */
- rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
- FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK, 0);
+ rc = rradc_check_status_ready_with_retry(chip, prop,
+ buf, status);
if (rc < 0) {
- pr_err("Restore every cycle update failed:%d\n", rc);
- goto fail;
- }
-
- if (retry >= 2) {
+ pr_debug("Status read failed:%d\n", rc);
rc = -ENODATA;
goto fail;
}
+ break;
}
offset = rradc_chans[prop->channel].lsb;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 0f6f63b20263..7afd226a3321 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -107,6 +107,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
size_t datum_size;
size_t to_wait;
int ret;
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
else
to_wait = min_t(size_t, n / datum_size, rb->watermark);
+ add_wait_queue(&rb->pollq, &wait);
do {
- ret = wait_event_interruptible(rb->pollq,
- iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
- if (ret)
- return ret;
+ if (!indio_dev->info) {
+ ret = -ENODEV;
+ break;
+ }
- if (!indio_dev->info)
- return -ENODEV;
+ if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
+ }
ret = rb->access->read_first_n(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
} while (ret == 0);
+ remove_wait_queue(&rb->pollq, &wait);
return ret;
}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 5fb089e91353..fb43a242847b 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -634,6 +634,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
+ dev_kfree_skb(skb);
return -ENOMEM;
}
nlh->nlmsg_type = NLMSG_DONE;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a95a32ba596e..d3b7ecd106f7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -534,7 +534,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
if (!data) {
- kfree_skb(skb);
+ nlmsg_free(skb);
return -EMSGSIZE;
}
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 870e56b6b25f..05179f47bbde 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -526,7 +526,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
spin_unlock(&tun_qp->tx_lock);
if (ret)
- goto out;
+ goto end;
tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -595,9 +595,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
wr.wr.send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
- if (ret)
- ib_destroy_ah(ah);
+ if (!ret)
+ return 0;
+ out:
+ spin_lock(&tun_qp->tx_lock);
+ tun_qp->tx_ix_tail++;
+ spin_unlock(&tun_qp->tx_lock);
+ tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+ ib_destroy_ah(ah);
return ret;
}
@@ -1278,9 +1284,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+ if (!ret)
+ return 0;
+
+ spin_lock(&sqp->tx_lock);
+ sqp->tx_ix_tail++;
+ spin_unlock(&sqp->tx_lock);
+ sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- if (ret)
- ib_destroy_ah(ah);
+ ib_destroy_ah(ah);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 13eaaf45288f..ea1e2ddaddf5 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -357,7 +357,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_RC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
- sizeof (struct mlx4_wqe_atomic_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
@@ -1162,8 +1162,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
udata, 0, &qp, gfp);
- if (err)
+ if (err) {
+ kfree(qp);
return ERR_PTR(err);
+ }
qp->ibqp.qp_num = qp->mqp.qpn;
qp->xrcdn = xrcdn;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 92ddae101ecc..8184267c7901 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -763,7 +763,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (attr->flags)
return ERR_PTR(-EINVAL);
- if (entries < 0)
+ if (entries < 0 ||
+ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
return ERR_PTR(-EINVAL);
entries = roundup_pow_of_two(entries + 1);
@@ -1094,11 +1095,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -ENOSYS;
}
- if (entries < 1)
+ if (entries < 1 ||
+ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+ entries,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
+ }
entries = roundup_pow_of_two(entries + 1);
- if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fd17443aeacd..bfc940ff9c8a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -962,14 +962,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
break;
case MLX5_DEV_EVENT_PORT_DOWN:
+ case MLX5_DEV_EVENT_PORT_INITIALIZED:
ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
break;
- case MLX5_DEV_EVENT_PORT_INITIALIZED:
- /* not used by ULPs */
- return;
-
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
port = (u8)param;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 307bdbca8938..cfcfbb6b84d7 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -226,6 +226,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -2525,10 +2527,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
return MLX5_FENCE_MODE_SMALL_AND_FENCE;
else
return fence;
-
- } else {
- return 0;
+ } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+ return MLX5_FENCE_MODE_FENCE;
}
+
+ return 0;
}
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
@@ -3092,17 +3095,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
- /* We don't support inline sends for kernel QPs (yet), and we
- * don't know what userspace's value should be.
- */
- qp_attr->cap.max_inline_data = 0;
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c14070d1..fa9c42ff1fb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);
- skb->truesize = SKB_TRUESIZE(skb->len);
-
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7d3281866ffc..942dffca6a9d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1131,7 +1131,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fd4100d56d8c..aff42d5e2296 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -317,6 +317,19 @@ static struct usb_device_id xpad_table[] = {
MODULE_DEVICE_TABLE(usb, xpad_table);
+struct xpad_output_packet {
+ u8 data[XPAD_PKT_LEN];
+ u8 len;
+ bool pending;
+};
+
+#define XPAD_OUT_CMD_IDX 0
+#define XPAD_OUT_FF_IDX 1
+#define XPAD_OUT_LED_IDX (1 + IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF))
+#define XPAD_NUM_OUT_PACKETS (1 + \
+ IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF) + \
+ IS_ENABLED(CONFIG_JOYSTICK_XPAD_LEDS))
+
struct usb_xpad {
struct input_dev *dev; /* input device interface */
struct usb_device *udev; /* usb device */
@@ -329,9 +342,13 @@ struct usb_xpad {
dma_addr_t idata_dma;
struct urb *irq_out; /* urb for interrupt out report */
+ bool irq_out_active; /* we must not use an active URB */
unsigned char *odata; /* output data */
dma_addr_t odata_dma;
- struct mutex odata_mutex;
+ spinlock_t odata_lock;
+
+ struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS];
+ int last_out_packet;
#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
struct xpad_led *led;
@@ -678,18 +695,71 @@ exit:
__func__, retval);
}
+/* Callers must hold xpad->odata_lock spinlock */
+static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
+{
+ struct xpad_output_packet *pkt, *packet = NULL;
+ int i;
+
+ for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) {
+ if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS)
+ xpad->last_out_packet = 0;
+
+ pkt = &xpad->out_packets[xpad->last_out_packet];
+ if (pkt->pending) {
+ dev_dbg(&xpad->intf->dev,
+ "%s - found pending output packet %d\n",
+ __func__, xpad->last_out_packet);
+ packet = pkt;
+ break;
+ }
+ }
+
+ if (packet) {
+ memcpy(xpad->odata, packet->data, packet->len);
+ xpad->irq_out->transfer_buffer_length = packet->len;
+ packet->pending = false;
+ return true;
+ }
+
+ return false;
+}
+
+/* Callers must hold xpad->odata_lock spinlock */
+static int xpad_try_sending_next_out_packet(struct usb_xpad *xpad)
+{
+ int error;
+
+ if (!xpad->irq_out_active && xpad_prepare_next_out_packet(xpad)) {
+ error = usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ if (error) {
+ dev_err(&xpad->intf->dev,
+ "%s - usb_submit_urb failed with result %d\n",
+ __func__, error);
+ return -EIO;
+ }
+
+ xpad->irq_out_active = true;
+ }
+
+ return 0;
+}
+
static void xpad_irq_out(struct urb *urb)
{
struct usb_xpad *xpad = urb->context;
struct device *dev = &xpad->intf->dev;
- int retval, status;
+ int status = urb->status;
+ int error;
+ unsigned long flags;
- status = urb->status;
+ spin_lock_irqsave(&xpad->odata_lock, flags);
switch (status) {
case 0:
/* success */
- return;
+ xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
+ break;
case -ECONNRESET:
case -ENOENT:
@@ -697,19 +767,26 @@ static void xpad_irq_out(struct urb *urb)
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n",
__func__, status);
- return;
+ xpad->irq_out_active = false;
+ break;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n",
__func__, status);
- goto exit;
+ break;
}
-exit:
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
- dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
- __func__, retval);
+ if (xpad->irq_out_active) {
+ error = usb_submit_urb(urb, GFP_ATOMIC);
+ if (error) {
+ dev_err(dev,
+ "%s - usb_submit_urb failed with result %d\n",
+ __func__, error);
+ xpad->irq_out_active = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
}
static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
@@ -728,7 +805,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
goto fail1;
}
- mutex_init(&xpad->odata_mutex);
+ spin_lock_init(&xpad->odata_lock);
xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
if (!xpad->irq_out) {
@@ -770,27 +847,57 @@ static void xpad_deinit_output(struct usb_xpad *xpad)
static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ packet->data[0] = 0x08;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x0F;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
+
+ /* Reset the sequence so we send out presence first */
+ xpad->last_out_packet = -1;
+ retval = xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+
+ return retval;
+}
+
+static int xpad_start_xbox_one(struct usb_xpad *xpad)
+{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+ unsigned long flags;
int retval;
- mutex_lock(&xpad->odata_mutex);
+ spin_lock_irqsave(&xpad->odata_lock, flags);
- xpad->odata[0] = 0x08;
- xpad->odata[1] = 0x00;
- xpad->odata[2] = 0x0F;
- xpad->odata[3] = 0xC0;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ /* Xbox one controller needs to be initialized. */
+ packet->data[0] = 0x05;
+ packet->data[1] = 0x20;
+ packet->len = 2;
+ packet->pending = true;
- retval = usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+ /* Reset the sequence so we send out start packet first */
+ xpad->last_out_packet = -1;
+ retval = xpad_try_sending_next_out_packet(xpad);
- mutex_unlock(&xpad->odata_mutex);
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
return retval;
}
@@ -799,8 +906,11 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct usb_xpad *xpad = input_get_drvdata(dev);
+ struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_FF_IDX];
__u16 strong;
__u16 weak;
+ int retval;
+ unsigned long flags;
if (effect->type != FF_RUMBLE)
return 0;
@@ -808,69 +918,80 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
strong = effect->u.rumble.strong_magnitude;
weak = effect->u.rumble.weak_magnitude;
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
switch (xpad->xtype) {
case XTYPE_XBOX:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x06;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = strong / 256; /* left actuator */
- xpad->odata[4] = 0x00;
- xpad->odata[5] = weak / 256; /* right actuator */
- xpad->irq_out->transfer_buffer_length = 6;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x06;
+ packet->data[2] = 0x00;
+ packet->data[3] = strong / 256; /* left actuator */
+ packet->data[4] = 0x00;
+ packet->data[5] = weak / 256; /* right actuator */
+ packet->len = 6;
+ packet->pending = true;
break;
case XTYPE_XBOX360:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x08;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = strong / 256; /* left actuator? */
- xpad->odata[4] = weak / 256; /* right actuator? */
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->irq_out->transfer_buffer_length = 8;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x08;
+ packet->data[2] = 0x00;
+ packet->data[3] = strong / 256; /* left actuator? */
+ packet->data[4] = weak / 256; /* right actuator? */
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->len = 8;
+ packet->pending = true;
break;
case XTYPE_XBOX360W:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x01;
- xpad->odata[2] = 0x0F;
- xpad->odata[3] = 0xC0;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = strong / 256;
- xpad->odata[6] = weak / 256;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x01;
+ packet->data[2] = 0x0F;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = strong / 256;
+ packet->data[6] = weak / 256;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
break;
case XTYPE_XBOXONE:
- xpad->odata[0] = 0x09; /* activate rumble */
- xpad->odata[1] = 0x08;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = 0x08; /* continuous effect */
- xpad->odata[4] = 0x00; /* simple rumble mode */
- xpad->odata[5] = 0x03; /* L and R actuator only */
- xpad->odata[6] = 0x00; /* TODO: LT actuator */
- xpad->odata[7] = 0x00; /* TODO: RT actuator */
- xpad->odata[8] = strong / 256; /* left actuator */
- xpad->odata[9] = weak / 256; /* right actuator */
- xpad->odata[10] = 0x80; /* length of pulse */
- xpad->odata[11] = 0x00; /* stop period of pulse */
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x09; /* activate rumble */
+ packet->data[1] = 0x08;
+ packet->data[2] = 0x00;
+ packet->data[3] = 0x08; /* continuous effect */
+ packet->data[4] = 0x00; /* simple rumble mode */
+ packet->data[5] = 0x03; /* L and R actuator only */
+ packet->data[6] = 0x00; /* TODO: LT actuator */
+ packet->data[7] = 0x00; /* TODO: RT actuator */
+ packet->data[8] = strong / 256; /* left actuator */
+ packet->data[9] = weak / 256; /* right actuator */
+ packet->data[10] = 0x80; /* length of pulse */
+ packet->data[11] = 0x00; /* stop period of pulse */
+ packet->len = 12;
+ packet->pending = true;
break;
default:
dev_dbg(&xpad->dev->dev,
"%s - rumble command sent to unsupported xpad type: %d\n",
__func__, xpad->xtype);
- return -EINVAL;
+ retval = -EINVAL;
+ goto out;
}
- return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ retval = xpad_try_sending_next_out_packet(xpad);
+
+out:
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+ return retval;
}
static int xpad_init_ff(struct usb_xpad *xpad)
@@ -921,36 +1042,44 @@ struct xpad_led {
*/
static void xpad_send_led_command(struct usb_xpad *xpad, int command)
{
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_LED_IDX];
+ unsigned long flags;
+
command %= 16;
- mutex_lock(&xpad->odata_mutex);
+ spin_lock_irqsave(&xpad->odata_lock, flags);
switch (xpad->xtype) {
case XTYPE_XBOX360:
- xpad->odata[0] = 0x01;
- xpad->odata[1] = 0x03;
- xpad->odata[2] = command;
- xpad->irq_out->transfer_buffer_length = 3;
+ packet->data[0] = 0x01;
+ packet->data[1] = 0x03;
+ packet->data[2] = command;
+ packet->len = 3;
+ packet->pending = true;
break;
+
case XTYPE_XBOX360W:
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x00;
- xpad->odata[2] = 0x08;
- xpad->odata[3] = 0x40 + command;
- xpad->odata[4] = 0x00;
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- xpad->odata[8] = 0x00;
- xpad->odata[9] = 0x00;
- xpad->odata[10] = 0x00;
- xpad->odata[11] = 0x00;
- xpad->irq_out->transfer_buffer_length = 12;
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x08;
+ packet->data[3] = 0x40 + command;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
break;
}
- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
- mutex_unlock(&xpad->odata_mutex);
+ xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
}
/*
@@ -1048,13 +1177,8 @@ static int xpad_open(struct input_dev *dev)
if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
return -EIO;
- if (xpad->xtype == XTYPE_XBOXONE) {
- /* Xbox one controller needs to be initialized. */
- xpad->odata[0] = 0x05;
- xpad->odata[1] = 0x20;
- xpad->irq_out->transfer_buffer_length = 2;
- return usb_submit_urb(xpad->irq_out, GFP_KERNEL);
- }
+ if (xpad->xtype == XTYPE_XBOXONE)
+ return xpad_start_xbox_one(xpad);
return 0;
}
@@ -1200,22 +1324,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
int ep_irq_in_idx;
int i, error;
+ if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+ return -ENODEV;
+
for (i = 0; xpad_device[i].idVendor; i++) {
if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
(le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
break;
}
- if (xpad_device[i].xtype == XTYPE_XBOXONE &&
- intf->cur_altsetting->desc.bInterfaceNumber != 0) {
- /*
- * The Xbox One controller lists three interfaces all with the
- * same interface class, subclass and protocol. Differentiate by
- * interface number.
- */
- return -ENODEV;
- }
-
xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
if (!xpad)
return -ENOMEM;
@@ -1246,6 +1363,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
xpad->xtype = XTYPE_XBOX360W;
+ else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+ xpad->xtype = XTYPE_XBOXONE;
else
xpad->xtype = XTYPE_XBOX360;
} else {
@@ -1260,6 +1379,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping |= MAP_STICKS_TO_NULL;
}
+ if (xpad->xtype == XTYPE_XBOXONE &&
+ intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+ /*
+ * The Xbox One controller lists three interfaces all with the
+ * same interface class, subclass and protocol. Differentiate by
+ * interface number.
+ */
+ error = -ENODEV;
+ goto err_free_in_urb;
+ }
+
error = xpad_init_output(intf, xpad);
if (error)
goto err_free_in_urb;
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index acc5394afb03..29485bc4221c 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
/* Reset the KBC controller to clear all previous status.*/
reset_control_assert(kbc->rst);
udelay(100);
- reset_control_assert(kbc->rst);
+ reset_control_deassert(kbc->rst);
udelay(100);
tegra_kbc_config_pins(kbc);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 2f589857a039..d15b33813021 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,8 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.6.0
+ * Author: KT Liao <kt.liao@emc.com.tw>
+ * Version: 1.6.2
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,7 +41,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.6.1"
+#define ELAN_DRIVER_VERSION "1.6.2"
#define ELAN_VENDOR_ID 0x04f3
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
@@ -199,9 +200,41 @@ static int elan_sleep(struct elan_tp_data *data)
return error;
}
+static int elan_query_product(struct elan_tp_data *data)
+{
+ int error;
+
+ error = data->ops->get_product_id(data->client, &data->product_id);
+ if (error)
+ return error;
+
+ error = data->ops->get_sm_version(data->client, &data->ic_type,
+ &data->sm_version);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
+{
+ if (data->ic_type != 0x0E)
+ return false;
+
+ switch (data->product_id) {
+ case 0x05 ... 0x07:
+ case 0x09:
+ case 0x13:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int __elan_initialize(struct elan_tp_data *data)
{
struct i2c_client *client = data->client;
+ bool woken_up = false;
int error;
error = data->ops->initialize(client);
@@ -210,6 +243,27 @@ static int __elan_initialize(struct elan_tp_data *data)
return error;
}
+ error = elan_query_product(data);
+ if (error)
+ return error;
+
+ /*
+ * Some ASUS devices were shipped with firmware that requires
+ * touchpads to be woken up first, before attempting to switch
+ * them into absolute reporting mode.
+ */
+ if (elan_check_ASUS_special_fw(data)) {
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
+
+ msleep(200);
+ woken_up = true;
+ }
+
data->mode |= ETP_ENABLE_ABS;
error = data->ops->set_mode(client, data->mode);
if (error) {
@@ -218,11 +272,13 @@ static int __elan_initialize(struct elan_tp_data *data)
return error;
}
- error = data->ops->sleep_control(client, false);
- if (error) {
- dev_err(&client->dev,
- "failed to wake device up: %d\n", error);
- return error;
+ if (!woken_up) {
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
}
return 0;
@@ -248,10 +304,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
{
int error;
- error = data->ops->get_product_id(data->client, &data->product_id);
- if (error)
- return error;
-
error = data->ops->get_version(data->client, false, &data->fw_version);
if (error)
return error;
@@ -261,11 +313,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_sm_version(data->client, &data->ic_type,
- &data->sm_version);
- if (error)
- return error;
-
error = data->ops->get_version(data->client, true, &data->iap_version);
if (error)
return error;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 78f93cf68840..be5b399da5d3 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 5:
etd->hw_version = 3;
break;
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- case 13:
- case 14:
+ case 6 ... 14:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index a3f0f5a47490..0f586780ceb4 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
return -ENXIO;
}
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
/* Check if the device is present */
response = ~VMMOUSE_PROTO_MAGIC;
VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
- if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
- release_region(VMMOUSE_PROTO_PORT, 4);
+ if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
return -ENXIO;
- }
if (set_properties) {
psmouse->vendor = VMMOUSE_VENDOR;
@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
psmouse->model = version;
}
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return 0;
}
@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
psmouse_reset(psmouse);
input_unregister_device(priv->abs_dev);
kfree(priv);
- release_region(VMMOUSE_PROTO_PORT, 4);
}
/**
@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
struct input_dev *rel_dev = psmouse->dev, *abs_dev;
int error;
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
psmouse_reset(psmouse);
error = vmmouse_enable(psmouse);
if (error)
- goto release_region;
+ return error;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
abs_dev = input_allocate_device();
@@ -502,8 +487,5 @@ init_fail:
kfree(priv);
psmouse->private = NULL;
-release_region:
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return error;
}
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 454195709a82..405252a884dd 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1277,6 +1277,7 @@ static int __init i8042_create_kbd_port(void)
serio->start = i8042_start;
serio->stop = i8042_stop;
serio->close = i8042_port_close;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
@@ -1304,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx)
serio->write = i8042_aux_write;
serio->start = i8042_start;
serio->stop = i8042_stop;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
if (idx < 0) {
@@ -1373,21 +1375,6 @@ static void i8042_unregister_ports(void)
}
}
-/*
- * Checks whether port belongs to i8042 controller.
- */
-bool i8042_check_port_owner(const struct serio *port)
-{
- int i;
-
- for (i = 0; i < I8042_NUM_PORTS; i++)
- if (i8042_ports[i].serio == port)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(i8042_check_port_owner);
-
static void i8042_free_irqs(void)
{
if (i8042_aux_irq_registered)
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 316f2c897101..83e9c663aa67 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -56,19 +56,17 @@ EXPORT_SYMBOL(ps2_sendbyte);
void ps2_begin_command(struct ps2dev *ps2dev)
{
- mutex_lock(&ps2dev->cmd_mutex);
+ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
- if (i8042_check_port_owner(ps2dev->serio))
- i8042_lock_chip();
+ mutex_lock(m);
}
EXPORT_SYMBOL(ps2_begin_command);
void ps2_end_command(struct ps2dev *ps2dev)
{
- if (i8042_check_port_owner(ps2dev->serio))
- i8042_unlock_chip();
+ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
- mutex_unlock(&ps2dev->cmd_mutex);
+ mutex_unlock(m);
}
EXPORT_SYMBOL(ps2_end_command);
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index d214f22ed305..45b466e3bbe8 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -126,7 +126,7 @@ struct sur40_image_header {
#define VIDEO_PACKET_SIZE 16384
/* polling interval (ms) */
-#define POLL_INTERVAL 4
+#define POLL_INTERVAL 1
/* maximum number of contacts FIXME: this is a guess? */
#define MAX_CONTACTS 64
@@ -441,7 +441,7 @@ static void sur40_process_video(struct sur40_state *sur40)
/* return error if streaming was stopped in the meantime */
if (sur40->sequence == -1)
- goto err_poll;
+ return;
/* mark as finished */
v4l2_get_timestamp(&new_buf->vb.timestamp);
@@ -730,6 +730,7 @@ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count)
static void sur40_stop_streaming(struct vb2_queue *vq)
{
struct sur40_state *sur40 = vb2_get_drv_priv(vq);
+ vb2_wait_for_all_buffers(vq);
sur40->sequence = -1;
/* Release all active buffers */
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index 7295c198aa08..6fe55d598fac 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -22,6 +22,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2004_input_id = {
+ .bustype = BUS_I2C,
+ .product = 2004,
+};
+
static int tsc2004_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+ return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
tsc2004_cmd);
}
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b9f593dfd2ef..f2c5f0e47f77 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -24,6 +24,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2005_input_id = {
+ .bustype = BUS_SPI,
+ .product = 2005,
+};
+
static int tsc2005_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
if (error)
return error;
- return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+ return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
devm_regmap_init_spi(spi, &tsc200x_regmap_config),
tsc2005_cmd);
}
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 15240c1ee850..dfa7f1c4f545 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd))
{
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
snprintf(ts->phys, sizeof(ts->phys),
"%s/input-ts", dev_name(dev));
- input_dev->name = "TSC200X touchscreen";
+ if (tsc_id->product == 2004) {
+ input_dev->name = "TSC200X touchscreen";
+ } else {
+ input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+ "TSC%04d touchscreen",
+ tsc_id->product);
+ if (!input_dev->name)
+ return -ENOMEM;
+ }
+
input_dev->phys = ts->phys;
- input_dev->id.bustype = bustype;
+ input_dev->id = *tsc_id;
input_dev->dev.parent = dev;
input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 7a482d102614..49a63a3c6840 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -70,7 +70,7 @@
extern const struct regmap_config tsc200x_regmap_config;
extern const struct dev_pm_ops tsc200x_pm_ops;
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
int tsc200x_remove(struct device *dev);
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 2792ca397dd0..3ed0ce1e4dcb 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-#define W8001_MAX_LENGTH 11
+#define W8001_MAX_LENGTH 13
#define W8001_LEAD_MASK 0x80
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b9319b76a8a1..0397985a2601 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -352,9 +352,11 @@ static void init_iommu_group(struct device *dev)
if (!domain)
goto out;
- dma_domain = to_pdomain(domain)->priv;
+ if (to_pdomain(domain)->flags == PD_DMA_OPS_MASK) {
+ dma_domain = to_pdomain(domain)->priv;
+ init_unity_mappings_for_device(dev, dma_domain);
+ }
- init_unity_mappings_for_device(dev, dma_domain);
out:
iommu_group_put(group);
}
@@ -2322,8 +2324,15 @@ static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
- list_for_each_entry(dev_data, &domain->dev_list, list)
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
+
+ if (dev_data->devid == dev_data->alias)
+ continue;
+
+ /* There is an alias, update device table entry for it */
+ set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
+ }
}
static void update_domain(struct protection_domain *domain)
@@ -2970,9 +2979,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
-
- if (!dom)
- return;
+ struct dma_ops_domain *dma_dom;
domain = to_pdomain(dom);
@@ -2981,13 +2988,24 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
BUG_ON(domain->dev_cnt != 0);
- if (domain->mode != PAGE_MODE_NONE)
- free_pagetable(domain);
+ if (!dom)
+ return;
+
+ switch (dom->type) {
+ case IOMMU_DOMAIN_DMA:
+ dma_dom = domain->priv;
+ dma_ops_domain_free(dma_dom);
+ break;
+ default:
+ if (domain->mode != PAGE_MODE_NONE)
+ free_pagetable(domain);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
- protection_domain_free(domain);
+ protection_domain_free(domain);
+ break;
+ }
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8487987458a1..00df3832faab 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -870,7 +870,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
* We may have concurrent producers, so we need to be careful
* not to touch any of the shadow cmdq state.
*/
- queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
dev_err(smmu->dev, "skipping command in error state:\n");
for (i = 0; i < ARRAY_SIZE(cmd); ++i)
dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -881,7 +881,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
return;
}
- queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1025,6 +1025,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
case STRTAB_STE_0_CFG_S2_TRANS:
ste_live = true;
break;
+ case STRTAB_STE_0_CFG_ABORT:
+ if (disable_bypass)
+ break;
default:
BUG(); /* STE corruption */
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 58f2fe687a24..347a3c17f73a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!iovad)
return;
- put_iova_domain(iovad);
+ if (iovad->granule)
+ put_iova_domain(iovad);
kfree(iovad);
domain->iova_cookie = NULL;
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 97c41b8ab5d9..29a31eb9ace3 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -647,6 +647,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
.name = "exynos-sysmmu",
.of_match_table = sysmmu_of_match,
.pm = &sysmmu_pm_ops,
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6763a4dfed94..24d81308a1a6 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2032,7 +2032,7 @@ out_unlock:
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&device_domain_lock, flags);
- return 0;
+ return ret;
}
struct domain_context_mapping_data {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index a159529f9d53..c5f1757ac61d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
@@ -71,6 +72,7 @@ struct its_node {
struct list_head its_device_list;
u64 flags;
u32 ite_size;
+ int numa_node;
};
#define ITS_ITT_ALIGN SZ_256
@@ -600,11 +602,23 @@ static void its_unmask_irq(struct irq_data *d)
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
+ const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
u32 id = its_get_event_id(d);
+ /* lpi cannot be routed to a redistributor that is on a foreign node */
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ if (its_dev->its->numa_node >= 0) {
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+ if (!cpumask_intersects(mask_val, cpu_mask))
+ return -EINVAL;
+ }
+ }
+
+ cpu = cpumask_any_and(mask_val, cpu_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;
@@ -1081,6 +1095,16 @@ static void its_cpu_init_collection(void)
list_for_each_entry(its, &its_nodes, entry) {
u64 target;
+ /* avoid cross node collections and its mapping */
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (its->numa_node != NUMA_NO_NODE &&
+ its->numa_node != of_node_to_nid(cpu_node))
+ continue;
+ }
+
/*
* We now have to bind each collection to its target
* redistributor.
@@ -1308,9 +1332,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ const struct cpumask *cpu_mask = cpu_online_mask;
+
+ /* get the cpu_mask of local node */
+ if (its_dev->its->numa_node >= 0)
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
- its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+ its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
/* Map the GIC IRQ and event to the device */
its_send_mapvi(its_dev, d->hwirq, event);
@@ -1400,6 +1429,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -1409,6 +1445,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_cavium_22375,
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+ {
+ .desc = "ITS: Cavium erratum 23144",
+ .iidr = 0xa100034c, /* ThunderX pass 1.x */
+ .mask = 0xffff0fff,
+ .init = its_enable_quirk_cavium_23144,
+ },
+#endif
{
}
};
@@ -1470,6 +1514,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
its->base = its_base;
its->phys_base = res.start;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->numa_node = of_node_to_nid(node);
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
if (!its->cmd_base) {
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index 85b1133df4f5..77014f93927c 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -465,6 +465,24 @@ out:
return rc;
}
+static int qpnp_wled_swire_avdd_config(struct qpnp_wled *wled)
+{
+ int rc;
+ u8 val;
+
+ if (wled->pmic_rev_id->pmic_subtype != PMI8998_SUBTYPE &&
+ wled->pmic_rev_id->pmic_subtype != PM2FALCON_SUBTYPE)
+ return 0;
+
+ if (!wled->disp_type_amoled || wled->avdd_mode_spmi)
+ return 0;
+
+ val = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_SWIRE_AVDD_REG(wled->ctrl_base), val);
+ return rc;
+}
+
static int qpnp_wled_sync_reg_toggle(struct qpnp_wled *wled)
{
int rc;
@@ -884,8 +902,20 @@ static void qpnp_wled_work(struct work_struct *work)
}
if (!!level != wled->prev_state) {
- rc = qpnp_wled_module_en(wled, wled->ctrl_base, !!level);
+ if (!!level) {
+ /*
+ * For AMOLED display in pmi8998, SWIRE_AVDD_DEFAULT has
+ * to be reconfigured every time the module is enabled.
+ */
+ rc = qpnp_wled_swire_avdd_config(wled);
+ if (rc < 0) {
+ pr_err("Write to SWIRE_AVDD_DEFAULT register failed rc:%d\n",
+ rc);
+ goto unlock_mutex;
+ }
+ }
+ rc = qpnp_wled_module_en(wled, wled->ctrl_base, !!level);
if (rc) {
dev_err(&wled->pdev->dev, "wled %sable failed\n",
level ? "en" : "dis");
@@ -1246,22 +1276,22 @@ static int qpnp_wled_avdd_mode_config(struct qpnp_wled *wled)
wled->avdd_target_voltage_mv = QPNP_WLED_AVDD_MIN_MV;
}
- reg = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
-
if (wled->avdd_mode_spmi) {
+ reg = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv);
reg |= QPNP_WLED_AVDD_SEL_SPMI_BIT;
rc = qpnp_wled_write_reg(wled,
QPNP_WLED_AMOLED_VOUT_REG(wled->ctrl_base),
reg);
+ if (rc < 0)
+ pr_err("Write to AMOLED_VOUT register failed, rc=%d\n",
+ rc);
} else {
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SWIRE_AVDD_REG(wled->ctrl_base),
- reg);
+ rc = qpnp_wled_swire_avdd_config(wled);
+ if (rc < 0)
+ pr_err("Write to SWIRE_AVDD_DEFAULT register failed rc:%d\n",
+ rc);
}
- if (rc < 0)
- dev_err(&wled->pdev->dev, "Write to VOUT/AVDD register failed, rc=%d\n",
- rc);
return rc;
}
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index a54b339951a3..2a96ff6923f0 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -89,6 +89,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
+ lun->vlun.nr_free_blocks--;
}
return 0;
@@ -345,7 +346,7 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
if (!dev->ops->submit_io)
- return 0;
+ return -ENODEV;
/* Convert address space */
gennvm_generic_to_addr_mode(dev, rqd);
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 134e4faba482..a9859489acf6 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -287,6 +287,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
}
page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
+ if (!page)
+ return -ENOMEM;
while ((slot = find_first_zero_bit(rblk->invalid_pages,
nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -427,7 +429,7 @@ static void rrpc_lun_gc(struct work_struct *work)
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;
- spin_lock(&lun->lock);
+ spin_lock(&rlun->lock);
while (nr_blocks_need > lun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -436,16 +438,16 @@ static void rrpc_lun_gc(struct work_struct *work)
if (!rblock->nr_invalid_pages)
break;
+ gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+ if (!gcb)
+ break;
+
list_del_init(&rblock->prio);
BUG_ON(!block_is_full(rrpc, rblock));
pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
gcb->rrpc = rrpc;
gcb->rblk = rblock;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -454,7 +456,7 @@ static void rrpc_lun_gc(struct work_struct *work)
nr_blocks_need--;
}
- spin_unlock(&lun->lock);
+ spin_unlock(&rlun->lock);
/* TODO: Hint that request queue can be started again */
}
@@ -650,11 +652,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
if (bio_data_dir(rqd->bio) == WRITE)
rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+ bio_put(rqd->bio);
+
if (rrqd->flags & NVM_IOTYPE_GC)
return 0;
rrpc_unlock_rq(rrpc, rqd);
- bio_put(rqd->bio);
if (npages > 1)
nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
@@ -841,6 +844,13 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
err = nvm_submit_io(rrpc->dev, rqd);
if (err) {
pr_err("rrpc: I/O submission failed: %d\n", err);
+ bio_put(bio);
+ if (!(flags & NVM_IOTYPE_GC)) {
+ rrpc_unlock_rq(rrpc, rqd);
+ if (rqd->nr_pages > 1)
+ nvm_dev_dma_free(rrpc->dev,
+ rqd->ppa_list, rqd->dma_ppa_list);
+ }
return NVM_IO_ERR;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e562bdedfb07..7d5aa2c5c81d 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -518,7 +518,7 @@ config DM_LOG_WRITES
If unsure, say N.
config DM_ANDROID_VERITY
- bool "Android verity target support"
+ tristate "Android verity target support"
depends on DM_VERITY
depends on X509_CERTIFICATE_PARSER
depends on SYSTEM_TRUSTED_KEYRING
@@ -526,6 +526,7 @@ config DM_ANDROID_VERITY
depends on KEYS
depends on ASYMMETRIC_KEY_TYPE
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ depends on MD_LINEAR
---help---
This device-mapper target is virtually a VERITY target. This
target is setup by reading the metadata contents piggybacked
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index ce7cf06d0e8a..2b2ba36638cd 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o
+obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a296425a7270..3d5c0ba13181 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1818,7 +1818,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+ !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index bb6c1285e499..13c60bee8af5 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -48,7 +48,7 @@ static char buildvariant[BUILD_VARIANT];
static bool target_added;
static bool verity_enabled = true;
-struct dentry *debug_dir;
+static struct dentry *debug_dir;
static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
static struct target_type android_verity_target = {
@@ -532,7 +532,7 @@ blkdev_release:
}
/* helper functions to extract properties from dts */
-const char *find_dt_value(const char *name)
+static const char *find_dt_value(const char *name)
{
struct device_node *firmware;
const char *value;
diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h
index 0fcd54aaf5f6..0c7ff6afec69 100644
--- a/drivers/md/dm-android-verity.h
+++ b/drivers/md/dm-android-verity.h
@@ -72,9 +72,6 @@
* if fec is not present
* <data_blocks> <verity_tree> <verity_metdata_32K>
*/
-/* TODO: rearrange structure to reduce memory holes
- * depends on userspace change.
- */
struct fec_header {
__le32 magic;
__le32 version;
@@ -83,7 +80,7 @@ struct fec_header {
__le32 fec_size;
__le64 inp_size;
u8 hash[SHA256_DIGEST_SIZE];
-};
+} __attribute__((packed));
struct android_metadata_header {
__le32 magic_number;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 09e2afcafd2d..cd0a93df4cb7 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -289,10 +289,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;
/*
- * Map reads as normal.
+ * Map reads as normal only if corrupt_bio_byte set.
*/
- if (bio_data_dir(bio) == READ)
- goto map_bio;
+ if (bio_data_dir(bio) == READ) {
+ /* If flags were specified, only corrupt those that match. */
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc))
+ goto map_bio;
+ else
+ return -EIO;
+ }
/*
* Drop writes?
@@ -330,12 +336,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
/*
* Corrupt successful READs while in down state.
- * If flags were specified, only corrupt those that match.
*/
- if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
- (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc))
- corrupt_bio_data(bio, fc);
+ if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+ if (fc->corrupt_bio_byte)
+ corrupt_bio_data(bio, fc);
+ else
+ return -EIO;
+ }
return error;
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 8505a771de42..2ff5f32a4b99 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -66,6 +66,7 @@ int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
kfree(lc);
return ret;
}
+EXPORT_SYMBOL_GPL(dm_linear_ctr);
void dm_linear_dtr(struct dm_target *ti)
{
@@ -74,6 +75,7 @@ void dm_linear_dtr(struct dm_target *ti)
dm_put_device(ti, lc->dev);
kfree(lc);
}
+EXPORT_SYMBOL_GPL(dm_linear_dtr);
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
@@ -98,6 +100,7 @@ int dm_linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+EXPORT_SYMBOL_GPL(dm_linear_map);
void dm_linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -115,6 +118,7 @@ void dm_linear_status(struct dm_target *ti, status_type_t type,
break;
}
}
+EXPORT_SYMBOL_GPL(dm_linear_status);
int dm_linear_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
@@ -132,6 +136,7 @@ int dm_linear_prepare_ioctl(struct dm_target *ti,
return 1;
return 0;
}
+EXPORT_SYMBOL_GPL(dm_linear_prepare_ioctl);
int dm_linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
@@ -140,6 +145,7 @@ int dm_linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
+EXPORT_SYMBOL_GPL(dm_linear_iterate_devices);
static struct target_type linear_target = {
.name = "linear",
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 5214ed2c7507..9d3d4b297201 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -592,6 +592,7 @@ int verity_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+EXPORT_SYMBOL_GPL(verity_map);
/*
* Status: V (valid) or C (corruption found)
@@ -655,6 +656,7 @@ void verity_status(struct dm_target *ti, status_type_t type,
break;
}
}
+EXPORT_SYMBOL_GPL(verity_status);
int verity_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
@@ -668,6 +670,7 @@ int verity_prepare_ioctl(struct dm_target *ti,
return 1;
return 0;
}
+EXPORT_SYMBOL_GPL(verity_prepare_ioctl);
int verity_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
@@ -676,6 +679,7 @@ int verity_iterate_devices(struct dm_target *ti,
return fn(ti, v->data_dev, v->data_start, ti->len, data);
}
+EXPORT_SYMBOL_GPL(verity_iterate_devices);
void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
@@ -689,6 +693,7 @@ void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, limits->logical_block_size);
}
+EXPORT_SYMBOL_GPL(verity_io_hints);
void verity_dtr(struct dm_target *ti)
{
@@ -719,6 +724,7 @@ void verity_dtr(struct dm_target *ti)
kfree(v);
}
+EXPORT_SYMBOL_GPL(verity_dtr);
static int verity_alloc_zero_digest(struct dm_verity *v)
{
@@ -1053,6 +1059,7 @@ bad:
return r;
}
+EXPORT_SYMBOL_GPL(verity_ctr);
static struct target_type verity_target = {
.name = "verity",
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 479fdbb3dcb2..d8615788b17d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -3085,7 +3085,8 @@ static void unlock_fs(struct mapped_device *md)
* Caller must hold md->suspend_lock
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, int interruptible)
+ unsigned suspend_flags, int interruptible,
+ int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
@@ -3152,6 +3153,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* to finish.
*/
r = dm_wait_for_completion(md, interruptible);
+ if (!r)
+ set_bit(dmf_suspended_flag, &md->flags);
if (noflush)
clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
@@ -3213,12 +3216,10 @@ retry:
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
- r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
+ r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
if (r)
goto out_unlock;
- set_bit(DMF_SUSPENDED, &md->flags);
-
dm_table_postsuspend_targets(map);
out_unlock:
@@ -3312,9 +3313,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
* would require changing .presuspend to return an error -- avoid this
* until there is a need for more elaborate variants of internal suspend.
*/
- (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
-
- set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
+ (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
+ DMF_SUSPENDED_INTERNALLY);
dm_table_postsuspend_targets(map);
}
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index d61be58e22f0..0d44deed65bd 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -57,7 +57,13 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
{
- return (rbuf->pread == rbuf->pwrite);
+ /* smp_load_acquire() to load write pointer on reader side
+ * this pairs with smp_store_release() in dvb_ringbuffer_write(),
+ * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
+ *
+ * for memory barriers also see Documentation/circular-buffers.txt
+ */
+ return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
}
@@ -66,7 +72,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
ssize_t free;
- free = rbuf->pread - rbuf->pwrite;
+ /* ACCESS_ONCE() to load read pointer on writer side
+ * this pairs with smp_store_release() in dvb_ringbuffer_read(),
+ * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
+ * or dvb_ringbuffer_reset()
+ */
+ free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
if (free <= 0)
free += rbuf->size;
return free-1;
@@ -78,7 +89,11 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
{
ssize_t avail;
- avail = rbuf->pwrite - rbuf->pread;
+ /* smp_load_acquire() to load write pointer on reader side
+ * this pairs with smp_store_release() in dvb_ringbuffer_write(),
+ * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
+ */
+ avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
if (avail < 0)
avail += rbuf->size;
return avail;
@@ -88,14 +103,25 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
{
- rbuf->pread = rbuf->pwrite;
+ /* dvb_ringbuffer_flush() counts as read operation
+ * smp_load_acquire() to load write pointer
+ * smp_store_release() to update read pointer, this ensures that the
+ * correct pointer is visible for subsequent dvb_ringbuffer_free()
+ * calls on other cpu cores
+ */
+ smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
rbuf->error = 0;
}
EXPORT_SYMBOL(dvb_ringbuffer_flush);
void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
{
- rbuf->pread = rbuf->pwrite = 0;
+ /* dvb_ringbuffer_reset() counts as read and write operation
+ * smp_store_release() to update read pointer
+ */
+ smp_store_release(&rbuf->pread, 0);
+ /* smp_store_release() to update write pointer */
+ smp_store_release(&rbuf->pwrite, 0);
rbuf->error = 0;
}
@@ -121,12 +147,17 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
return -EFAULT;
buf += split;
todo -= split;
- rbuf->pread = 0;
+ /* smp_store_release() for read pointer update to ensure
+ * that buf is not overwritten until read is complete,
+ * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ */
+ smp_store_release(&rbuf->pread, 0);
}
if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
return -EFAULT;
- rbuf->pread = (rbuf->pread + todo) % rbuf->size;
+ /* smp_store_release() to update read pointer, see above */
+ smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
return len;
}
@@ -141,11 +172,16 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
memcpy(buf, rbuf->data+rbuf->pread, split);
buf += split;
todo -= split;
- rbuf->pread = 0;
+ /* smp_store_release() for read pointer update to ensure
+ * that buf is not overwritten until read is complete,
+ * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ */
+ smp_store_release(&rbuf->pread, 0);
}
memcpy(buf, rbuf->data+rbuf->pread, todo);
- rbuf->pread = (rbuf->pread + todo) % rbuf->size;
+ /* smp_store_release() to update read pointer, see above */
+ smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
}
@@ -160,10 +196,16 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t
memcpy(rbuf->data+rbuf->pwrite, buf, split);
buf += split;
todo -= split;
- rbuf->pwrite = 0;
+ /* smp_store_release() for write pointer update to ensure that
+ * written data is visible on other cpu cores before the pointer
+ * update, this pairs with smp_load_acquire() in
+ * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
+ */
+ smp_store_release(&rbuf->pwrite, 0);
}
memcpy(rbuf->data+rbuf->pwrite, buf, todo);
- rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
+ /* smp_store_release() for write pointer update, see above */
+ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
return len;
}
@@ -184,15 +226,18 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
return -EFAULT;
buf += split;
todo -= split;
- rbuf->pwrite = 0;
+ /* smp_store_release() for write pointer update to ensure that
+ * written data is visible on other cpu cores before the pointer
+ * update, this pairs with smp_load_acquire() in
+ * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
+ */
+ smp_store_release(&rbuf->pwrite, 0);
}
-
- if (copy_from_user(rbuf->data + rbuf->pwrite, buf, todo)) {
- rbuf->pwrite = oldpwrite;
- return -EFAULT;
- }
-
- rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
+ status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
+ if (status)
+ return len - todo;
+ /* smp_store_release() for write pointer update, see above */
+ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
return len;
}
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 292c9479bb75..310e4b8beae8 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -264,7 +264,7 @@ config DVB_MB86A16
config DVB_TDA10071
tristate "NXP TDA10071"
depends on DVB_CORE && I2C
- select REGMAP
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
index 3e3143be0a13..b045d6c6e8da 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -972,6 +972,11 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
struct dma_buf_attachment *attach = NULL;
struct sg_table *table = NULL;
+ if (!paddr_ptr) {
+ rc = -EINVAL;
+ goto err_out;
+ }
+
/* allocate memory for each buffer information */
buf = dma_buf_get(ion_fd);
if (IS_ERR_OR_NULL(buf)) {
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
index a56e42dc1c6f..31568da21fee 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -266,8 +266,10 @@ static void msm_fd_stop_streaming(struct vb2_queue *q)
{
struct fd_ctx *ctx = vb2_get_drv_priv(q);
+ mutex_lock(&ctx->fd_device->recovery_lock);
msm_fd_hw_remove_buffers_from_queue(ctx->fd_device, q);
msm_fd_hw_put(ctx->fd_device);
+ mutex_unlock(&ctx->fd_device->recovery_lock);
}
/* Videobuf2 queue callbacks. */
@@ -329,6 +331,68 @@ static struct vb2_mem_ops msm_fd_vb2_mem_ops = {
};
/*
+ * msm_fd_vbif_error_handler - FD VBIF Error handler
+ * @handle: FD Device handle
+ * @error: CPP-VBIF Error code
+ */
+static int msm_fd_vbif_error_handler(void *handle, uint32_t error)
+{
+ struct fd_ctx *ctx;
+ struct msm_fd_device *fd;
+ struct msm_fd_buffer *active_buf;
+ int ret;
+
+ if (handle == NULL)
+ return 0;
+
+ ctx = (struct fd_ctx *)handle;
+ fd = (struct msm_fd_device *)ctx->fd_device;
+
+ if (error == CPP_VBIF_ERROR_HANG) {
+ mutex_lock(&fd->recovery_lock);
+ dev_err(fd->dev, "Handling FD VBIF Hang\n");
+ if (fd->state != MSM_FD_DEVICE_RUNNING) {
+ dev_err(fd->dev, "FD is not FD_DEVICE_RUNNING, %d\n",
+ fd->state);
+ mutex_unlock(&fd->recovery_lock);
+ return 0;
+ }
+ fd->recovery_mode = 1;
+
+ /* Halt and reset */
+ msm_fd_hw_put(fd);
+ msm_fd_hw_get(fd, ctx->settings.speed);
+
+ /* Get active buffer */
+ active_buf = msm_fd_hw_get_active_buffer(fd);
+
+ if (active_buf == NULL) {
+ dev_dbg(fd->dev, "no active buffer, return\n");
+ fd->recovery_mode = 0;
+ mutex_unlock(&fd->recovery_lock);
+ return 0;
+ }
+
+ dev_dbg(fd->dev, "Active Buffer present.. Start re-schedule\n");
+
+ /* Queue the buffer again */
+ msm_fd_hw_add_buffer(fd, active_buf);
+
+ /* Schedule and restart */
+ ret = msm_fd_hw_schedule_next_buffer(fd);
+ if (ret) {
+ dev_err(fd->dev, "Cannot reschedule buffer, recovery failed\n");
+ fd->recovery_mode = 0;
+ mutex_unlock(&fd->recovery_lock);
+ return ret;
+ }
+ dev_dbg(fd->dev, "Restarted FD after VBIF Hang\n");
+ mutex_unlock(&fd->recovery_lock);
+ }
+ return 0;
+}
+
+/*
* msm_fd_open - Fd device open method.
* @file: Pointer to file struct.
*/
@@ -391,6 +455,10 @@ static int msm_fd_open(struct file *file)
goto error_ahb_config;
}
+ /* Register with CPP VBIF error handler */
+ msm_cpp_vbif_register_error_handler((void *)ctx,
+ VBIF_CLIENT_FD, msm_fd_vbif_error_handler);
+
return 0;
error_ahb_config:
@@ -412,6 +480,10 @@ static int msm_fd_release(struct file *file)
{
struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);
+ /* Un-register with CPP VBIF error handler */
+ msm_cpp_vbif_register_error_handler((void *)ctx,
+ VBIF_CLIENT_FD, NULL);
+
vb2_queue_release(&ctx->vb2_q);
vfree(ctx->stats);
@@ -468,7 +540,7 @@ static long msm_fd_private_ioctl(struct file *file, void *fh,
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
struct msm_fd_stats *stats;
int stats_idx;
- int ret;
+ int ret = 0;
int i;
switch (cmd) {
@@ -1176,6 +1248,12 @@ static void msm_fd_wq_handler(struct work_struct *work)
/* Stats are ready, set correct frame id */
atomic_set(&stats->frame_id, ctx->sequence);
+ /* If Recovery mode is on, we got IRQ after recovery, reset it */
+ if (fd->recovery_mode) {
+ fd->recovery_mode = 0;
+ dev_dbg(fd->dev, "Got IRQ after Recovery\n");
+ }
+
/* We have the data from fd hw, we can start next processing */
msm_fd_hw_schedule_next_buffer(fd);
@@ -1213,6 +1291,7 @@ static int fd_probe(struct platform_device *pdev)
mutex_init(&fd->lock);
spin_lock_init(&fd->slock);
+ mutex_init(&fd->recovery_lock);
init_completion(&fd->hw_halt_completion);
INIT_LIST_HEAD(&fd->buf_queue);
fd->pdev = pdev;
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
index b96c33b3fd07..7505f0585d42 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
@@ -23,6 +23,8 @@
#include <linux/msm_ion.h>
#include "cam_soc_api.h"
#include "cam_hw_ops.h"
+#include "msm_cpp.h"
+
/* Maximum number of result buffers */
#define MSM_FD_MAX_RESULT_BUFS 5
/* Max number of clocks defined in device tree */
@@ -214,12 +216,14 @@ enum msm_fd_mem_resources {
* @work_queue: Pointer to FD device IRQ bottom half workqueue.
* @work: IRQ bottom half work struct.
* @hw_halt_completion: Completes when face detection hw halt completes.
+ * @recovery_mode: Indicates if FD is in recovery mode
*/
struct msm_fd_device {
u32 hw_revision;
struct mutex lock;
spinlock_t slock;
+ struct mutex recovery_lock;
int ref_count;
int irq_num;
@@ -248,6 +252,8 @@ struct msm_fd_device {
struct workqueue_struct *work_queue;
struct work_struct work;
struct completion hw_halt_completion;
+ int recovery_mode;
+ uint32_t clk_rate_idx;
};
#endif /* __MSM_FD_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c
index 08268d98fc13..21d42a77accb 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c
@@ -38,7 +38,7 @@
/* Face detection bus client name */
#define MSM_FD_BUS_CLIENT_NAME "msm_face_detect"
/* Face detection processing timeout in ms */
-#define MSM_FD_PROCESSING_TIMEOUT_MS 500
+#define MSM_FD_PROCESSING_TIMEOUT_MS 150
/* Face detection halt timeout in ms */
#define MSM_FD_HALT_TIMEOUT_MS 100
/* Smmu callback name */
@@ -822,6 +822,45 @@ static int msm_fd_hw_set_clock_rate_idx(struct msm_fd_device *fd,
return 0;
}
+
+/**
+ * msm_fd_hw_update_settings() - API to set clock rate and bus settings
+ * @fd: Pointer to fd device.
+ * @buf: fd buffer
+ */
+static int msm_fd_hw_update_settings(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buf)
+{
+ int ret = 0;
+ uint32_t clk_rate_idx;
+
+ if (!buf)
+ return 0;
+
+ clk_rate_idx = buf->settings.speed;
+ if (fd->clk_rate_idx == clk_rate_idx)
+ return 0;
+
+ if (fd->bus_client) {
+ ret = msm_fd_hw_bus_request(fd, clk_rate_idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ ret = msm_fd_hw_set_clock_rate_idx(fd, clk_rate_idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail to set clock rate idx\n");
+ goto end;
+ }
+ dev_dbg(fd->dev, "set clk %d %d", fd->clk_rate_idx, clk_rate_idx);
+ fd->clk_rate_idx = clk_rate_idx;
+
+end:
+ return ret;
+}
+
/*
* msm_fd_hw_get - Get fd hw for performing any hw operation.
* @fd: Pointer to fd device.
@@ -868,6 +907,8 @@ int msm_fd_hw_get(struct msm_fd_device *fd, unsigned int clock_rate_idx)
ret = msm_fd_hw_set_dt_parms(fd);
if (ret < 0)
goto error_set_dt;
+
+ fd->clk_rate_idx = clock_rate_idx;
}
fd->ref_count++;
@@ -924,7 +965,7 @@ void msm_fd_hw_put(struct msm_fd_device *fd)
*/
static int msm_fd_hw_attach_iommu(struct msm_fd_device *fd)
{
- int ret;
+ int ret = -EINVAL;
mutex_lock(&fd->lock);
@@ -1058,6 +1099,8 @@ static int msm_fd_hw_enable(struct msm_fd_device *fd,
msm_fd_hw_set_direction_angle(fd, buffer->settings.direction_index,
buffer->settings.angle_index);
msm_fd_hw_run(fd);
+ if (fd->recovery_mode)
+ dev_err(fd->dev, "Scheduled buffer in recovery mode\n");
return 1;
}
@@ -1153,6 +1196,9 @@ void msm_fd_hw_remove_buffers_from_queue(struct msm_fd_device *fd,
time = wait_for_completion_timeout(&active_buffer->completion,
msecs_to_jiffies(MSM_FD_PROCESSING_TIMEOUT_MS));
if (!time) {
+ /* Do a vb2 buffer done since it timed out */
+ vb2_buffer_done(&active_buffer->vb_v4l2_buf.vb2_buf,
+ VB2_BUF_STATE_DONE);
/* Remove active buffer */
msm_fd_hw_get_active_buffer(fd);
/* Schedule if other buffers are present in device */
@@ -1224,6 +1270,8 @@ int msm_fd_hw_schedule_and_start(struct msm_fd_device *fd)
spin_unlock(&fd->slock);
+ msm_fd_hw_update_settings(fd, buf);
+
return 0;
}
@@ -1257,8 +1305,12 @@ int msm_fd_hw_schedule_next_buffer(struct msm_fd_device *fd)
}
} else {
fd->state = MSM_FD_DEVICE_IDLE;
+ if (fd->recovery_mode)
+ dev_err(fd->dev, "No Buffer in recovery mode. Device Idle\n");
}
spin_unlock(&fd->slock);
+ msm_fd_hw_update_settings(fd, buf);
+
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 4200215705d0..de29692414d2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -188,11 +188,27 @@ static int msm_isp_prepare_v4l2_buf(struct msm_isp_buf_mgr *buf_mgr,
int ret;
struct msm_isp_buffer_mapped_info *mapped_info;
uint32_t accu_length = 0;
+ struct msm_isp_bufq *bufq = NULL;
+ bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq, stream id %x\n",
+ __func__, stream_id);
+ return -EINVAL;
+ }
for (i = 0; i < qbuf_buf->num_planes; i++) {
mapped_info = &buf_info->mapped_info[i];
mapped_info->buf_fd = qbuf_buf->planes[i].addr;
- ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
+
+ if (bufq->security_mode == SECURE_MODE)
+ ret = cam_smmu_get_stage2_phy_addr(buf_mgr->iommu_hdl,
+ mapped_info->buf_fd,
+ CAM_SMMU_MAP_RW,
+ buf_mgr->client,
+ &(mapped_info->paddr),
+ &(mapped_info->len));
+ else
+ ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
mapped_info->buf_fd,
CAM_SMMU_MAP_RW,
&(mapped_info->paddr),
@@ -242,8 +258,13 @@ static void msm_isp_unprepare_v4l2_buf(
for (i = 0; i < buf_info->num_planes; i++) {
mapped_info = &buf_info->mapped_info[i];
-
- cam_smmu_put_phy_addr(buf_mgr->iommu_hdl, mapped_info->buf_fd);
+ /* SEC_CAM: check any change is needed for secure_mode */
+ if (bufq->security_mode == SECURE_MODE)
+ cam_smmu_put_stage2_phy_addr(buf_mgr->iommu_hdl,
+ mapped_info->buf_fd);
+ else
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ mapped_info->buf_fd);
}
return;
}
@@ -259,7 +280,15 @@ static int msm_isp_map_buf(struct msm_isp_buf_mgr *buf_mgr,
__func__, __LINE__, buf_mgr, mapped_info);
return -EINVAL;
}
- ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ ret = cam_smmu_get_stage2_phy_addr(buf_mgr->iommu_hdl,
+ fd,
+ CAM_SMMU_MAP_RW,
+ buf_mgr->client,
+ &(mapped_info->paddr),
+ &(mapped_info->len));
+ else
+ ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
fd,
CAM_SMMU_MAP_RW,
&(mapped_info->paddr),
@@ -275,7 +304,11 @@ static int msm_isp_map_buf(struct msm_isp_buf_mgr *buf_mgr,
return rc;
smmu_map_error:
- cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ cam_smmu_put_stage2_phy_addr(buf_mgr->iommu_hdl,
+ fd);
+ else
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
fd);
return rc;
}
@@ -289,7 +322,12 @@ static int msm_isp_unmap_buf(struct msm_isp_buf_mgr *buf_mgr,
return -EINVAL;
}
- cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ /* SEC_CAMERA: recheck Put part for stats */
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ cam_smmu_put_stage2_phy_addr(buf_mgr->iommu_hdl,
+ fd);
+ else
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
fd);
return 0;
@@ -921,7 +959,7 @@ static int msm_isp_get_buf_src(struct msm_isp_buf_mgr *buf_mgr,
}
static int msm_isp_request_bufq(struct msm_isp_buf_mgr *buf_mgr,
- struct msm_isp_buf_request *buf_request)
+ struct msm_isp_buf_request_ver2 *buf_request)
{
int i;
struct msm_isp_bufq *bufq = NULL;
@@ -961,6 +999,7 @@ static int msm_isp_request_bufq(struct msm_isp_buf_mgr *buf_mgr,
bufq->num_bufs = buf_request->num_buf;
bufq->buf_type = buf_request->buf_type;
INIT_LIST_HEAD(&bufq->head);
+ bufq->security_mode = buf_request->security_mode;
for (i = 0; i < buf_request->num_buf; i++) {
bufq->bufs[i].state = MSM_ISP_BUFFER_STATE_INITIALIZED;
@@ -1032,15 +1071,25 @@ static int msm_isp_buf_put_scratch(struct msm_isp_buf_mgr *buf_mgr)
if (!buf_mgr->scratch_buf_addr)
return 0;
- rc = cam_smmu_put_phy_addr_scratch(buf_mgr->iommu_hdl,
+ if (buf_mgr->secure_enable == SECURE_MODE) {
+ rc = cam_smmu_free_stage2_scratch_mem(buf_mgr->iommu_hdl,
+ buf_mgr->client, buf_mgr->sc_handle);
+ if (buf_mgr->scratch_buf_stats_addr)
+ rc = cam_smmu_put_phy_addr_scratch(buf_mgr->iommu_hdl,
+ buf_mgr->scratch_buf_stats_addr);
+ } else {
+ rc = cam_smmu_put_phy_addr_scratch(buf_mgr->iommu_hdl,
buf_mgr->scratch_buf_addr);
+ }
if (rc)
pr_err("%s: failed to put scratch buffer to img iommu: %d\n",
__func__, rc);
- if (!rc)
+ if (!rc) {
buf_mgr->scratch_buf_addr = 0;
+ buf_mgr->scratch_buf_stats_addr = 0;
+ }
return rc;
}
@@ -1057,17 +1106,40 @@ static int msm_isp_buf_put_scratch(struct msm_isp_buf_mgr *buf_mgr)
static int msm_isp_buf_get_scratch(struct msm_isp_buf_mgr *buf_mgr)
{
int rc;
+ size_t range = buf_mgr->scratch_buf_range;
if (buf_mgr->scratch_buf_addr || !buf_mgr->scratch_buf_range)
/* already mapped or not supported */
return 0;
- rc = cam_smmu_get_phy_addr_scratch(
+ if (buf_mgr->secure_enable == SECURE_MODE) {
+ rc = cam_smmu_alloc_get_stage2_scratch_mem(buf_mgr->iommu_hdl,
+ CAM_SMMU_MAP_RW,
+ buf_mgr->client,
+ &buf_mgr->sc_handle,
+ &buf_mgr->scratch_buf_addr,
+ &range);
+ if (rc)
+ goto done;
+
+ rc = cam_smmu_get_phy_addr_scratch(
+ buf_mgr->iommu_hdl,
+ CAM_SMMU_MAP_RW,
+ &buf_mgr->scratch_buf_stats_addr,
+ buf_mgr->scratch_buf_range,
+ SZ_4K);
+ if (rc)
+ msm_isp_buf_put_scratch(buf_mgr);
+ } else {
+ rc = cam_smmu_get_phy_addr_scratch(
buf_mgr->iommu_hdl,
CAM_SMMU_MAP_RW,
&buf_mgr->scratch_buf_addr,
buf_mgr->scratch_buf_range,
SZ_4K);
+ buf_mgr->scratch_buf_stats_addr = buf_mgr->scratch_buf_addr;
+ }
+done:
if (rc) {
pr_err("%s: failed to map scratch buffer to img iommu: %d\n",
__func__, rc);
@@ -1085,20 +1157,23 @@ int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
pr_debug("%s: cmd->security_mode : %d\n", __func__, cmd->security_mode);
mutex_lock(&buf_mgr->lock);
if (cmd->iommu_attach_mode == IOMMU_ATTACH) {
- buf_mgr->secure_enable = cmd->security_mode;
-
/*
* Call hypervisor thru scm call to notify secure or
* non-secure mode
*/
if (buf_mgr->attach_ref_cnt == 0) {
- rc = cam_smmu_ops(buf_mgr->iommu_hdl,
- CAM_SMMU_ATTACH);
+ if (cmd->security_mode == SECURE_MODE)
+ rc = cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_ATTACH_SEC_VFE_NS_STATS);
+ else
+ rc = cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_ATTACH);
if (rc < 0) {
pr_err("%s: img smmu attach error, rc :%d\n",
__func__, rc);
- goto err1;
+ goto err1;
}
+ buf_mgr->secure_enable = cmd->security_mode;
}
buf_mgr->attach_ref_cnt++;
rc = msm_isp_buf_get_scratch(buf_mgr);
@@ -1113,8 +1188,12 @@ int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
if (buf_mgr->attach_ref_cnt == 0) {
rc = msm_isp_buf_put_scratch(buf_mgr);
- rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
- CAM_SMMU_DETACH);
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH_SEC_VFE_NS_STATS);
+ else
+ rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH);
if (rc < 0) {
pr_err("%s: img/stats smmu detach error, rc :%d\n",
__func__, rc);
@@ -1126,8 +1205,11 @@ int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
return rc;
err2:
- if (cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH))
- pr_err("%s: img smmu detach error\n", __func__);
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH_SEC_VFE_NS_STATS);
+ else
+ cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH);
err1:
mutex_unlock(&buf_mgr->lock);
return rc;
@@ -1162,12 +1244,11 @@ static int msm_isp_init_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
buf_mgr->pagefault_debug_disable = 0;
buf_mgr->frameId_mismatch_recovery = 0;
- mutex_unlock(&buf_mgr->lock);
- return 0;
-
+ /* create ION client */
+ buf_mgr->client = msm_ion_client_create("vfe");
get_handle_error:
mutex_unlock(&buf_mgr->lock);
- return rc;
+ return 0;
}
static int msm_isp_deinit_isp_buf_mgr(
@@ -1186,10 +1267,21 @@ static int msm_isp_deinit_isp_buf_mgr(
buf_mgr->pagefault_debug_disable = 0;
msm_isp_buf_put_scratch(buf_mgr);
- cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH);
+ if (buf_mgr->attach_ref_cnt != 0) {
+ if (buf_mgr->secure_enable == SECURE_MODE)
+ cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH_SEC_VFE_NS_STATS);
+ else
+ cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH);
+ }
cam_smmu_destroy_handle(buf_mgr->iommu_hdl);
-
buf_mgr->attach_ref_cnt = 0;
+ buf_mgr->secure_enable = 0;
+ buf_mgr->attach_ref_cnt = 0;
+ if (buf_mgr->client) {
+ ion_client_destroy(buf_mgr->client);
+ buf_mgr->client = NULL;
+ }
mutex_unlock(&buf_mgr->lock);
return 0;
}
@@ -1200,8 +1292,20 @@ int msm_isp_proc_buf_cmd(struct msm_isp_buf_mgr *buf_mgr,
switch (cmd) {
case VIDIOC_MSM_ISP_REQUEST_BUF: {
struct msm_isp_buf_request *buf_req = arg;
+ struct msm_isp_buf_request_ver2 buf_req_ver2;
+
+ memcpy(&buf_req_ver2, buf_req,
+ sizeof(struct msm_isp_buf_request));
+ buf_req_ver2.security_mode = NON_SECURE_MODE;
+ buf_mgr->ops->request_buf(buf_mgr, &buf_req_ver2);
+ memcpy(buf_req, &buf_req_ver2,
+ sizeof(struct msm_isp_buf_request));
+ break;
+ }
+ case VIDIOC_MSM_ISP_REQUEST_BUF_VER2: {
+ struct msm_isp_buf_request_ver2 *buf_req_ver2 = arg;
- buf_mgr->ops->request_buf(buf_mgr, buf_req);
+ buf_mgr->ops->request_buf(buf_mgr, buf_req_ver2);
break;
}
case VIDIOC_MSM_ISP_ENQUEUE_BUF: {
@@ -1393,7 +1497,6 @@ int msm_isp_create_isp_buf_mgr(
buf_mgr->open_count = 0;
buf_mgr->pagefault_debug_disable = 0;
buf_mgr->secure_enable = NON_SECURE_MODE;
- buf_mgr->attach_state = MSM_ISP_BUF_MGR_DETACH;
buf_mgr->scratch_buf_range = scratch_buf_range;
mutex_init(&buf_mgr->lock);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
index 43519ee74062..21fab0590b55 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
@@ -119,11 +119,12 @@ struct msm_isp_bufq {
spinlock_t bufq_lock;
/*Native buffer queue*/
struct list_head head;
+ enum smmu_attach_mode security_mode;
};
struct msm_isp_buf_ops {
int (*request_buf)(struct msm_isp_buf_mgr *buf_mgr,
- struct msm_isp_buf_request *buf_request);
+ struct msm_isp_buf_request_ver2 *buf_request);
int (*enqueue_buf)(struct msm_isp_buf_mgr *buf_mgr,
struct msm_isp_qbuf_info *info);
@@ -191,21 +192,20 @@ struct msm_isp_buf_mgr {
struct msm_sd_req_vb2_q *vb2_ops;
- /*IOMMU driver*/
- int iommu_hdl;
/*Add secure mode*/
int secure_enable;
- int num_iommu_ctx;
- int num_iommu_secure_ctx;
int attach_ref_cnt;
enum msm_isp_buf_mgr_state attach_state;
struct device *isp_dev;
struct mutex lock;
/* Scratch buffer */
dma_addr_t scratch_buf_addr;
+ dma_addr_t scratch_buf_stats_addr;
uint32_t scratch_buf_range;
+ int iommu_hdl;
+ struct ion_handle *sc_handle;
};
int msm_isp_create_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 094996b2d60b..35daf30bac63 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -655,8 +655,6 @@ int vfe_hw_probe(struct platform_device *pdev)
goto probe_fail3;
}
msm_isp_enable_debugfs(vfe_dev, msm_isp_bw_request_history);
- vfe_dev->buf_mgr->num_iommu_secure_ctx =
- vfe_dev->hw_info->num_iommu_secure_ctx;
vfe_dev->buf_mgr->init_done = 1;
vfe_dev->vfe_open_cnt = 0;
return rc;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 1f860f2c5b12..9c7eba21fde1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -243,6 +243,10 @@ struct msm_vfe_core_ops {
int (*start_fetch_eng_multi_pass)(struct vfe_device *vfe_dev,
void *arg);
void (*set_halt_restart_mask)(struct vfe_device *vfe_dev);
+ void (*set_bus_err_ign_mask)(struct vfe_device *vfe_dev,
+ int wm, int enable);
+ void (*get_bus_err_mask)(struct vfe_device *vfe_dev,
+ uint32_t *bus_err, uint32_t *irq_status1);
};
struct msm_vfe_stats_ops {
@@ -786,6 +790,8 @@ struct vfe_device {
/* irq info */
uint32_t irq0_mask;
uint32_t irq1_mask;
+
+ uint32_t bus_err_ign_mask;
};
struct vfe_parent_device {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 8275f8cedf2e..43f562b18209 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -1474,6 +1474,8 @@ struct msm_vfe_hardware_info vfe32_hw_info = {
.is_module_cfg_lock_needed =
msm_vfe32_is_module_cfg_lock_needed,
.ahb_clk_cfg = NULL,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe32_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 2d937fc3ed05..a1fb307b09c1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -2263,6 +2263,8 @@ struct msm_vfe_hardware_info vfe40_hw_info = {
msm_vfe40_start_fetch_engine_multi_pass,
.set_halt_restart_mask =
msm_vfe40_set_halt_restart_mask,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe40_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 15820b5f398b..0a72a041de28 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -1869,6 +1869,8 @@ struct msm_vfe_hardware_info vfe44_hw_info = {
.ahb_clk_cfg = NULL,
.set_halt_restart_mask =
msm_vfe44_set_halt_restart_mask,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe44_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 23fbc4f5e33a..f2d53c956fdc 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -1945,6 +1945,8 @@ struct msm_vfe_hardware_info vfe46_hw_info = {
.ahb_clk_cfg = NULL,
.set_halt_restart_mask =
msm_vfe46_set_halt_restart_mask,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe46_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 56056849e140..13c6e000fefc 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -1523,6 +1523,8 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
(vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
msm_vfe47_configure_hvx(vfe_dev, 1);
+ else
+ msm_vfe47_configure_hvx(vfe_dev, 0);
bus_en =
((vfe_dev->axi_data.
@@ -2858,6 +2860,8 @@ struct msm_vfe_hardware_info vfe47_hw_info = {
msm_vfe47_start_fetch_engine_multi_pass,
.set_halt_restart_mask =
msm_vfe47_set_halt_restart_mask,
+ .set_bus_err_ign_mask = NULL,
+ .get_bus_err_mask = NULL,
},
.stats_ops = {
.get_stats_idx = msm_vfe47_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
index c533f23c1163..f346ceb6c9e5 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
@@ -241,6 +241,25 @@ static void msm_vfe48_put_regulators(struct vfe_device *vfe_dev)
vfe_dev->vfe_num_regulators = 0;
}
+static void msm_vfe48_get_bus_err_mask(struct vfe_device *vfe_dev,
+ uint32_t *bus_err, uint32_t *irq_status1)
+{
+ *bus_err = msm_camera_io_r(vfe_dev->vfe_base + 0xC94);
+
+ *bus_err &= ~vfe_dev->bus_err_ign_mask;
+ if (*bus_err == 0)
+ *irq_status1 &= ~(1 << 4);
+}
+
+static void msm_vfe48_set_bus_err_ign_mask(struct vfe_device *vfe_dev,
+ int wm, int enable)
+{
+ if (enable)
+ vfe_dev->bus_err_ign_mask |= (1 << wm);
+ else
+ vfe_dev->bus_err_ign_mask &= ~(1 << wm);
+}
+
struct msm_vfe_hardware_info vfe48_hw_info = {
.num_iommu_ctx = 1,
.num_iommu_secure_ctx = 0,
@@ -315,6 +334,8 @@ struct msm_vfe_hardware_info vfe48_hw_info = {
msm_vfe47_start_fetch_engine_multi_pass,
.set_halt_restart_mask =
msm_vfe47_set_halt_restart_mask,
+ .set_bus_err_ign_mask = msm_vfe48_set_bus_err_ign_mask,
+ .get_bus_err_mask = msm_vfe48_get_bus_err_mask,
},
.stats_ops = {
.get_stats_idx = msm_vfe47_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 1bf628de4df0..f6e0d9083b22 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -13,6 +13,7 @@
#include <media/v4l2-subdev.h>
#include <asm/div64.h>
#include "msm_isp_util.h"
+#include "msm_isp_stats_util.h"
#include "msm_isp_axi_util.h"
#include "msm_isp48.h"
@@ -429,6 +430,13 @@ static void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
vfe_dev->pdev->id,
stream_info->stream_handle[vfe_idx], j);
stream_info->wm[vfe_idx][i] = j;
+ /* setup var to ignore bus error from RDI wm */
+ if (stream_info->stream_src >= RDI_INTF_0) {
+ if (vfe_dev->hw_info->vfe_ops.core_ops.
+ set_bus_err_ign_mask)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ set_bus_err_ign_mask(vfe_dev, j, 1);
+ }
}
}
@@ -442,6 +450,13 @@ void msm_isp_axi_free_wm(struct vfe_device *vfe_dev,
for (i = 0; i < stream_info->num_planes; i++) {
axi_data->free_wm[stream_info->wm[vfe_idx][i]] = 0;
axi_data->num_used_wm--;
+ if (stream_info->stream_src >= RDI_INTF_0) {
+ if (vfe_dev->hw_info->vfe_ops.core_ops.
+ set_bus_err_ign_mask)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ set_bus_err_ign_mask(vfe_dev,
+ stream_info->wm[vfe_idx][i], 0);
+ }
}
if (stream_info->stream_src <= IDEAL_RAW)
axi_data->num_pix_stream++;
@@ -2186,6 +2201,7 @@ static void msm_isp_input_disable(struct vfe_device *vfe_dev, int cmd_type)
if (msm_vfe_is_vfe48(vfe_dev))
vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
0, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
}
}
@@ -2758,12 +2774,11 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
if (!update_vfes[k])
continue;
vfe_dev = update_vfes[k];
- axi_data = &vfe_dev->axi_data;
- if (axi_data->src_info[VFE_PIX_0].active == 0) {
- vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
- vfe_dev, 0xFF, 0);
- }
+ /* make sure all stats are stopped if camif is stopped */
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active == 0)
+ msm_isp_stop_all_stats_stream(vfe_dev);
}
+
for (i = 0; i < num_streams; i++) {
stream_info = streams[i];
spin_lock_irqsave(&stream_info->lock, flags);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 22a7f6886964..38ce78d941c9 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -23,7 +23,7 @@ static inline void msm_isp_stats_cfg_wm_scratch(struct vfe_device *vfe_dev,
{
vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
vfe_dev, stream_info,
- pingpong_status, vfe_dev->buf_mgr->scratch_buf_addr);
+ pingpong_status, vfe_dev->buf_mgr->scratch_buf_stats_addr);
}
static inline void msm_isp_stats_cfg_stream_scratch(
@@ -548,15 +548,12 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
return 0;
}
-void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
+void msm_isp_stop_all_stats_stream(struct vfe_device *vfe_dev)
{
- struct msm_vfe_stats_stream_release_cmd
- stream_release_cmd[MSM_ISP_STATS_MAX];
struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
struct msm_vfe_stats_stream *stream_info;
int i;
int vfe_idx;
- int num_stream = 0;
unsigned long flags;
stream_cfg_cmd.enable = 0;
@@ -565,7 +562,8 @@ void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state == STATS_AVAILABLE) {
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE) {
spin_unlock_irqrestore(&stream_info->lock, flags);
continue;
}
@@ -575,12 +573,6 @@ void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
spin_unlock_irqrestore(&stream_info->lock, flags);
continue;
}
- stream_release_cmd[num_stream++].stream_handle =
- stream_info->stream_handle[vfe_idx];
- if (stream_info->state == STATS_INACTIVE) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
- continue;
- }
stream_cfg_cmd.stream_handle[
stream_cfg_cmd.num_streams] =
stream_info->stream_handle[vfe_idx];
@@ -589,6 +581,37 @@ void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
}
if (stream_cfg_cmd.num_streams)
msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+}
+
+void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_stats_stream_release_cmd
+ stream_release_cmd[MSM_ISP_STATS_MAX];
+ struct msm_vfe_stats_stream *stream_info;
+ int i;
+ int vfe_idx;
+ int num_stream = 0;
+ unsigned long flags;
+
+ msm_isp_stop_all_stats_stream(vfe_dev);
+
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state == STATS_AVAILABLE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_release_cmd[num_stream++].stream_handle =
+ stream_info->stream_handle[vfe_idx];
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
for (i = 0; i < num_stream; i++)
msm_isp_release_stats_stream(vfe_dev, &stream_release_cmd[i]);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
index e9728f33fae1..2e3a24dd1f0d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -29,6 +29,7 @@ int msm_isp_stats_restart(struct vfe_device *vfe_dev);
void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev);
void msm_isp_process_stats_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
enum msm_isp_comp_irq_types irq);
+void msm_isp_stop_all_stats_stream(struct vfe_device *vfe_dev);
static inline int msm_isp_get_vfe_idx_for_stats_stream_user(
struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 59b875d6e464..4abb6d1d91a8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -450,9 +450,7 @@ static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
}
pix_cfg = &input_cfg->d.pix_cfg;
- if ((pix_cfg->hvx_cmd > HVX_DISABLE) &&
- (pix_cfg->hvx_cmd <= HVX_ROUND_TRIP))
- vfe_dev->hvx_cmd = pix_cfg->hvx_cmd;
+ vfe_dev->hvx_cmd = pix_cfg->hvx_cmd;
vfe_dev->is_split = input_cfg->d.pix_cfg.is_split;
vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
@@ -821,6 +819,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
}
case VIDIOC_MSM_ISP_REQUEST_BUF:
+ case VIDIOC_MSM_ISP_REQUEST_BUF_VER2:
/* fallthrough */
case VIDIOC_MSM_ISP_ENQUEUE_BUF:
/* fallthrough */
@@ -1790,12 +1789,17 @@ static int msm_isp_process_overflow_irq(
uint32_t *irq_status0, uint32_t *irq_status1)
{
uint32_t overflow_mask;
+ uint32_t bus_err = 0;
/* if there are no active streams - do not start recovery */
if (!vfe_dev->axi_data.num_active_stream)
return 0;
- /*Mask out all other irqs if recovery is started*/
+ if (vfe_dev->hw_info->vfe_ops.core_ops.
+ get_bus_err_mask)
+ vfe_dev->hw_info->vfe_ops.core_ops.get_bus_err_mask(
+ vfe_dev, &bus_err, irq_status1);
+ /* Mask out all other irqs if recovery is started */
if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
uint32_t halt_restart_mask0, halt_restart_mask1;
vfe_dev->hw_info->vfe_ops.core_ops.
@@ -1807,14 +1811,13 @@ static int msm_isp_process_overflow_irq(
return 0;
}
- /*Check if any overflow bit is set*/
+ /* Check if any overflow bit is set */
vfe_dev->hw_info->vfe_ops.core_ops.
get_overflow_mask(&overflow_mask);
overflow_mask &= *irq_status1;
if (overflow_mask) {
struct msm_isp_event_data error_event;
- uint32_t val = 0;
int i;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
@@ -1829,10 +1832,8 @@ static int msm_isp_process_overflow_irq(
*irq_status1 &= ~overflow_mask;
return 0;
}
- if (msm_vfe_is_vfe48(vfe_dev))
- val = msm_camera_io_r(vfe_dev->vfe_base + 0xC94);
pr_err("%s: vfe %d overflow mask %x, bus_error %x\n",
- __func__, vfe_dev->pdev->id, overflow_mask, val);
+ __func__, vfe_dev->pdev->id, overflow_mask, bus_err);
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (!axi_data->free_wm[i])
continue;
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
index 3301fc446193..4b4846907d0f 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
@@ -537,6 +537,7 @@ static int msm_jpegdma_open(struct file *file)
if (!ctx)
return -ENOMEM;
+ mutex_init(&ctx->lock);
ctx->jdma_device = device;
dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma open\n");
/* Set ctx defaults */
@@ -835,12 +836,13 @@ static int msm_jpegdma_qbuf(struct file *file, void *fh,
int ret;
msm_jpegdma_cast_long_to_buff_ptr(buf->m.userptr, &up_buff);
-
+ mutex_lock(&ctx->lock);
if (!access_ok(VERIFY_READ, up_buff,
sizeof(struct msm_jpeg_dma_buff)) ||
get_user(kp_buff.fd, &up_buff->fd) ||
get_user(kp_buff.offset, &up_buff->offset)) {
dev_err(ctx->jdma_device->dev, "Error getting user data\n");
+ mutex_unlock(&ctx->lock);
return -EFAULT;
}
@@ -849,6 +851,7 @@ static int msm_jpegdma_qbuf(struct file *file, void *fh,
put_user(kp_buff.fd, &up_buff->fd) ||
put_user(kp_buff.offset, &up_buff->offset)) {
dev_err(ctx->jdma_device->dev, "Error putting user data\n");
+ mutex_unlock(&ctx->lock);
return -EFAULT;
}
@@ -871,7 +874,7 @@ static int msm_jpegdma_qbuf(struct file *file, void *fh,
ret = v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
if (ret < 0)
dev_err(ctx->jdma_device->dev, "QBuf fail\n");
-
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -1032,10 +1035,11 @@ static int msm_jpegdma_s_crop(struct file *file, void *fh,
if (crop->c.height % formats[ctx->format_idx].v_align)
return -EINVAL;
+ mutex_lock(&ctx->lock);
ctx->crop = crop->c;
if (atomic_read(&ctx->active))
ret = msm_jpegdma_update_hw_config(ctx);
-
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -1240,12 +1244,14 @@ void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
ctx = v4l2_m2m_get_curr_priv(dma->m2m_dev);
if (ctx) {
+ mutex_lock(&ctx->lock);
ctx->plane_idx++;
if (ctx->plane_idx >= formats[ctx->format_idx].num_planes) {
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (src_buf == NULL || dst_buf == NULL) {
dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ mutex_unlock(&ctx->lock);
mutex_unlock(&dma->lock);
return;
}
@@ -1261,11 +1267,13 @@ void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
if (src_buf == NULL || dst_buf == NULL) {
dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ mutex_unlock(&ctx->lock);
mutex_unlock(&dma->lock);
return;
}
msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
}
+ mutex_unlock(&ctx->lock);
}
mutex_unlock(&dma->lock);
}
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h
index 6a1205daf1d2..4911ce3aa5bd 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h
@@ -254,6 +254,7 @@ struct msm_jpegdma_buf_handle {
* @format_idx: Current format index.
*/
struct jpegdma_ctx {
+ struct mutex lock;
struct msm_jpegdma_device *jdma_device;
atomic_t active;
struct completion completion;
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 7e452e9e4ee2..b7724b4bf936 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -102,6 +102,7 @@
#define IS_DEFAULT_OUTPUT_BUF_INDEX(index) \
((index == DEFAULT_OUTPUT_BUF_INDEX) ? 1 : 0)
+static struct msm_cpp_vbif_data cpp_vbif;
static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
uint32_t buff_mgr_ops, uint32_t ids, void *arg);
@@ -121,6 +122,7 @@ static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info);
static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
struct msm_cpp_frame_info_t *frame_info);
+static int32_t msm_cpp_reset_vbif_and_load_fw(struct cpp_device *cpp_dev);
#if CONFIG_MSM_CPP_DBG
#define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
@@ -171,6 +173,26 @@ struct msm_cpp_timer_t {
struct msm_cpp_timer_t cpp_timer;
static void msm_cpp_set_vbif_reg_values(struct cpp_device *cpp_dev);
+
+void msm_cpp_vbif_register_error_handler(void *dev,
+ enum cpp_vbif_client client,
+ int (*client_vbif_error_handler)(void *, uint32_t))
+{
+ if (dev == NULL || client >= VBIF_CLIENT_MAX) {
+ pr_err("%s: Fail to register handler! dev = %p, client %d\n",
+ __func__, dev, client);
+ return;
+ }
+
+ if (client_vbif_error_handler != NULL) {
+ cpp_vbif.dev[client] = dev;
+ cpp_vbif.err_handler[client] = client_vbif_error_handler;
+ } else {
+ /* if handler = NULL, is unregister case */
+ cpp_vbif.dev[client] = NULL;
+ cpp_vbif.err_handler[client] = NULL;
+ }
+}
static int msm_cpp_init_bandwidth_mgr(struct cpp_device *cpp_dev)
{
int rc = 0;
@@ -755,6 +777,7 @@ static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain,
struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
int32_t i = 0, queue_len = 0;
struct msm_device_queue *queue = NULL;
+ int32_t rc = 0;
if (token) {
cpp_dev = token;
@@ -765,7 +788,16 @@ static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain,
}
mutex_lock(&cpp_dev->mutex);
tasklet_kill(&cpp_dev->cpp_tasklet);
- cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ if (rc < 0) {
+ pr_err("load fw failure %d-retry\n", rc);
+ rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
+ if (rc < 0) {
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ return;
+ }
+ }
queue = &cpp_timer.data.cpp_dev->processing_q;
queue_len = queue->len;
if (!queue_len) {
@@ -1022,11 +1054,16 @@ static int cpp_init_hardware(struct cpp_device *cpp_dev)
if (cpp_dev->fw_name_bin) {
msm_camera_enable_irq(cpp_dev->irq, false);
rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
- msm_camera_enable_irq(cpp_dev->irq, true);
if (rc < 0) {
- pr_err("%s: load firmware failure %d\n", __func__, rc);
- goto pwr_collapse_reset;
+ pr_err("%s: load firmware failure %d-retry\n",
+ __func__, rc);
+ rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
+ if (rc < 0) {
+ msm_camera_enable_irq(cpp_dev->irq, true);
+ goto pwr_collapse_reset;
+ }
}
+ msm_camera_enable_irq(cpp_dev->irq, true);
msm_camera_io_w_mb(0x7C8, cpp_dev->base +
MSM_CPP_MICRO_IRQGEN_MASK);
msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
@@ -1038,6 +1075,7 @@ static int cpp_init_hardware(struct cpp_device *cpp_dev)
pwr_collapse_reset:
msm_cpp_update_gdscr_status(cpp_dev, false);
+ msm_camera_unregister_irq(cpp_dev->pdev, cpp_dev->irq, cpp_dev);
req_irq_fail:
if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
CAM_AHB_SUSPEND_VOTE) < 0)
@@ -1081,7 +1119,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
{
uint32_t i;
uint32_t *ptr_bin = NULL;
- int32_t rc = 0;
+ int32_t rc = 0, ret = 0;
if (!fw_name_bin) {
pr_err("%s:%d] invalid fw name", __func__, __LINE__);
@@ -1207,14 +1245,70 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
}
vote:
- rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
+ ret = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
CAM_AHB_SVS_VOTE);
- if (rc < 0)
+ if (ret < 0) {
pr_err("%s:%d: failed to vote for AHB\n", __func__, __LINE__);
+ rc = ret;
+ }
end:
return rc;
}
+int32_t msm_cpp_reset_vbif_clients(struct cpp_device *cpp_dev)
+{
+ uint32_t i;
+
+ pr_warn("%s: handle vbif hang...\n", __func__);
+ for (i = 0; i < VBIF_CLIENT_MAX; i++) {
+ if (cpp_dev->vbif_data->err_handler[i] == NULL)
+ continue;
+
+ cpp_dev->vbif_data->err_handler[i](
+ cpp_dev->vbif_data->dev[i], CPP_VBIF_ERROR_HANG);
+ }
+ return 0;
+}
+
+int32_t msm_cpp_reset_vbif_and_load_fw(struct cpp_device *cpp_dev)
+{
+ int32_t rc = 0;
+
+ msm_cpp_reset_vbif_clients(cpp_dev);
+
+ rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ if (rc < 0)
+ pr_err("Reset and load fw failed %d\n", rc);
+
+ return rc;
+}
+
+int cpp_vbif_error_handler(void *dev, uint32_t vbif_error)
+{
+ struct cpp_device *cpp_dev = NULL;
+
+ if (dev == NULL || vbif_error >= CPP_VBIF_ERROR_MAX) {
+ pr_err("failed: dev %p, vbif error %d\n", dev, vbif_error);
+ return -EINVAL;
+ }
+
+ cpp_dev = (struct cpp_device *) dev;
+
+ /* MMSS_A_CPP_IRQ_STATUS_0 = 0x10 */
+ pr_err("%s: before reset halt... read MMSS_A_CPP_IRQ_STATUS_0 = 0x%x",
+ __func__, msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
+
+ pr_err("%s: start reset bus bridge on FD + CPP!\n", __func__);
+ /* MMSS_A_CPP_RST_CMD_0 = 0x8, firmware reset = 0x3DF77 */
+ msm_camera_io_w(0x3DF77, cpp_dev->cpp_hw_base + 0x8);
+
+ /* MMSS_A_CPP_IRQ_STATUS_0 = 0x10 */
+ pr_err("%s: after reset halt... read MMSS_A_CPP_IRQ_STATUS_0 = 0x%x",
+ __func__, msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
+
+ return 0;
+}
+
static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
int rc;
@@ -1255,6 +1349,10 @@ static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
CPP_DBG("open %d %pK\n", i, &fh->vfh);
cpp_dev->cpp_open_cnt++;
+
+ msm_cpp_vbif_register_error_handler(cpp_dev,
+ VBIF_CLIENT_CPP, cpp_vbif_error_handler);
+
if (cpp_dev->cpp_open_cnt == 1) {
rc = cpp_init_hardware(cpp_dev);
if (rc < 0) {
@@ -1284,6 +1382,7 @@ static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
rc = -ENOMEM;
}
}
+
mutex_unlock(&cpp_dev->mutex);
return 0;
}
@@ -1383,6 +1482,9 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
}
+ /* unregister vbif error handler */
+ msm_cpp_vbif_register_error_handler(cpp_dev,
+ VBIF_CLIENT_CPP, NULL);
mutex_unlock(&cpp_dev->mutex);
return 0;
}
@@ -1641,8 +1743,12 @@ static void msm_cpp_do_timeout_work(struct work_struct *work)
rc = cpp_load_fw(cpp_timer.data.cpp_dev,
cpp_timer.data.cpp_dev->fw_name_bin);
if (rc) {
- pr_warn("Firmware loading failed\n");
- goto error;
+ pr_warn("Firmware loading failed-retry\n");
+ rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
+ if (rc < 0) {
+ pr_err("Firmware loading failed\n");
+ goto error;
+ }
} else {
pr_debug("Firmware loading done\n");
}
@@ -2926,11 +3032,14 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
msm_camera_enable_irq(cpp_dev->irq, false);
rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
if (rc < 0) {
- pr_err("%s: load firmware failure %d\n",
+ pr_err("%s: load firmware failure %d-retry\n",
__func__, rc);
- enable_irq(cpp_dev->irq->start);
- mutex_unlock(&cpp_dev->mutex);
- return rc;
+ rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
+ if (rc < 0) {
+ enable_irq(cpp_dev->irq->start);
+ mutex_unlock(&cpp_dev->mutex);
+ return rc;
+ }
}
rc = msm_cpp_fw_version(cpp_dev);
if (rc < 0) {
@@ -4195,6 +4304,8 @@ static int cpp_probe(struct platform_device *pdev)
spin_lock_init(&cpp_timer.data.processed_frame_lock);
cpp_dev->pdev = pdev;
+ memset(&cpp_vbif, 0, sizeof(struct msm_cpp_vbif_data));
+ cpp_dev->vbif_data = &cpp_vbif;
cpp_dev->camss_cpp_base =
msm_camera_get_reg_base(pdev, "camss_cpp", true);
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
index f46cc10cef46..470c0cf1131b 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
@@ -23,6 +23,7 @@
#include "msm_sd.h"
#include "cam_soc_api.h"
#include "cam_hw_ops.h"
+#include <media/msmb_pproc.h>
/* hw version info:
31:28 Major version
@@ -95,6 +96,22 @@
#define MSM_CPP_TX_FIFO_LEVEL 16
#define MSM_CPP_RX_FIFO_LEVEL 512
+enum cpp_vbif_error {
+ CPP_VBIF_ERROR_HANG,
+ CPP_VBIF_ERROR_MAX,
+};
+
+enum cpp_vbif_client {
+ VBIF_CLIENT_CPP,
+ VBIF_CLIENT_FD,
+ VBIF_CLIENT_MAX,
+};
+
+struct msm_cpp_vbif_data {
+ int (*err_handler[VBIF_CLIENT_MAX])(void *, uint32_t);
+ void *dev[VBIF_CLIENT_MAX];
+};
+
struct cpp_subscribe_info {
struct v4l2_fh *vfh;
uint32_t active;
@@ -266,6 +283,7 @@ struct cpp_device {
uint32_t bus_master_flag;
uint32_t micro_reset;
struct msm_cpp_payload_params payload_params;
+ struct msm_cpp_vbif_data *vbif_data;
};
int msm_cpp_set_micro_clk(struct cpp_device *cpp_dev);
@@ -274,5 +292,8 @@ int msm_cpp_get_clock_index(struct cpp_device *cpp_dev, const char *clk_name);
long msm_cpp_set_core_clk(struct cpp_device *cpp_dev, long rate, int idx);
void msm_cpp_fetch_dt_params(struct cpp_device *cpp_dev);
int msm_cpp_read_payload_params_from_dt(struct cpp_device *cpp_dev);
+void msm_cpp_vbif_register_error_handler(void *dev,
+ enum cpp_vbif_client client,
+ int (*client_vbif_error_handler)(void *, uint32_t));
#endif /* __MSM_CPP_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index e4cee1fa4ffc..a7cd44636d1d 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -1600,7 +1600,7 @@ static const struct v4l2_subdev_ops msm_csiphy_subdev_ops = {
static int msm_csiphy_get_clk_info(struct csiphy_device *csiphy_dev,
struct platform_device *pdev)
{
- int i, rc;
+ int i, rc = 0;
char *csi_3p_clk_name = "csi_phy_3p_clk";
char *csi_3p_clk_src_name = "csiphy_3p_clk_src";
uint32_t clk_cnt = 0;
@@ -1616,6 +1616,7 @@ static int msm_csiphy_get_clk_info(struct csiphy_device *csiphy_dev,
if (csiphy_dev->num_all_clk > CSIPHY_NUM_CLK_MAX) {
pr_err("%s: invalid count=%zu, max is %d\n", __func__,
csiphy_dev->num_all_clk, CSIPHY_NUM_CLK_MAX);
+ rc = -EINVAL;
goto MAX_CLK_ERROR;
}
@@ -1659,13 +1660,14 @@ static int msm_csiphy_get_clk_info(struct csiphy_device *csiphy_dev,
}
csiphy_dev->num_clk = clk_cnt;
+ return rc;
MAX_CLK_ERROR:
msm_camera_put_clk_info(csiphy_dev->pdev,
&csiphy_dev->csiphy_all_clk_info,
&csiphy_dev->csiphy_all_clk,
csiphy_dev->num_all_clk);
- return 0;
+ return rc;
}
static int csiphy_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 86e7837cc02a..c1c25b655d1f 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -381,9 +381,6 @@ static int32_t msm_sensor_fill_slave_info_init_params(
if (!slave_info || !sensor_info)
return -EINVAL;
- if (!slave_info->is_init_params_valid)
- return 0;
-
sensor_init_params = &slave_info->sensor_init_params;
if (INVALID_CAMERA_B != sensor_init_params->position)
sensor_info->position =
@@ -754,8 +751,6 @@ int32_t msm_sensor_driver_probe(void *setting,
slave_info->power_setting_array.power_down_setting =
compat_ptr(slave_info32->
power_setting_array.power_down_setting);
- slave_info->is_init_params_valid =
- slave_info32->is_init_params_valid;
slave_info->sensor_init_params =
slave_info32->sensor_init_params;
slave_info->output_format =
@@ -783,13 +778,10 @@ int32_t msm_sensor_driver_probe(void *setting,
CDBG("power up size %d power down size %d\n",
slave_info->power_setting_array.size,
slave_info->power_setting_array.size_down);
-
- if (slave_info->is_init_params_valid) {
- CDBG("position %d",
- slave_info->sensor_init_params.position);
- CDBG("mount %d",
- slave_info->sensor_init_params.sensor_mount_angle);
- }
+ CDBG("position %d",
+ slave_info->sensor_init_params.position);
+ CDBG("mount %d",
+ slave_info->sensor_init_params.sensor_mount_angle);
/* Validate camera id */
if (slave_info->camera_id >= MAX_CAMERAS) {
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index a0195ac400f8..0188637af85c 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -928,7 +928,7 @@ static int mpq_map_buffer_to_kernel(
MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__);
*kernel_mem = NULL;
} else {
- unsigned long tmp;
+ size_t tmp;
*kernel_mem = ion_map_kernel(client, ion_handle);
if (IS_ERR_OR_NULL(*kernel_mem)) {
ret = PTR_ERR(*kernel_mem);
@@ -940,7 +940,7 @@ static int mpq_map_buffer_to_kernel(
}
ion_handle_get_size(client, ion_handle, &tmp);
MPQ_DVB_DBG_PRINT(
- "%s: mapped to address 0x%p, size=%lu\n",
+ "%s: mapped to address 0x%p, size=%zu\n",
__func__, *kernel_mem, tmp);
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index dc5a5a0dc851..4e695fc774f6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -2108,7 +2108,7 @@ static ssize_t sde_rotator_show_caps(struct device *dev,
#define SPRINT(fmt, ...) \
(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
- SPRINT("wb_count=%d\n", mgr->queue_count);
+ SPRINT("queue_count=%d\n", mgr->queue_count);
SPRINT("downscale=1\n");
SPRINT("ubwc=1\n");
@@ -2415,7 +2415,7 @@ static int sde_rotator_register_clk(struct platform_device *pdev,
static void sde_rotator_unregister_clk(struct sde_rot_mgr *mgr)
{
- kfree(mgr->rot_clk);
+ devm_kfree(mgr->device, mgr->rot_clk);
mgr->rot_clk = NULL;
mgr->num_rot_clk = 0;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index eaf35733b38a..ff46a607db0c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -2096,6 +2096,8 @@ static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
else
SPRINT("min_downscale=2.0\n");
+ SPRINT("downscale_compression=1\n");
+
#undef SPRINT
return cnt;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index c11c4b61d832..da9845e35fde 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -214,10 +214,9 @@ static int sde_smmu_attach(struct sde_rot_data_type *mdata)
SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
}
} else {
- SDEROT_ERR(
+ SDEROT_DBG(
"iommu device not attached for domain[%d]\n",
i);
- return -ENODEV;
}
}
return 0;
@@ -492,11 +491,13 @@ int sde_smmu_probe(struct platform_device *pdev)
mp->num_vreg = 1;
}
- rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
- mp->num_vreg, true);
- if (rc) {
- SDEROT_ERR("vreg config failed rc=%d\n", rc);
- return rc;
+ if (mp->vreg_config) {
+ rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
+ mp->num_vreg, true);
+ if (rc) {
+ SDEROT_ERR("vreg config failed rc=%d\n", rc);
+ goto release_vreg;
+ }
}
rc = sde_smmu_clk_register(pdev, mp);
@@ -504,18 +505,16 @@ int sde_smmu_probe(struct platform_device *pdev)
SDEROT_ERR(
"smmu clk register failed for domain[%d] with err:%d\n",
smmu_domain.domain, rc);
- sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
- false);
- return rc;
+ goto disable_vreg;
}
snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
SDEROT_ERR("mdss bus client register failed\n");
- sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
- false);
- return PTR_ERR(sde_smmu->reg_bus_clt);
+ rc = PTR_ERR(sde_smmu->reg_bus_clt);
+ sde_smmu->reg_bus_clt = NULL;
+ goto unregister_clk;
}
rc = sde_smmu_enable_power(sde_smmu, true);
@@ -531,6 +530,7 @@ int sde_smmu_probe(struct platform_device *pdev)
SDEROT_ERR("iommu create mapping failed for domain[%d]\n",
smmu_domain.domain);
rc = PTR_ERR(sde_smmu->mmu_mapping);
+ sde_smmu->mmu_mapping = NULL;
goto disable_power;
}
@@ -558,13 +558,20 @@ int sde_smmu_probe(struct platform_device *pdev)
release_mapping:
arm_iommu_release_mapping(sde_smmu->mmu_mapping);
+ sde_smmu->mmu_mapping = NULL;
disable_power:
sde_smmu_enable_power(sde_smmu, false);
bus_client_destroy:
sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
sde_smmu->reg_bus_clt = NULL;
- sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
- false);
+unregister_clk:
+disable_vreg:
+ sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
+ sde_smmu->mp.num_vreg, false);
+release_vreg:
+ devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
+ sde_smmu->mp.vreg_config = NULL;
+ sde_smmu->mp.num_vreg = 0;
return rc;
}
@@ -575,9 +582,21 @@ int sde_smmu_remove(struct platform_device *pdev)
for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
sde_smmu = sde_smmu_get_cb(i);
- if (sde_smmu && sde_smmu->dev &&
- (sde_smmu->dev == &pdev->dev))
- arm_iommu_release_mapping(sde_smmu->mmu_mapping);
+ if (!sde_smmu || !sde_smmu->dev ||
+ (sde_smmu->dev != &pdev->dev))
+ continue;
+
+ sde_smmu->dev = NULL;
+ arm_iommu_release_mapping(sde_smmu->mmu_mapping);
+ sde_smmu->mmu_mapping = NULL;
+ sde_smmu_enable_power(sde_smmu, false);
+ sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
+ sde_smmu->reg_bus_clt = NULL;
+ sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
+ sde_smmu->mp.num_vreg, false);
+ devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
+ sde_smmu->mp.vreg_config = NULL;
+ sde_smmu->mp.num_vreg = 0;
}
return 0;
}
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 88a3b4b6f7ba..3835a2e45882 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1369,6 +1369,22 @@ static int hfi_process_session_flush_done(u32 device_id,
cmd_done.status = hfi_map_err_status(pkt->error_type);
cmd_done.size = sizeof(u32);
+ switch (pkt->flush_type) {
+ case HFI_FLUSH_OUTPUT:
+ cmd_done.data.flush_type = HAL_FLUSH_OUTPUT;
+ break;
+ case HFI_FLUSH_INPUT:
+ cmd_done.data.flush_type = HAL_FLUSH_INPUT;
+ break;
+ case HFI_FLUSH_ALL:
+ cmd_done.data.flush_type = HAL_FLUSH_ALL;
+ break;
+ default:
+ dprintk(VIDC_ERR,
+ "%s: invalid flush type!", __func__);
+ return -EINVAL;
+ }
+
*info = (struct msm_vidc_cb_info) {
.response_type = HAL_SESSION_FLUSH_DONE,
.response.cmd = cmd_done,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 28faa1436ef5..8b1329db1742 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1243,14 +1243,13 @@ void *msm_vidc_open(int core_id, int session_type)
return inst;
fail_init:
- v4l2_fh_del(&inst->event_handler);
- v4l2_fh_exit(&inst->event_handler);
- vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
-
mutex_lock(&core->lock);
list_del(&inst->list);
mutex_unlock(&core->lock);
+ v4l2_fh_del(&inst->event_handler);
+ v4l2_fh_exit(&inst->event_handler);
+ vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
fail_bufq_output:
vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
fail_bufq_capture:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index a2edbb6a8270..c5b4872b8e23 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1505,6 +1505,9 @@ static void handle_session_flush(enum hal_command_response cmd, void *data)
{
struct msm_vidc_cb_cmd_done *response = data;
struct msm_vidc_inst *inst;
+ struct v4l2_event flush_event = {0};
+ u32 *ptr = NULL;
+ enum hal_flush flush_type;
int rc;
if (!response) {
@@ -1532,8 +1535,31 @@ static void handle_session_flush(enum hal_command_response cmd, void *data)
}
}
atomic_dec(&inst->in_flush);
- dprintk(VIDC_DBG, "Notify flush complete to client\n");
- msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_FLUSH_DONE);
+ flush_event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE;
+ ptr = (u32 *)flush_event.u.data;
+
+ flush_type = response->data.flush_type;
+ switch (flush_type) {
+ case HAL_FLUSH_INPUT:
+ ptr[0] = V4L2_QCOM_CMD_FLUSH_OUTPUT;
+ break;
+ case HAL_FLUSH_OUTPUT:
+ ptr[0] = V4L2_QCOM_CMD_FLUSH_CAPTURE;
+ break;
+ case HAL_FLUSH_ALL:
+ ptr[0] |= V4L2_QCOM_CMD_FLUSH_CAPTURE;
+ ptr[0] |= V4L2_QCOM_CMD_FLUSH_OUTPUT;
+ break;
+ default:
+ dprintk(VIDC_ERR, "Invalid flush type received!");
+ goto exit;
+ }
+
+ dprintk(VIDC_DBG,
+ "Notify flush complete, flush_type: %x\n", flush_type);
+ v4l2_event_queue_fh(&inst->event_handler, &flush_event);
+
+exit:
put_inst(inst);
}
@@ -2455,7 +2481,6 @@ static int msm_comm_session_abort(struct msm_vidc_inst *inst)
}
hdev = inst->core->device;
abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE);
- init_completion(&inst->completions[abort_completion]);
rc = call_hfi_op(hdev, session_abort, (void *)inst->session);
if (rc) {
@@ -2637,8 +2662,6 @@ static int msm_comm_init_core(struct msm_vidc_inst *inst)
__func__);
}
- init_completion(&core->completions
- [SYS_MSG_INDEX(HAL_SYS_INIT_DONE)]);
rc = call_hfi_op(hdev, core_init, hdev->hfi_device_data);
if (rc) {
dprintk(VIDC_ERR, "Failed to init core, id = %d\n",
@@ -2742,8 +2765,6 @@ static int msm_comm_session_init(int flipped_state,
dprintk(VIDC_ERR, "Invalid session\n");
return -EINVAL;
}
- init_completion(
- &inst->completions[SESSION_MSG_INDEX(HAL_SESSION_INIT_DONE)]);
rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
inst, get_hal_domain(inst->session_type),
@@ -2881,8 +2902,6 @@ static int msm_vidc_start(int flipped_state, struct msm_vidc_inst *inst)
inst, inst->state);
goto exit;
}
- init_completion(
- &inst->completions[SESSION_MSG_INDEX(HAL_SESSION_START_DONE)]);
rc = call_hfi_op(hdev, session_start, (void *) inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -2912,8 +2931,6 @@ static int msm_vidc_stop(int flipped_state, struct msm_vidc_inst *inst)
goto exit;
}
dprintk(VIDC_DBG, "Send Stop to hal\n");
- init_completion(
- &inst->completions[SESSION_MSG_INDEX(HAL_SESSION_STOP_DONE)]);
rc = call_hfi_op(hdev, session_stop, (void *) inst->session);
if (rc) {
dprintk(VIDC_ERR, "Failed to send stop\n");
@@ -2943,8 +2960,6 @@ static int msm_vidc_release_res(int flipped_state, struct msm_vidc_inst *inst)
}
dprintk(VIDC_DBG,
"Send release res to hal\n");
- init_completion(&inst->completions[
- SESSION_MSG_INDEX(HAL_SESSION_RELEASE_RESOURCE_DONE)]);
rc = call_hfi_op(hdev, session_release_res, (void *) inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -2975,8 +2990,6 @@ static int msm_comm_session_close(int flipped_state,
}
dprintk(VIDC_DBG,
"Send session close to hal\n");
- init_completion(
- &inst->completions[SESSION_MSG_INDEX(HAL_SESSION_END_DONE)]);
rc = call_hfi_op(hdev, session_end, (void *) inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -4022,8 +4035,6 @@ int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype,
}
mutex_unlock(&inst->sync_lock);
- init_completion(&inst->completions[
- SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO)]);
switch (ptype) {
case HAL_PARAM_PROFILE_LEVEL_CURRENT:
case HAL_CONFIG_VDEC_ENTROPY:
@@ -4253,8 +4264,6 @@ int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
if (inst->state != MSM_VIDC_CORE_INVALID &&
core->state != VIDC_CORE_INVALID) {
buffer_info.response_required = true;
- init_completion(&inst->completions[SESSION_MSG_INDEX
- (HAL_SESSION_RELEASE_BUFFER_DONE)]);
rc = call_hfi_op(hdev, session_release_buffers,
(void *)inst->session, &buffer_info);
if (rc) {
@@ -4325,9 +4334,6 @@ int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst)
if (inst->state != MSM_VIDC_CORE_INVALID &&
core->state != VIDC_CORE_INVALID) {
buffer_info.response_required = true;
- init_completion(
- &inst->completions[SESSION_MSG_INDEX
- (HAL_SESSION_RELEASE_BUFFER_DONE)]);
rc = call_hfi_op(hdev, session_release_buffers,
(void *)inst->session, &buffer_info);
if (rc) {
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index eceb86b01a7c..9970c4152ef9 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -934,8 +934,6 @@ err_create_pkt:
return rc;
}
-static DECLARE_COMPLETION(release_resources_done);
-
static int __alloc_imem(struct venus_hfi_device *device, unsigned long size)
{
struct imem *imem = NULL;
@@ -2172,8 +2170,6 @@ static int venus_hfi_core_init(void *device)
dev = device;
mutex_lock(&dev->lock);
- init_completion(&release_resources_done);
-
rc = __load_fw(dev);
if (rc) {
dprintk(VIDC_ERR, "Failed to load Venus FW\n");
@@ -3460,7 +3456,6 @@ static int __response_handler(struct venus_hfi_device *device)
break;
case HAL_SYS_RELEASE_RESOURCE_DONE:
dprintk(VIDC_DBG, "Received SYS_RELEASE_RESOURCE\n");
- complete(&release_resources_done);
break;
case HAL_SYS_INIT_DONE:
dprintk(VIDC_DBG, "Received SYS_INIT_DONE\n");
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 116ce12c8dba..820c8685a75b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1351,6 +1351,7 @@ struct msm_vidc_cb_cmd_done {
struct vidc_hal_session_init_done session_init_done;
struct hal_buffer_info buffer_info;
union hal_get_property property;
+ enum hal_flush flush_type;
} data;
};
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 3ffe2ecfd5ef..c8946f98ced4 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1029,6 +1029,11 @@ static int match_child(struct device *dev, void *data)
return !strcmp(dev_name(dev), (char *)data);
}
+static void s5p_mfc_memdev_release(struct device *dev)
+{
+ dma_release_declared_memory(dev);
+}
+
static void *mfc_get_drv_data(struct platform_device *pdev);
static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
@@ -1041,6 +1046,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
mfc_err("Not enough memory\n");
return -ENOMEM;
}
+
+ dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
+ dev->mem_dev_l->release = s5p_mfc_memdev_release;
device_initialize(dev->mem_dev_l);
of_property_read_u32_array(dev->plat_dev->dev.of_node,
"samsung,mfc-l", mem_info, 2);
@@ -1058,6 +1066,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
mfc_err("Not enough memory\n");
return -ENOMEM;
}
+
+ dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
+ dev->mem_dev_r->release = s5p_mfc_memdev_release;
device_initialize(dev->mem_dev_r);
of_property_read_u32_array(dev->plat_dev->dev.of_node,
"samsung,mfc-r", mem_info, 2);
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 84fa6e9b59a1..67314c034cdb 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -29,7 +29,7 @@
#define RC5_BIT_START (1 * RC5_UNIT)
#define RC5_BIT_END (1 * RC5_UNIT)
#define RC5X_SPACE (4 * RC5_UNIT)
-#define RC5_TRAILER (10 * RC5_UNIT) /* In reality, approx 100 */
+#define RC5_TRAILER (6 * RC5_UNIT) /* In reality, approx 100 */
enum rc5_state {
STATE_INACTIVE,
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 082ff5608455..317ef63ee789 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1407,8 +1407,10 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
if (p->fname) {
priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
- if (priv->ctrl.fname == NULL)
- return -ENOMEM;
+ if (priv->ctrl.fname == NULL) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
}
/*
@@ -1440,6 +1442,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
} else
priv->state = XC2028_WAITING_FIRMWARE;
}
+unlock:
mutex_unlock(&priv->lock);
return rc;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 565a59310747..34b35ebd60ac 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -1073,7 +1073,7 @@ static int airspy_probe(struct usb_interface *intf,
if (ret) {
dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
- goto err_unregister_v4l2_dev;
+ goto err_free_controls;
}
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
@@ -1082,7 +1082,6 @@ static int airspy_probe(struct usb_interface *intf,
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 78c12d22dfbb..5dab02432e82 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -278,6 +278,9 @@ static void snd_usbtv_trigger(struct work_struct *work)
{
struct usbtv *chip = container_of(work, struct usbtv, snd_trigger);
+ if (!chip->snd)
+ return;
+
if (atomic_read(&chip->snd_stream))
usbtv_audio_start(chip);
else
@@ -378,6 +381,8 @@ err:
void usbtv_audio_free(struct usbtv *usbtv)
{
+ cancel_work_sync(&usbtv->snd_trigger);
+
if (usbtv->snd && usbtv->udev) {
snd_card_free(usbtv->snd);
usbtv->snd = NULL;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index d11fd6ac2df0..5cefca95734e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -148,6 +148,26 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_H264,
.fcc = V4L2_PIX_FMT_H264,
},
+ {
+ .name = "Greyscale 8 L/R (Y8I)",
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .name = "Greyscale 12 L/R (Y12I)",
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Bayer 10-bit (SRGGB10P)",
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
};
/* ------------------------------------------------------------------------
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index f0f2391e1b43..7e4d3eea371b 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -119,6 +119,18 @@
#define UVC_GUID_FORMAT_H264 \
{ 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8I \
+ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12I \
+ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Z16 \
+ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RW10 \
+ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
/* ------------------------------------------------------------------------
* Driver specific constants.
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 11f39791ec33..47f37683893a 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1505,7 +1505,7 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
void *pb, int nonblocking)
{
unsigned long flags;
- int ret;
+ int ret = 0;
/*
* Wait for at least one buffer to become available on the done_list.
@@ -1521,10 +1521,12 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
spin_lock_irqsave(&q->done_lock, flags);
*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
/*
- * Only remove the buffer from done_list if v4l2_buffer can handle all
- * the planes.
+ * Only remove the buffer from done_list if all planes can be
+ * handled. Some cases such as V4L2 file I/O and DVB have pb
+ * == NULL; skip the check then as there's nothing to verify.
*/
- ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (pb)
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
if (!ret)
list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 502984c724ff..6c441be8f893 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -67,6 +67,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
return 0;
}
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+ return __verify_planes_array(vb, pb);
+}
+
/**
* __verify_length() - Verify that the bytesused value for each plane fits in
* the plane length and that the data offset doesn't exceed the bytesused value.
@@ -432,6 +437,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
}
static const struct vb2_buf_ops v4l2_buf_ops = {
+ .verify_planes_array = __verify_planes_array_core,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.set_timestamp = __set_timestamp,
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 207a3bd68559..a867cc91657e 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -34,7 +34,13 @@ struct qcom_rpm_resource {
struct qcom_rpm_data {
u32 version;
const struct qcom_rpm_resource *resource_table;
- unsigned n_resources;
+ unsigned int n_resources;
+ unsigned int req_ctx_off;
+ unsigned int req_sel_off;
+ unsigned int ack_ctx_off;
+ unsigned int ack_sel_off;
+ unsigned int req_sel_size;
+ unsigned int ack_sel_size;
};
struct qcom_rpm {
@@ -61,11 +67,7 @@ struct qcom_rpm {
#define RPM_REQUEST_TIMEOUT (5 * HZ)
-#define RPM_REQUEST_CONTEXT 3
-#define RPM_REQ_SELECT 11
-#define RPM_ACK_CONTEXT 15
-#define RPM_ACK_SELECTOR 23
-#define RPM_SELECT_SIZE 7
+#define RPM_MAX_SEL_SIZE 7
#define RPM_NOTIFICATION BIT(30)
#define RPM_REJECTED BIT(31)
@@ -157,6 +159,12 @@ static const struct qcom_rpm_data apq8064_template = {
.version = 3,
.resource_table = apq8064_rpm_resource_table,
.n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
@@ -240,6 +248,12 @@ static const struct qcom_rpm_data msm8660_template = {
.version = 2,
.resource_table = msm8660_rpm_resource_table,
.n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 19,
+ .ack_sel_off = 27,
+ .req_sel_size = 7,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
@@ -322,6 +336,12 @@ static const struct qcom_rpm_data msm8960_template = {
.version = 3,
.resource_table = msm8960_rpm_resource_table,
.n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
@@ -362,6 +382,12 @@ static const struct qcom_rpm_data ipq806x_template = {
.version = 3,
.resource_table = ipq806x_rpm_resource_table,
.n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct of_device_id qcom_rpm_of_match[] = {
@@ -380,7 +406,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
{
const struct qcom_rpm_resource *res;
const struct qcom_rpm_data *data = rpm->data;
- u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
+ u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 };
int left;
int ret = 0;
int i;
@@ -398,12 +424,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
- for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
+ for (i = 0; i < rpm->data->req_sel_size; i++) {
writel_relaxed(sel_mask[i],
- RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
+ RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
}
- writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
+ writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off));
reinit_completion(&rpm->ack);
regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
@@ -426,10 +452,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
u32 ack;
int i;
- ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
- for (i = 0; i < RPM_SELECT_SIZE; i++)
- writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
- writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+ ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+ for (i = 0; i < rpm->data->ack_sel_size; i++)
+ writel_relaxed(0,
+ RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
+ writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
if (ack & RPM_NOTIFICATION) {
dev_warn(rpm->dev, "ignoring notification!\n");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9f0d9b7b7e17..ca092ef75cfe 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -555,6 +555,11 @@ config QPNP_MISC
peripheral. The MISC peripheral holds the USB ID interrupt
and the driver provides an API to check if this interrupt
is available on the current PMIC chip.
+config MEMORY_STATE_TIME
+ tristate "Memory freq/bandwidth time statistics"
+ depends on PROFILING
+ help
+ Memory time statistics exported to /sys/kernel/memory_state_time
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 6acb70964fb8..40b7e3b603e2 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -64,3 +64,4 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_UID_CPUTIME) += uid_cputime.o
obj-y += qcom/
obj-$(CONFIG_QPNP_MISC) += qpnp-misc.o
+obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 6982f603fadc..ab6f392d3504 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Werror -Wno-unused-const-variable
+ccflags-y := -Werror $(call cc-disable-warning, unused-const-variable)
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 103baf0e0c5b..ea3eeb7011e1 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -25,7 +25,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
afu = cxl_pci_to_afu(dev);
- get_device(&afu->dev);
ctx = cxl_context_alloc();
if (IS_ERR(ctx)) {
rc = PTR_ERR(ctx);
@@ -61,7 +60,6 @@ err_mapping:
err_ctx:
kfree(ctx);
err_dev:
- put_device(&afu->dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -87,8 +85,6 @@ int cxl_release_context(struct cxl_context *ctx)
if (ctx->status >= STARTED)
return -EBUSY;
- put_device(&ctx->afu->dev);
-
cxl_context_free(ctx);
return 0;
@@ -176,7 +172,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
- get_pid(ctx->pid);
+ ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
kernel = false;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2faa1270d085..262b88eac414 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -42,7 +42,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
spin_lock_init(&ctx->sste_lock);
ctx->afu = afu;
ctx->master = master;
- ctx->pid = NULL; /* Set in start work ioctl */
+ ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
ctx->mapping = mapping;
@@ -97,6 +97,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->pe = i;
ctx->elem = &ctx->afu->spa[i];
ctx->pe_inserted = false;
+
+ /*
+ * take a ref on the afu so that it stays alive at-least till
+ * this context is reclaimed inside reclaim_ctx.
+ */
+ cxl_afu_get(afu);
return 0;
}
@@ -211,7 +217,11 @@ int __detach_context(struct cxl_context *ctx)
WARN_ON(cxl_detach_process(ctx) &&
cxl_adapter_link_ok(ctx->afu->adapter));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+
+ /* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
+ put_pid(ctx->glpid);
+
cxl_ctx_put();
return 0;
}
@@ -278,6 +288,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
if (ctx->irq_bitmap)
kfree(ctx->irq_bitmap);
+ /* Drop ref to the afu device taken during cxl_context_init */
+ cxl_afu_put(ctx->afu);
+
kfree(ctx);
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 0cfb9c129f27..a521bc72cec2 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -403,6 +403,18 @@ struct cxl_afu {
bool enabled;
};
+/* AFU refcount management */
+static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
+{
+
+ return (get_device(&afu->dev) == NULL) ? NULL : afu;
+}
+
+static inline void cxl_afu_put(struct cxl_afu *afu)
+{
+ put_device(&afu->dev);
+}
+
struct cxl_irq_name {
struct list_head list;
@@ -433,6 +445,9 @@ struct cxl_context {
unsigned int sst_size, sst_lru;
wait_queue_head_t wq;
+ /* pid of the group leader associated with the pid */
+ struct pid *glpid;
+ /* use mm context associated with this pid for ds faults */
struct pid *pid;
spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
/* Only used in PR mode */
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 25a5418c55cb..81c3f75b7330 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -166,13 +166,92 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
+/*
+ * Returns the mm_struct corresponding to the context ctx via ctx->pid
+ * In case the task has exited we use the task group leader accessible
+ * via ctx->glpid to find the next task in the thread group that has a
+ * valid mm_struct associated with it. If a task with valid mm_struct
+ * is found the ctx->pid is updated to use the task struct for subsequent
+ * translations. In case no valid mm_struct is found in the task group to
+ * service the fault a NULL is returned.
+ */
+static struct mm_struct *get_mem_context(struct cxl_context *ctx)
+{
+ struct task_struct *task = NULL;
+ struct mm_struct *mm = NULL;
+ struct pid *old_pid = ctx->pid;
+
+ if (old_pid == NULL) {
+ pr_warn("%s: Invalid context for pe=%d\n",
+ __func__, ctx->pe);
+ return NULL;
+ }
+
+ task = get_pid_task(old_pid, PIDTYPE_PID);
+
+ /*
+ * pid_alive may look racy but this saves us from costly
+ * get_task_mm when the task is a zombie. In worst case
+ * we may think a task is alive, which is about to die
+ * but get_task_mm will return NULL.
+ */
+ if (task != NULL && pid_alive(task))
+ mm = get_task_mm(task);
+
+ /* release the task struct that was taken earlier */
+ if (task)
+ put_task_struct(task);
+ else
+ pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
+ __func__, pid_nr(old_pid), ctx->pe);
+
+ /*
+ * If we couldn't find the mm context then use the group
+ * leader to iterate over the task group and find a task
+ * that gives us mm_struct.
+ */
+ /*
+ * NOTE(review): ctx->pid is swapped below without an explicit
+ * lock; this appears to rely on all faults for a context being
+ * serialized through ctx->fault_work -- confirm before adding
+ * new callers.
+ */
+ if (unlikely(mm == NULL && ctx->glpid != NULL)) {
+
+ rcu_read_lock();
+ task = pid_task(ctx->glpid, PIDTYPE_PID);
+ if (task)
+ do {
+ mm = get_task_mm(task);
+ if (mm) {
+ ctx->pid = get_task_pid(task,
+ PIDTYPE_PID);
+ break;
+ }
+ task = next_thread(task);
+ } while (task && !thread_group_leader(task));
+ rcu_read_unlock();
+
+ /* check if we switched pid */
+ if (ctx->pid != old_pid) {
+ if (mm)
+ pr_devel("%s:pe=%i switch pid %i->%i\n",
+ __func__, ctx->pe, pid_nr(old_pid),
+ pid_nr(ctx->pid));
+ else
+ pr_devel("%s:Cannot find mm for pid=%i\n",
+ __func__, pid_nr(old_pid));
+
+ /* drop the reference to older pid */
+ put_pid(old_pid);
+ }
+ }
+
+ return mm;
+}
+
+
+
void cxl_handle_fault(struct work_struct *fault_work)
{
struct cxl_context *ctx =
container_of(fault_work, struct cxl_context, fault_work);
u64 dsisr = ctx->dsisr;
u64 dar = ctx->dar;
- struct task_struct *task = NULL;
struct mm_struct *mm = NULL;
if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
@@ -195,17 +274,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
if (!ctx->kernel) {
- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_handle_fault unable to get task %i\n",
- pid_nr(ctx->pid));
+
+ mm = get_mem_context(ctx);
+ /* indicates all the thread in task group have exited */
+ if (mm == NULL) {
+ pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
+ __func__, ctx->pe, pid_nr(ctx->pid));
cxl_ack_ae(ctx);
return;
- }
- if (!(mm = get_task_mm(task))) {
- pr_devel("cxl_handle_fault unable to get mm %i\n",
- pid_nr(ctx->pid));
- cxl_ack_ae(ctx);
- goto out;
+ } else {
+ pr_devel("Handling page fault for pe=%d pid=%i\n",
+ ctx->pe, pid_nr(ctx->pid));
}
}
@@ -218,33 +297,22 @@ void cxl_handle_fault(struct work_struct *fault_work)
if (mm)
mmput(mm);
-out:
- if (task)
- put_task_struct(task);
}
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
- int rc;
- struct task_struct *task;
struct mm_struct *mm;
- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_prefault_one unable to get task %i\n",
- pid_nr(ctx->pid));
- return;
- }
- if (!(mm = get_task_mm(task))) {
+ mm = get_mem_context(ctx);
+ if (mm == NULL) {
pr_devel("cxl_prefault_one unable to get mm %i\n",
pid_nr(ctx->pid));
- put_task_struct(task);
return;
}
- rc = cxl_fault_segment(ctx, mm, ea);
+ cxl_fault_segment(ctx, mm, ea);
mmput(mm);
- put_task_struct(task);
}
static u64 next_segment(u64 ea, u64 vsid)
@@ -263,18 +331,13 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
struct copro_slb slb;
struct vm_area_struct *vma;
int rc;
- struct task_struct *task;
struct mm_struct *mm;
- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("cxl_prefault_vma unable to get task %i\n",
- pid_nr(ctx->pid));
- return;
- }
- if (!(mm = get_task_mm(task))) {
+ mm = get_mem_context(ctx);
+ if (mm == NULL) {
pr_devel("cxl_prefault_vm unable to get mm %i\n",
pid_nr(ctx->pid));
- goto out1;
+ return;
}
down_read(&mm->mmap_sem);
@@ -295,8 +358,6 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
up_read(&mm->mmap_sem);
mmput(mm);
-out1:
- put_task_struct(task);
}
void cxl_prefault(struct cxl_context *ctx, u64 wed)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 7ccd2998be92..783337d22f36 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -67,7 +67,13 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
spin_unlock(&adapter->afu_list_lock);
goto err_put_adapter;
}
- get_device(&afu->dev);
+
+ /*
+ * taking a ref to the afu so that it doesn't go away
+ * for rest of the function. This ref is released before
+ * we return.
+ */
+ cxl_afu_get(afu);
spin_unlock(&adapter->afu_list_lock);
if (!afu->current_mode)
@@ -90,13 +96,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
file->private_data = ctx;
cxl_ctx_get();
- /* Our ref on the AFU will now hold the adapter */
- put_device(&adapter->dev);
-
- return 0;
+ /* indicate success */
+ rc = 0;
err_put_afu:
- put_device(&afu->dev);
+ /* release the ref taken earlier */
+ cxl_afu_put(afu);
err_put_adapter:
put_device(&adapter->dev);
return rc;
@@ -131,8 +136,6 @@ int afu_release(struct inode *inode, struct file *file)
mutex_unlock(&ctx->mapping_lock);
}
- put_device(&ctx->afu->dev);
-
/*
* At this this point all bottom halfs have finished and we should be
* getting no more IRQs from the hardware for this context. Once it's
@@ -198,8 +201,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
* that performs this ioctl and not the process that opened the file.
+ * Also we grab the PID of the group leader so that if the task that
+ * has performed the attach operation exits the mm context of the
+ * process is still accessible.
*/
- ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index be2c8e248e2e..0c6c17a1c59e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -138,6 +138,7 @@ static const struct pci_device_id cxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
{ PCI_DEVICE_CLASS(0x120000, ~0), },
{ }
diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c
new file mode 100644
index 000000000000..34c797a06a31
--- /dev/null
+++ b/drivers/misc/memory_state_time.c
@@ -0,0 +1,454 @@
+/* drivers/misc/memory_state_time.c
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/hashtable.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/memory-state-time.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/time.h>
+#include <linux/timekeeping.h>
+#include <linux/workqueue.h>
+
+#define KERNEL_ATTR_RO(_name) \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+#define KERNEL_ATTR_RW(_name) \
+static struct kobj_attribute _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+#define FREQ_HASH_BITS 4
+DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS);
+
+static DEFINE_MUTEX(mem_lock);
+
+#define TAG "memory_state_time"
+#define BW_NODE "/soc/memory-state-time"
+#define FREQ_TBL "freq-tbl"
+#define BW_TBL "bw-buckets"
+#define NUM_SOURCES "num-sources"
+
+/* Index into freq_buckets[] used for the initial current frequency. */
+#define LOWEST_FREQ 2
+
+/*
+ * Shared accounting state. Updated from the single-threaded workqueue
+ * and read from sysfs; mem_lock guards the table updates.
+ */
+static int curr_bw;
+static int curr_freq;
+static u32 *bw_buckets;
+static u32 *freq_buckets;
+static int num_freqs;
+static int num_buckets;
+static int registered_bw_sources;
+static u64 last_update;
+static bool init_success;
+static struct workqueue_struct *memory_wq;
+static u32 num_sources = 10;
+static int *bandwidths;
+
+/* One hash-table node per known memory frequency. */
+struct freq_entry {
+ int freq;
+ u64 *buckets; /* Bandwidth buckets. */
+ struct hlist_node hash;
+};
+
+/* Work item carrying one frequency or bandwidth change to the workqueue. */
+struct queue_container {
+ struct work_struct update_state;
+ int value;
+ u64 time_now;
+ int id;
+ struct mutex *lock;
+};
+
+/*
+ * Map a bandwidth value to its bucket index: the first bucket whose
+ * maximum exceeds bw, or the last bucket if bw is above every maximum.
+ * Returns 0 when no bucket table was configured.
+ */
+static int find_bucket(int bw)
+{
+ int i;
+
+ if (bw_buckets != NULL) {
+ for (i = 0; i < num_buckets; i++) {
+ if (bw_buckets[i] > bw) {
+ pr_debug("Found bucket %d for bandwidth %d\n",
+ i, bw);
+ return i;
+ }
+ }
+ return num_buckets - 1;
+ }
+ return 0;
+}
+
+/*
+ * Return the elapsed time since the previous update and advance
+ * last_update to time_now. Mutates shared state, so the caller
+ * (update_table) must hold mem_lock.
+ */
+static u64 get_time_diff(u64 time_now)
+{
+ u64 ms;
+
+ ms = time_now - last_update;
+ last_update = time_now;
+ return ms;
+}
+
+/*
+ * sysfs read handler: emits one line per known frequency of the form
+ * "<freq> <bucket0> <bucket1> ...\n", truncated to PAGE_SIZE.
+ */
+static ssize_t show_stat_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int i, j;
+ int len = 0;
+ struct freq_entry *freq_entry;
+
+ /*
+ * Hold mem_lock so the bucket counters are not concurrently
+ * modified by update_table() running on the workqueue.
+ */
+ mutex_lock(&mem_lock);
+ for (i = 0; i < num_freqs; i++) {
+ hash_for_each_possible(freq_hash_table, freq_entry, hash,
+ freq_buckets[i]) {
+ if (freq_entry->freq == freq_buckets[i]) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d ", freq_buckets[i]);
+ if (len >= PAGE_SIZE)
+ break;
+ for (j = 0; j < num_buckets; j++) {
+ len += scnprintf(buf + len,
+ PAGE_SIZE - len,
+ "%llu ",
+ freq_entry->buckets[j]);
+ }
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "\n");
+ }
+ }
+ }
+ mutex_unlock(&mem_lock);
+ pr_debug("Current Time: %llu\n", ktime_get_boot_ns());
+ return len;
+}
+KERNEL_ATTR_RO(show_stat);
+
+/*
+ * Charge the time elapsed since the last update to the bucket for the
+ * current (freq, bw) pair. Caller must hold mem_lock: this reads
+ * curr_freq/curr_bw and mutates last_update via get_time_diff().
+ */
+static void update_table(u64 time_now)
+{
+ struct freq_entry *freq_entry;
+
+ pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq);
+ hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) {
+ if (curr_freq == freq_entry->freq) {
+ freq_entry->buckets[find_bucket(curr_bw)]
+ += get_time_diff(time_now);
+ break;
+ }
+ }
+}
+
+/* Linear scan: is freq one of the frequencies read from the DT table? */
+static bool freq_exists(int freq)
+{
+ int i;
+
+ for (i = 0; i < num_freqs; i++) {
+ if (freq == freq_buckets[i])
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Record source `index`'s new bandwidth and return the sum over all
+ * registered sources. Caller must hold mem_lock.
+ *
+ * NOTE(review): bandwidths[] is only allocated in get_bw_buckets()
+ * when the DT provides a bw-buckets table -- this dereferences NULL
+ * if a bandwidth source reports without one; confirm DT always has it.
+ */
+static int calculate_total_bw(int bw, int index)
+{
+ int i;
+ int total_bw = 0;
+
+ pr_debug("memory_state_time New bw %d for id %d\n", bw, index);
+ bandwidths[index] = bw;
+ for (i = 0; i < registered_bw_sources; i++)
+ total_bw += bandwidths[i];
+ return total_bw;
+}
+
+/*
+ * Workqueue handler for a frequency change: close out the elapsed
+ * interval at the old state, then switch curr_freq. Frees the
+ * queue_container allocated by memory_state_freq_update().
+ */
+static void freq_update_do_work(struct work_struct *work)
+{
+ struct queue_container *freq_state_update
+ = container_of(work, struct queue_container,
+ update_state);
+ /* container_of of an embedded member is never NULL in practice. */
+ if (freq_state_update) {
+ mutex_lock(&mem_lock);
+ update_table(freq_state_update->time_now);
+ curr_freq = freq_state_update->value;
+ mutex_unlock(&mem_lock);
+ kfree(freq_state_update);
+ }
+}
+
+/*
+ * Workqueue handler for a bandwidth change: close out the elapsed
+ * interval at the old state, then recompute curr_bw across all
+ * sources. Frees the queue_container allocated by the reporter.
+ */
+static void bw_update_do_work(struct work_struct *work)
+{
+ struct queue_container *bw_state_update
+ = container_of(work, struct queue_container,
+ update_state);
+ if (bw_state_update) {
+ mutex_lock(&mem_lock);
+ update_table(bw_state_update->time_now);
+ curr_bw = calculate_total_bw(bw_state_update->value,
+ bw_state_update->id);
+ mutex_unlock(&mem_lock);
+ kfree(bw_state_update);
+ }
+}
+
+/*
+ * ->update_call hook for frequency sources. Called in a context that
+ * may not take mem_lock, so the update is packaged into a work item
+ * (timestamped here) and deferred to memory_wq. Unknown frequencies
+ * and allocation failures are silently dropped.
+ */
+static void memory_state_freq_update(struct memory_state_update_block *ub,
+ int value)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+ if (freq_exists(value) && init_success) {
+ struct queue_container *freq_container
+ = kmalloc(sizeof(struct queue_container),
+ GFP_KERNEL);
+ if (!freq_container)
+ return;
+ INIT_WORK(&freq_container->update_state,
+ freq_update_do_work);
+ freq_container->time_now = ktime_get_boot_ns();
+ freq_container->value = value;
+ pr_debug("Scheduling freq update in work queue\n");
+ queue_work(memory_wq, &freq_container->update_state);
+ } else {
+ pr_debug("Freq does not exist.\n");
+ }
+ }
+}
+
+/*
+ * ->update_call hook for bandwidth sources. Mirrors
+ * memory_state_freq_update() but additionally carries the source id
+ * so the workqueue can update the right bandwidths[] slot.
+ */
+static void memory_state_bw_update(struct memory_state_update_block *ub,
+ int value)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+ if (init_success) {
+ struct queue_container *bw_container
+ = kmalloc(sizeof(struct queue_container),
+ GFP_KERNEL);
+ if (!bw_container)
+ return;
+ INIT_WORK(&bw_container->update_state,
+ bw_update_do_work);
+ bw_container->time_now = ktime_get_boot_ns();
+ bw_container->value = value;
+ bw_container->id = ub->id;
+ pr_debug("Scheduling bandwidth update in work queue\n");
+ queue_work(memory_wq, &bw_container->update_state);
+ }
+ }
+}
+
+/*
+ * Allocate an update block for a frequency source. The caller owns
+ * the returned block. Note ->id is deliberately left unset here:
+ * the frequency path (freq_update_do_work) never reads it.
+ * Returns NULL on allocation failure or when the feature is disabled.
+ */
+struct memory_state_update_block *memory_state_register_frequency_source(void)
+{
+ struct memory_state_update_block *block;
+
+ if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+ pr_debug("Allocating frequency source\n");
+ block = kmalloc(sizeof(struct memory_state_update_block),
+ GFP_KERNEL);
+ if (!block)
+ return NULL;
+ block->update_call = memory_state_freq_update;
+ return block;
+ }
+ pr_err("Config option disabled.\n");
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(memory_state_register_frequency_source);
+
+/*
+ * Allocate an update block for a bandwidth source and assign it the
+ * next free slot id (bounded by the DT num-sources value). The caller
+ * owns the returned block. Returns NULL on allocation failure, when
+ * all source slots are taken, or when the feature is disabled.
+ */
+struct memory_state_update_block *memory_state_register_bandwidth_source(void)
+{
+ struct memory_state_update_block *block;
+
+ if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+ pr_debug("Allocating bandwidth source %d\n",
+ registered_bw_sources);
+ block = kmalloc(sizeof(struct memory_state_update_block),
+ GFP_KERNEL);
+ if (!block)
+ return NULL;
+ block->update_call = memory_state_bw_update;
+ if (registered_bw_sources < num_sources) {
+ block->id = registered_bw_sources++;
+ } else {
+ pr_err("Unable to allocate source; max number reached\n");
+ kfree(block);
+ return NULL;
+ }
+ return block;
+ }
+ pr_err("Config option disabled.\n");
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source);
+
+/* Buckets are designated by their maximum.
+ * Returns the buckets decided by the capability of the device.
+ *
+ * Reads num-sources and the optional bw-buckets table from the DT
+ * node and allocates bandwidths[]/bw_buckets[] via devres.
+ * Returns 0 on success, -ENOMEM or a DT read error otherwise.
+ */
+static int get_bw_buckets(struct device *dev)
+{
+ /*
+ * lenb must be zero-initialized: when the bw-buckets property is
+ * absent, of_find_property() leaves it untouched and it is still
+ * assigned to num_buckets below.
+ */
+ int ret, lenb = 0;
+ struct device_node *node = dev->of_node;
+
+ of_property_read_u32(node, NUM_SOURCES, &num_sources);
+ if (of_find_property(node, BW_TBL, &lenb)) {
+ bandwidths = devm_kzalloc(dev,
+ sizeof(*bandwidths) * num_sources, GFP_KERNEL);
+ if (!bandwidths)
+ return -ENOMEM;
+ lenb /= sizeof(*bw_buckets);
+ bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
+ GFP_KERNEL);
+ if (!bw_buckets) {
+ devm_kfree(dev, bandwidths);
+ bandwidths = NULL;
+ return -ENOMEM;
+ }
+ ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
+ lenb);
+ if (ret < 0) {
+ devm_kfree(dev, bandwidths);
+ devm_kfree(dev, bw_buckets);
+ /* Don't leave dangling pointers to freed devres. */
+ bandwidths = NULL;
+ bw_buckets = NULL;
+ pr_err("Unable to read bandwidth table from device tree.\n");
+ return ret;
+ }
+ }
+ /*
+ * NOTE(review): with no bw-buckets property, bandwidths stays
+ * NULL while bandwidth sources may still register -- confirm the
+ * DT always carries the table.
+ */
+ curr_bw = 0;
+ num_buckets = lenb;
+ return 0;
+}
+
+/* Adds struct freq_entry nodes to the hashtable for each compatible frequency.
+ * Returns the supported number of frequencies.
+ *
+ * Reads the freq-tbl DT property, allocates freq_buckets[] and one
+ * zeroed freq_entry (with num_buckets counters) per frequency.
+ * Returns 0 on success, -ENOMEM or a DT read error otherwise.
+ */
+static int freq_buckets_init(struct device *dev)
+{
+ struct freq_entry *freq_entry;
+ int i;
+ /*
+ * lenf must be zero-initialized: when freq-tbl is absent it is
+ * still assigned to num_freqs below.
+ */
+ int ret, lenf = 0;
+ struct device_node *node = dev->of_node;
+
+ if (of_find_property(node, FREQ_TBL, &lenf)) {
+ lenf /= sizeof(*freq_buckets);
+ freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
+ GFP_KERNEL);
+ if (!freq_buckets)
+ return -ENOMEM;
+ pr_debug("freqs found len %d\n", lenf);
+ ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
+ lenf);
+ if (ret < 0) {
+ devm_kfree(dev, freq_buckets);
+ freq_buckets = NULL;
+ pr_err("Unable to read frequency table from device tree.\n");
+ return ret;
+ }
+ pr_debug("ret freq %d\n", ret);
+ }
+ num_freqs = lenf;
+ /*
+ * Only pick the initial frequency when the table actually has an
+ * entry at LOWEST_FREQ; dereferencing a missing/short table was
+ * a NULL / out-of-bounds read.
+ */
+ if (freq_buckets != NULL && num_freqs > LOWEST_FREQ)
+ curr_freq = freq_buckets[LOWEST_FREQ];
+
+ for (i = 0; i < num_freqs; i++) {
+ freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry),
+ GFP_KERNEL);
+ if (!freq_entry)
+ return -ENOMEM;
+ freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets,
+ GFP_KERNEL);
+ if (!freq_entry->buckets) {
+ devm_kfree(dev, freq_entry);
+ return -ENOMEM;
+ }
+ pr_debug("memory_state_time Adding freq to ht %d\n",
+ freq_buckets[i]);
+ freq_entry->freq = freq_buckets[i];
+ hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]);
+ }
+ return 0;
+}
+
+/* /sys/kernel/memory_state_time kobject, created in module init. */
+struct kobject *memory_kobj;
+EXPORT_SYMBOL_GPL(memory_kobj);
+
+/* Single read-only attribute: show_stat. */
+static struct attribute *memory_attrs[] = {
+ &show_stat_attr.attr,
+ NULL
+};
+
+static struct attribute_group memory_attr_group = {
+ .attrs = memory_attrs,
+};
+
+/*
+ * Platform probe: read DT tables, build the per-frequency hashtable,
+ * stamp the accounting start time, and mark the module ready so the
+ * source hooks start queueing updates. All allocations are devres-
+ * managed, so error returns need no manual cleanup.
+ */
+static int memory_state_time_probe(struct platform_device *pdev)
+{
+ int error;
+
+ error = get_bw_buckets(&pdev->dev);
+ if (error)
+ return error;
+ error = freq_buckets_init(&pdev->dev);
+ if (error)
+ return error;
+ last_update = ktime_get_boot_ns();
+ init_success = true;
+
+ pr_debug("memory_state_time initialized with num_freqs %d\n",
+ num_freqs);
+ return 0;
+}
+
+/* Bind against DT nodes with compatible = "memory-state-time". */
+static const struct of_device_id match_table[] = {
+ { .compatible = "memory-state-time" },
+ {}
+};
+
+static struct platform_driver memory_state_time_driver = {
+ .probe = memory_state_time_probe,
+ .driver = {
+ .name = "memory-state-time",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * Module init: create the ordered workqueue, the
+ * /sys/kernel/memory_state_time directory with its attribute group,
+ * and register the platform driver. Unwinds in reverse order via the
+ * labelled gotos on failure.
+ */
+static int __init memory_state_time_init(void)
+{
+ int error;
+
+ hash_init(freq_hash_table);
+ memory_wq = create_singlethread_workqueue("memory_wq");
+ if (!memory_wq) {
+ pr_err("Unable to create workqueue.\n");
+ return -EINVAL;
+ }
+ /*
+ * Create sys/kernel directory for memory_state_time.
+ */
+ memory_kobj = kobject_create_and_add(TAG, kernel_kobj);
+ if (!memory_kobj) {
+ pr_err("Unable to allocate memory_kobj for sysfs directory.\n");
+ error = -ENOMEM;
+ goto wq;
+ }
+ error = sysfs_create_group(memory_kobj, &memory_attr_group);
+ if (error) {
+ pr_err("Unable to create sysfs folder.\n");
+ goto kobj;
+ }
+
+ error = platform_driver_register(&memory_state_time_driver);
+ if (error) {
+ pr_err("Unable to register memory_state_time platform driver.\n");
+ goto group;
+ }
+ return 0;
+
+group: sysfs_remove_group(memory_kobj, &memory_attr_group);
+kobj: kobject_put(memory_kobj);
+wq: destroy_workqueue(memory_wq);
+ return error;
+}
+module_init(memory_state_time_init);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 4fd845ab4086..a7b3663ad7fe 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -80,6 +80,9 @@
/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP 0x01
+/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
+#define MAX_DIP 0x20000
+
#define RPMB_SERVICE 0x2000
#define SSD_SERVICE 0x3000
@@ -95,6 +98,7 @@
#define QSEECOM_STATE_NOT_READY 0
#define QSEECOM_STATE_SUSPEND 1
#define QSEECOM_STATE_READY 2
+#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
/*
* default ce info unit to 0 for
@@ -110,6 +114,15 @@ enum qseecom_clk_definitions {
CLK_SFPB,
};
+enum qseecom_ice_key_size_type {
+ QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
+ (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+ QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
+ (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+ QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
+ (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+};
+
enum qseecom_client_handle_type {
QSEECOM_CLIENT_APP = 1,
QSEECOM_LISTENER_SERVICE,
@@ -252,6 +265,7 @@ struct qseecom_control {
bool support_bus_scaling;
bool support_fde;
bool support_pfe;
+ bool fde_key_size;
uint32_t cumulative_mode;
enum qseecom_bandwidth_request_mode current_mode;
struct timer_list bw_scale_down_timer;
@@ -5693,6 +5707,11 @@ static int qseecom_create_key(struct qseecom_dev_handle *data,
goto free_buf;
}
+ if (qseecom.fde_key_size)
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+ else
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
generate_key_ireq.flags = flags;
generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
memset((void *)generate_key_ireq.key_id,
@@ -5913,6 +5932,12 @@ static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
}
ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
+
+ if (qseecom.fde_key_size)
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+ else
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
ireq.flags = flags;
memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
@@ -6034,7 +6059,8 @@ static int qseecom_mdtp_cipher_dip(void __user *argp)
}
if (req.in_buf == NULL || req.out_buf == NULL ||
- req.in_buf_size == 0 || req.out_buf_size == 0 ||
+ req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
+ req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
req.direction > 1) {
pr_err("invalid parameters\n");
ret = -EINVAL;
@@ -8346,6 +8372,9 @@ static int qseecom_probe(struct platform_device *pdev)
"qcom,commonlib64-loaded-by-uefi");
pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
qseecom.commonlib64_loaded);
+ qseecom.fde_key_size =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,fde-key-size");
qseecom.no_clock_support =
of_property_read_bool((&pdev->dev)->of_node,
"qcom,no-clock-support");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 39cb46a5ce11..9ec0928658cd 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2854,8 +2854,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;
/*
@@ -2869,14 +2869,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 6ad91042409e..a444a3a80f52 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -948,6 +948,17 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
pr_debug("%s: %d bytes transferred: %d\n",
mmc_hostname(host),
mrq->data->bytes_xfered, mrq->data->error);
+ if (mrq->lat_hist_enabled) {
+ ktime_t completion;
+ u_int64_t delta_us;
+
+ completion = ktime_get();
+ delta_us = ktime_us_delta(completion,
+ mrq->io_start);
+ blk_update_latency_hist(&host->io_lat_s,
+ (mrq->data->flags & MMC_DATA_READ),
+ delta_us);
+ }
trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
}
@@ -1698,6 +1709,11 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
}
if (!err && areq) {
+ if (host->latency_hist_enabled) {
+ areq->mrq->io_start = ktime_get();
+ areq->mrq->lat_hist_enabled = 1;
+ } else
+ areq->mrq->lat_hist_enabled = 0;
trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
areq->mrq->cmd->arg,
areq->mrq->data);
@@ -3233,7 +3249,7 @@ void mmc_init_erase(struct mmc_card *card)
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
- unsigned int arg, unsigned int qty)
+ unsigned int arg, unsigned int qty)
{
unsigned int erase_timeout;
@@ -4394,6 +4410,54 @@ static void __exit mmc_exit(void)
destroy_workqueue(workqueue);
}
+/* sysfs read: dump the host's accumulated I/O latency histogram. */
+static ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ return blk_latency_hist_show(&host->io_lat_s, buf);
+}
+
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ long value;
+
+ if (kstrtol(buf, 0, &value))
+ return -EINVAL;
+ /* Any value outside {0,1,2} is silently ignored by design. */
+ if (value == BLK_IO_LAT_HIST_ZERO)
+ blk_zero_latency_hist(&host->io_lat_s);
+ else if (value == BLK_IO_LAT_HIST_ENABLE ||
+ value == BLK_IO_LAT_HIST_DISABLE)
+ host->latency_hist_enabled = value;
+ return count;
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+ latency_hist_show, latency_hist_store);
+
+/* Create the latency_hist attribute; failure is logged, not fatal. */
+void
+mmc_latency_hist_sysfs_init(struct mmc_host *host)
+{
+ if (device_create_file(&host->class_dev, &dev_attr_latency_hist))
+ dev_err(&host->class_dev,
+ "Failed to create latency_hist sysfs entry\n");
+}
+
+/* Remove the latency_hist attribute on host teardown. */
+void
+mmc_latency_hist_sysfs_exit(struct mmc_host *host)
+{
+ device_remove_file(&host->class_dev, &dev_attr_latency_hist);
+}
+
subsys_initcall(mmc_init);
module_exit(mmc_exit);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1c4a15dc5fbb..f6a54a8e1076 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -880,6 +880,8 @@ int mmc_add_host(struct mmc_host *host)
pr_err("%s: failed to create sysfs group with err %d\n",
__func__, err);
+ mmc_latency_hist_sysfs_init(host);
+
mmc_start_host(host);
if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
register_pm_notifier(&host->pm_notify);
@@ -910,6 +912,8 @@ void mmc_remove_host(struct mmc_host *host)
sysfs_remove_group(&host->parent->kobj, &dev_attr_grp);
sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+ mmc_latency_hist_sysfs_exit(host);
+
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 992bf5397633..bf38533406fd 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -12,6 +12,8 @@
#define _MMC_CORE_HOST_H
#include <linux/mmc/host.h>
+#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
+
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
@@ -21,5 +23,8 @@ void mmc_retune_hold(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
+void mmc_latency_hist_sysfs_init(struct mmc_host *host);
+void mmc_latency_hist_sysfs_exit(struct mmc_host *host);
+
#endif
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index f41832252761..01959bd2d523 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
config MMC_SDHCI_ACPI
tristate "SDHCI support for ACPI enumerated SDHCI controllers"
depends on MMC_SDHCI && ACPI
+ select IOSF_MBI if X86
help
This selects support for ACPI enumerated SDHCI controllers,
identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8aea3fa6938b..5a05bf400ca8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
#include "sdhci.h"
enum {
@@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+#ifdef CONFIG_X86
+
+/* True on Intel Bay Trail (family 6, model 0x37) CPUs. */
+static bool sdhci_acpi_byt(void)
+{
+ static const struct x86_cpu_id byt[] = {
+ { X86_VENDOR_INTEL, 6, 0x37 },
+ {}
+ };
+
+ return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP 0x63
+#define BYT_IOSF_OCP_NETCTRL0 0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
+
+/*
+ * Clear the OCP timeout-base bits in NETCTRL0 via the IOSF sideband on
+ * Bay Trail; no-op elsewhere or when the bits are already clear.
+ * NOTE(review): 0x06/0x07 appear to be the IOSF MBI read/write opcodes
+ * for this port -- confirm against the iosf_mbi API docs.
+ */
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+ u32 val = 0;
+
+ if (!sdhci_acpi_byt())
+ return;
+
+ if (iosf_mbi_read(BYT_IOSF_SCCEP, 0x06, BYT_IOSF_OCP_NETCTRL0,
+ &val)) {
+ dev_err(dev, "%s read error\n", __func__);
+ return;
+ }
+
+ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+ return;
+
+ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+ if (iosf_mbi_write(BYT_IOSF_SCCEP, 0x07, BYT_IOSF_OCP_NETCTRL0,
+ val)) {
+ dev_err(dev, "%s write error\n", __func__);
+ return;
+ }
+
+ dev_dbg(dev, "%s completed\n", __func__);
+}
+
+/*
+ * On Bay Trail, defer probing until the IOSF MBI driver is available;
+ * once it is, apply the NETCTRL0 setting immediately.
+ */
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ if (!sdhci_acpi_byt())
+ return false;
+
+ if (!iosf_mbi_available())
+ return true;
+
+ sdhci_acpi_byt_setting(dev);
+
+ return false;
+}
+
+#else
+
+/* Non-x86 stubs: the Bay Trail quirk never applies. */
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ return false;
+}
+
+#endif
+
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -337,6 +411,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
+ if (sdhci_acpi_byt_defer(dev))
+ return -EPROBE_DEFER;
+
hid = acpi_device_hid(device);
uid = device->pnp.unique_id;
@@ -460,6 +537,8 @@ static int sdhci_acpi_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_resume_host(c->host);
}
@@ -483,6 +562,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_runtime_resume_host(c->host);
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 886229317fea..b5b2c8612663 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -576,7 +576,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->align_buffer, host->align_buffer_sz, direction);
if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
goto fail;
- BUG_ON(host->align_addr & host->align_mask);
+ BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
host->sg_count = sdhci_pre_dma_transfer(host, data);
if (host->sg_count < 0)
@@ -598,8 +598,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
* the (up to three) bytes that screw up the
* alignment.
*/
- offset = (host->align_sz - (addr & host->align_mask)) &
- host->align_mask;
+ offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
+ SDHCI_ADMA2_MASK;
if (offset) {
if (data->flags & MMC_DATA_WRITE) {
buffer = sdhci_kmap_atomic(sg, &flags);
@@ -613,8 +613,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
BUG_ON(offset > 65536);
- align += host->align_sz;
- align_addr += host->align_sz;
+ align += SDHCI_ADMA2_ALIGN;
+ align_addr += SDHCI_ADMA2_ALIGN;
desc += host->desc_sz;
@@ -698,7 +698,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
/* Do a quick scan of the SG list for any unaligned mappings */
has_unaligned = false;
for_each_sg(data->sg, sg, host->sg_count, i)
- if (sg_dma_address(sg) & host->align_mask) {
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
has_unaligned = true;
break;
}
@@ -710,15 +710,15 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
align = host->align_buffer;
for_each_sg(data->sg, sg, host->sg_count, i) {
- if (sg_dma_address(sg) & host->align_mask) {
- size = host->align_sz -
- (sg_dma_address(sg) & host->align_mask);
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+ size = SDHCI_ADMA2_ALIGN -
+ (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
buffer = sdhci_kmap_atomic(sg, &flags);
memcpy(buffer, align, size);
sdhci_kunmap_atomic(buffer, &flags);
- align += host->align_sz;
+ align += SDHCI_ADMA2_ALIGN;
}
}
}
@@ -1445,7 +1445,9 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
pwr = SDHCI_POWER_330;
break;
default:
- BUG();
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ break;
}
}
@@ -3810,24 +3812,17 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->flags & SDHCI_USE_64_BIT_DMA) {
host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
SDHCI_ADMA2_64_DESC_SZ;
- host->align_buffer_sz = SDHCI_MAX_SEGS *
- SDHCI_ADMA2_64_ALIGN;
host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
- host->align_sz = SDHCI_ADMA2_64_ALIGN;
- host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
} else {
host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
SDHCI_ADMA2_32_DESC_SZ;
- host->align_buffer_sz = SDHCI_MAX_SEGS *
- SDHCI_ADMA2_32_ALIGN;
host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
- host->align_sz = SDHCI_ADMA2_32_ALIGN;
- host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
}
host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
host->adma_table_sz,
&host->adma_addr,
GFP_KERNEL);
+ host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
if (!host->adma_table || !host->align_buffer) {
if (host->adma_table)
@@ -3841,7 +3836,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->flags &= ~SDHCI_USE_ADMA;
host->adma_table = NULL;
host->align_buffer = NULL;
- } else if (host->adma_addr & host->align_mask) {
+ } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 6ef2005c8d4c..e5419b42a444 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -284,22 +284,27 @@
/* ADMA2 32-bit DMA descriptor size */
#define SDHCI_ADMA2_32_DESC_SZ 8
-/* ADMA2 32-bit DMA alignment */
-#define SDHCI_ADMA2_32_ALIGN 4
-
/* ADMA2 32-bit descriptor */
struct sdhci_adma2_32_desc {
__le16 cmd;
__le16 len;
__le32 addr;
-} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
+} __packed __aligned(4);
+
+/* ADMA2 data alignment */
+#define SDHCI_ADMA2_ALIGN 4
+#define SDHCI_ADMA2_MASK (SDHCI_ADMA2_ALIGN - 1)
+
+/*
+ * ADMA2 descriptor alignment. Some controllers (e.g. Intel) require 8 byte
+ * alignment for the descriptor table even in 32-bit DMA mode. Memory
+ * allocation is at least 8 byte aligned anyway, so just stipulate 8 always.
+ */
+#define SDHCI_ADMA2_DESC_ALIGN 8
/* ADMA2 64-bit DMA descriptor size */
#define SDHCI_ADMA2_64_DESC_SZ 12
-/* ADMA2 64-bit DMA alignment */
-#define SDHCI_ADMA2_64_ALIGN 8
-
/*
* ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
* aligned.
@@ -581,8 +586,6 @@ struct sdhci_host {
dma_addr_t align_addr; /* Mapped bounce buffer */
unsigned int desc_sz; /* ADMA descriptor size */
- unsigned int align_sz; /* ADMA alignment */
- unsigned int align_mask; /* ADMA alignment mask */
struct tasklet_struct finish_tasklet; /* Tasklet structures */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ce7b2cab5762..54ab48827258 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2586,7 +2586,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
int use_bufpoi;
- int part_pagewr = (column || writelen < (mtd->writesize - 1));
+ int part_pagewr = (column || writelen < mtd->writesize);
if (part_pagewr)
use_bufpoi = 1;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 19c54afdbc49..9b7bc6326fa2 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -931,7 +931,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
- ubi_err(ubi, "mtd%d is already attached to ubi%d",
+ pr_err("ubi: mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -946,7 +946,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
+ pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI",
mtd->index);
return -EINVAL;
}
@@ -957,7 +957,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- ubi_err(ubi, "only %d UBI devices may be created",
+ pr_err("ubi: only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
@@ -967,7 +967,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- ubi_err(ubi, "already exists");
+ pr_err("ubi: ubi%i already exists", ubi_num);
return -EEXIST;
}
}
@@ -1049,6 +1049,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
goto out_detach;
}
+ /* Make device "available" before it becomes accessible via sysfs */
+ ubi_devices[ubi_num] = ubi;
+
err = uif_init(ubi, &ref);
if (err)
goto out_detach;
@@ -1093,7 +1096,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
- ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1104,6 +1106,7 @@ out_uif:
ubi_assert(ref);
uif_close(ubi);
out_detach:
+ ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1ae17bb9b889..3ea4c022cbb9 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -488,13 +488,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_unlock(&ubi->volumes_lock);
}
- /* Change volume table record */
- vtbl_rec = ubi->vtbl[vol_id];
- vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
- err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
- if (err)
- goto out_acc;
-
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
@@ -512,6 +505,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_unlock(&ubi->volumes_lock);
}
+ /*
+ * When we shrink a volume we have to flush all pending (erase) work.
+ * Otherwise it can happen that upon next attach UBI finds a LEB with
+ * lnum > highest_lnum and refuses to attach.
+ */
+ if (pebs < 0) {
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+ if (err)
+ goto out_acc;
+ }
+
+ /* Change volume table record */
+ vtbl_rec = ubi->vtbl[vol_id];
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_acc;
+
vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index db760e84119f..b8df0f5e8c25 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
if (err < 0)
return err;
- return register_netdevice(bond_dev);
+ err = register_netdevice(bond_dev);
+
+ netif_carrier_off(bond_dev);
+
+ return err;
}
static size_t bond_get_size(const struct net_device *bond_dev)
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8b3275d7792a..8f5e93cb7975 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
- quota > 0 && mb > get_mb_rx_last(priv)) {
+ mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
- goto again;
+ if (quota > 0)
+ goto again;
}
return received;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f91b094288da..e3dccd3200d5 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
- for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
- frame->data[i] | (frame->data[i + 1] << 8));
+ if (priv->type == BOSCH_D_CAN) {
+ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = (u32)frame->data[i];
+ data |= (u32)frame->data[i + 1] << 8;
+ data |= (u32)frame->data[i + 2] << 16;
+ data |= (u32)frame->data[i + 3] << 24;
+ priv->write_reg32(priv, dreg, data);
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv,
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+ frame->data[i] |
+ (frame->data[i + 1] << 8));
+ }
}
}
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
- data = priv->read_reg(priv, dreg);
- frame->data[i] = data;
- frame->data[i + 1] = data >> 8;
+ if (priv->type == BOSCH_D_CAN) {
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = priv->read_reg32(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ frame->data[i + 2] = data >> 16;
+ frame->data[i + 3] = data >> 24;
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+ data = priv->read_reg(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
}
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 910c12e2638e..ad535a854e5c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
* - control mode with CAN_CTRLMODE_FD set
*/
+ if (!data)
+ return 0;
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
return -EOPNOTSUPP;
}
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+ return;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
+ .dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 28f7610b03fe..c32f5d32f811 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -219,7 +219,7 @@ err_dma:
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);
- while (i > 0) {
+ while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index b89504405b72..7445da218bd9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2526,7 +2526,7 @@ static void handle_timestamp(struct octeon_device *oct,
octeon_swap_8B_data(&resp->timestamp, 1);
- if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) {
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
struct skb_shared_hwtstamps ts;
u64 ns = resp->timestamp;
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 39ca6744a4e6..22471d283a95 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -116,6 +116,15 @@
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9
+/* Minimum FIFO level before all packets for the CQ are dropped
+ *
+ * This value ensures that once a packet has been "accepted"
+ * for reception it will not get dropped due to non-availability
+ * of CQ descriptor. An errata in HW mandates this value to be
+ * atleast 0x100.
+ */
+#define NICPF_CQM_MIN_DROP_LEVEL 0x100
+
/* Global timer for CQ timer thresh interrupts
* Calculated for SCLK of 700Mhz
* value written should be a 1/16th of what is expected
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 5f24d11cb16a..16baaafed26c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -309,6 +309,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
static void nic_init_hw(struct nicpf *nic)
{
int i;
+ u64 cqm_cfg;
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -345,6 +346,11 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable VLAN ethertype matching and stripping */
nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
(2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
+
+ /* Check if HW expected value is higher (could be in future chips) */
+ cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
+ if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
+ nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}
/* Channel parse index configuration */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index dd536be20193..afb10e326b4f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -21,7 +21,7 @@
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
-#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CQM_CFG (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index dde8dc720cd3..b7093b9cd1e8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -566,8 +566,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cmp_queue *cq,
- struct cqe_rx_t *cqe_rx, int cqe_type)
+ struct cqe_rx_t *cqe_rx)
{
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
@@ -583,7 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
}
/* Check for errors */
- err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+ err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
if (err && !cqe_rx->rb_cnt)
return;
@@ -674,8 +673,7 @@ loop:
cq_idx, cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq,
- cq_desc, CQE_TYPE_RX);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
work_done++;
break;
case CQE_TYPE_SEND:
@@ -1117,7 +1115,6 @@ int nicvf_stop(struct net_device *netdev)
/* Clear multiqset info */
nic->pnicvf = nic;
- nic->sqs_count = 0;
return 0;
}
@@ -1346,6 +1343,9 @@ void nicvf_update_stats(struct nicvf *nic)
drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
stats->tx_bcast_frames_ok +
stats->tx_mcast_frames_ok;
+ drv_stats->rx_frames_ok = stats->rx_ucast_frames +
+ stats->rx_bcast_frames +
+ stats->rx_mcast_frames;
drv_stats->rx_drops = stats->rx_drop_red +
stats->rx_drop_overrun;
drv_stats->tx_drops = stats->tx_drops;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d1c217eaf417..912ee28ab58b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1414,16 +1414,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
}
/* Check for errors in the receive cmp.queue entry */
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
struct nicvf_hw_stats *stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
- if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
- drv_stats->rx_frames_ok++;
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode)
return 0;
- }
if (netif_msg_rx_err(nic))
netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 033e8306e91c..5652c612e20b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -344,8 +344,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9df26c2263bc..42718cc7d4e8 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -549,7 +549,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
}
/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
@@ -568,13 +570,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -587,29 +582,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Do lmac reinit to see if condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
@@ -619,8 +615,11 @@ static void bgx_poll_for_link(struct work_struct *work)
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
@@ -634,9 +633,15 @@ static void bgx_poll_for_link(struct work_struct *work)
}
if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -708,7 +713,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
@@ -717,9 +722,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
destroy_workqueue(lmac->check_link);
}
- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give chance for Rx/Tx FIFO to get drained */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e179363a1..42010d2e5ddf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
@@ -50,6 +51,7 @@
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 69707108d23c..98fe5a2cd6e3 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -213,8 +213,11 @@ struct e1000_rx_ring {
};
#define E1000_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) \
- ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+({ \
+ unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \
+ unsigned int use = READ_ONCE((R)->next_to_use); \
+ (clean > use ? 0 : (R)->count) + clean - use - 1; \
+})
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index fd7be860c201..068023595d84 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3876,7 +3876,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
- tx_ring->next_to_clean = i;
+ /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
+ * which will reuse the cleaned buffers.
+ */
+ smp_store_release(&tx_ring->next_to_clean, i);
netdev_completed_queue(netdev, pkts_compl, bytes_compl);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 0a854a47d31a..80ec587d510e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1959,8 +1959,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
* previous interrupt.
*/
if (rx_ring->set_itr) {
- writel(1000000000 / (rx_ring->itr_val * 256),
- rx_ring->itr_register);
+ u32 itr = rx_ring->itr_val ?
+ 1000000000 / (rx_ring->itr_val * 256) : 0;
+
+ writel(itr, rx_ring->itr_register);
rx_ring->set_itr = 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 14440200499b..48809e5d3f79 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -33,7 +33,7 @@
#include "fm10k_pf.h"
#include "fm10k_vf.h"
-#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */
+#define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */
#define MAX_QUEUES FM10K_MAX_QUEUES_PF
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e76a44cf330c..09281558bfbc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1428,6 +1428,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
fm10k_for_each_ring(ring, q_vector->tx)
clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ return budget;
+
/* attempt to distribute budget to each queue fairly, but don't
* allow the budget to go below 1 because we'll exit polling
*/
@@ -1966,8 +1970,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
/* Allocate memory for queues */
err = fm10k_alloc_q_vectors(interface);
- if (err)
+ if (err) {
+ fm10k_reset_msix_capability(interface);
return err;
+ }
/* Map rings to devices, and map devices to physical queues */
fm10k_assign_rings(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 74be792f3f1b..7f3fb51bc37b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -159,13 +159,30 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
fm10k_mbx_free_irq(interface);
+ /* free interrupts */
+ fm10k_clear_queueing_scheme(interface);
+
/* delay any future reset requests */
interface->last_reset = jiffies + (10 * HZ);
/* reset and initialize the hardware so it is in a known state */
- err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
- if (err)
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
+ goto reinit_err;
+ }
+
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
+ goto reinit_err;
+ }
+
+ err = fm10k_init_queueing_scheme(interface);
+ if (err) {
+ dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
+ goto reinit_err;
+ }
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
@@ -193,6 +210,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
fm10k_iov_resume(interface->pdev);
+reinit_err:
+ if (err)
+ netif_device_detach(netdev);
+
rtnl_unlock();
clear_bit(__FM10K_RESETTING, &interface->state);
@@ -1101,6 +1122,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
struct fm10k_hw *hw = &interface->hw;
int itr_reg;
+ /* no mailbox IRQ to free if MSI-X is not enabled */
+ if (!interface->msix_entries)
+ return;
+
/* disconnect the mailbox */
hw->mbx.ops.disconnect(hw, &hw->mbx);
@@ -1423,10 +1448,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
err = fm10k_mbx_request_irq_pf(interface);
else
err = fm10k_mbx_request_irq_vf(interface);
+ if (err)
+ return err;
/* connect mailbox */
- if (!err)
- err = hw->mbx.ops.connect(hw, &hw->mbx);
+ err = hw->mbx.ops.connect(hw, &hw->mbx);
+
+ /* if the mailbox failed to connect, then free IRQ */
+ if (err)
+ fm10k_mbx_free_irq(interface);
return err;
}
@@ -1684,7 +1714,13 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
interface->last_reset = jiffies + (10 * HZ);
/* reset and initialize the hardware so it is in a known state */
- err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
+ return err;
+ }
+
+ err = hw->mac.ops.init_hw(hw);
if (err) {
dev_err(&pdev->dev, "init_hw failed: %d\n", err);
return err;
@@ -2071,8 +2107,10 @@ static int fm10k_resume(struct pci_dev *pdev)
/* reset hardware to known state */
err = hw->mac.ops.init_hw(&interface->hw);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
return err;
+ }
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
@@ -2185,6 +2223,9 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
fm10k_close(netdev);
+ /* free interrupts */
+ fm10k_clear_queueing_scheme(interface);
+
fm10k_mbx_free_irq(interface);
pci_disable_device(pdev);
@@ -2248,11 +2289,21 @@ static void fm10k_io_resume(struct pci_dev *pdev)
int err = 0;
/* reset hardware to known state */
- hw->mac.ops.init_hw(&interface->hw);
+ err = hw->mac.ops.init_hw(&interface->hw);
+ if (err) {
+ dev_err(&pdev->dev, "init_hw failed: %d\n", err);
+ return;
+ }
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+ err = fm10k_init_queueing_scheme(interface);
+ if (err) {
+ dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
+ return;
+ }
+
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 318a212f0a78..35afd711d144 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -77,6 +77,7 @@ struct fm10k_hw;
#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10
#define FM10K_ERR_PARAM -2
+#define FM10K_ERR_NO_RESOURCES -3
#define FM10K_ERR_REQUESTS_PENDING -4
#define FM10K_ERR_RESET_REQUESTED -5
#define FM10K_ERR_DMA_PENDING -6
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 36c8b0aa08fd..d512575c33f3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -103,7 +103,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
s32 err;
u16 i;
- /* assume we always have at least 1 queue */
+ /* verify we have at least 1 queue */
+ if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) ||
+ !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) {
+ err = FM10K_ERR_NO_RESOURCES;
+ goto reset_max_queues;
+ }
+
+ /* determine how many queues we have */
for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
/* verify the Descriptor cache offsets are increasing */
tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i));
@@ -119,7 +126,7 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
/* shut down queues we own and reset DMA configuration */
err = fm10k_disable_queues_generic(hw, i);
if (err)
- return err;
+ goto reset_max_queues;
/* record maximum queue count */
hw->mac.max_queues = i;
@@ -129,6 +136,11 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
return 0;
+
+reset_max_queues:
+ hw->mac.max_queues = 0;
+
+ return err;
}
/* This structure defines the attibutes to be parsed below */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4dd3e26129b4..7e258a83ccab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -767,6 +767,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3f385ffe420f..488a50d59dca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2164,8 +2164,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
@@ -2176,8 +2175,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
@@ -2188,9 +2186,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
@@ -2202,9 +2198,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4a9873ec28c7..2215bebe208e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1317,6 +1317,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
}
/**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ * @is_vf: true if it is a VF
+ * @is_netdev: true if it is a netdev
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f = NULL;
+ int changed = 0;
+
+ WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+ "Missing mac_filter_list_lock\n");
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (is_vf == f->is_vf) &&
+ (is_netdev == f->is_netdev)) {
+ f->counter--;
+ f->changed = true;
+ changed = 1;
+ }
+ }
+ if (changed) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
@@ -1547,9 +1583,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
- i40e_sync_vsi_filters(vsi, false);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
return 0;
}
@@ -1935,11 +1973,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
/* Now process 'del_list' outside the lock */
if (!list_empty(&tmp_del_list)) {
+ int del_list_size;
+
filter_list_len = pf->hw.aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_remove_macvlan_element_data),
- GFP_KERNEL);
+ del_list_size = filter_list_len *
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kzalloc(del_list_size, GFP_KERNEL);
if (!del_list) {
i40e_cleanup_add_list(&tmp_add_list);
@@ -1971,7 +2011,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
NULL);
aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
- memset(del_list, 0, sizeof(*del_list));
+ memset(del_list, 0, del_list_size);
if (ret && aq_err != I40E_AQ_RC_ENOENT)
dev_err(&pf->pdev->dev,
@@ -2004,13 +2044,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
}
if (!list_empty(&tmp_add_list)) {
+ int add_list_size;
/* do all the adds now */
filter_list_len = pf->hw.aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data),
- add_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_add_macvlan_element_data),
- GFP_KERNEL);
+ add_list_size = filter_list_len *
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kzalloc(add_list_size, GFP_KERNEL);
if (!add_list) {
/* Purge element from temporary lists */
i40e_cleanup_add_list(&tmp_add_list);
@@ -2048,7 +2089,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
if (ret)
break;
- memset(add_list, 0, sizeof(*add_list));
+ memset(add_list, 0, add_list_size);
}
/* Entries from tmp_add_list were cloned from MAC
* filter list, hence clean those cloned entries
@@ -2112,12 +2153,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- if (grab_rtnl)
- i40e_do_reset_safe(pf,
- BIT(__I40E_PF_RESET_REQUESTED));
- else
- i40e_do_reset(pf,
- BIT(__I40E_PF_RESET_REQUESTED));
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
}
} else {
ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2377,16 +2413,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- /* Make sure to release before sync_vsi_filter because that
- * function will lock/unlock as necessary
- */
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- return 0;
-
- return i40e_sync_vsi_filters(vsi, false);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}
/**
@@ -2459,16 +2492,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- /* Make sure to release before sync_vsi_filter because that
- * function with lock/unlock as necessary
- */
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- return 0;
-
- return i40e_sync_vsi_filters(vsi, false);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}
/**
@@ -2711,6 +2741,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
free_cpumask_var(mask);
}
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
}
/**
@@ -6685,6 +6720,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0;
i40e_status ret;
+ u32 val;
u32 v;
/* Now we wait for GRST to settle out.
@@ -6823,6 +6859,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}
+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+#define I40E_REG_MSS 0x000E64DC
+#define I40E_REG_MSS_MIN_MASK 0x3FF0000
+#define I40E_64BYTE_MSS 0x400000
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
@@ -10183,6 +10233,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 link_status;
int err;
u32 len;
+ u32 val;
u32 i;
u8 set_fc_aq_fail;
@@ -10493,6 +10544,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 635b3ac17877..26c55bba4bf3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}
+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}
@@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, fd_data->fd_id);
}
+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}
@@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
}
}
+ if (err)
+ kfree(raw_packet);
+
return err ? -EOPNOTSUPP : 0;
}
@@ -526,11 +535,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
- else
- dev_kfree_skb_any(tx_buffer->skb);
-
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -542,6 +547,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
+
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -1416,31 +1425,12 @@ checksum_fail:
}
/**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
- union i40e_rx_desc *rx_desc)
-{
- const __le64 rss_mask =
- cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
- if ((ring->netdev->features & NETIF_F_RXHASH) &&
- (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- else
- return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -1458,6 +1448,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
}
/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u8 rx_ptype)
+{
+ u32 hash;
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if (ring->netdev->features & NETIF_F_RXHASH)
+ return;
+
+ if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+ hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ }
+}
+
+/**
* i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
@@ -1606,8 +1620,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
continue;
}
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
- i40e_ptype_to_hash(rx_ptype));
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1736,8 +1750,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
continue;
}
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
- i40e_ptype_to_hash(rx_ptype));
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 44462b40f2d7..e116d9a99b8e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
spin_lock_bh(&vsi->mac_filter_list_lock);
- f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
- if (!f)
- dev_info(&pf->pdev->dev,
- "Could not allocate VF MAC addr\n");
+ if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
+ f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
+ true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not add MAC filter %pM for VF %d\n",
+ vf->default_lan_addr.addr, vf->vf_id);
+ }
f = i40e_add_filter(vsi, brdcast,
vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
@@ -1680,8 +1683,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
spin_lock_bh(&vsi->mac_filter_list_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
- i40e_del_filter(vsi, al->list[i].addr,
- I40E_VLAN_ANY, true, false);
+ if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ goto error_param;
+ }
+
spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 47e9a90d6b10..39db70a597ed 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
- else
- dev_kfree_skb_any(tx_buffer->skb);
-
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
+
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -245,16 +245,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
- /* check to see if there are any non-cache aligned descriptors
- * waiting to be written back, and kick the hardware to force
- * them to be written back in case of napi polling
- */
- if (budget &&
- !((i & WB_STRIDE) == WB_STRIDE) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
- (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
-
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
total_packets, total_bytes);
@@ -889,31 +879,12 @@ checksum_fail:
}
/**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
- union i40e_rx_desc *rx_desc)
-{
- const __le64 rss_mask =
- cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
- if ((ring->netdev->features & NETIF_F_RXHASH) &&
- (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- else
- return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -931,6 +902,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
}
/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u8 rx_ptype)
+{
+ u32 hash;
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if (ring->netdev->features & NETIF_F_RXHASH)
+ return;
+
+ if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+ hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ }
+}
+
+/**
* i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
@@ -1071,8 +1066,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
continue;
}
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
- i40e_ptype_to_hash(rx_ptype));
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -1189,8 +1184,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
continue;
}
- skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
- i40e_ptype_to_hash(rx_ptype));
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -1770,6 +1764,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
+ u16 desc_count = 0;
+ bool tail_bump = true;
+ bool do_rs = false;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1810,6 +1807,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
+ desc_count++;
+
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
@@ -1829,6 +1828,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
+ desc_count++;
+
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
@@ -1843,35 +1844,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* Place RS bit on last descriptor of any packet that spans across the
- * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
- */
#define WB_STRIDE 0x3
- if (((i & WB_STRIDE) != WB_STRIDE) &&
- (first <= &tx_ring->tx_bi[i]) &&
- (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
- I40E_TXD_QW1_CMD_SHIFT);
- } else {
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TXD_CMD <<
- I40E_TXD_QW1_CMD_SHIFT);
- }
-
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
@@ -1881,15 +1854,78 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ /* Algorithm to optimize tail and RS bit setting:
+ * if xmit_more is supported
+ * if xmit_more is true
+ * do not update tail and do not mark RS bit.
+ * if xmit_more is false and last xmit_more was false
+ * if every packet spanned less than 4 desc
+ * then set RS bit on 4th packet and update tail
+ * on every packet
+ * else
+ * update tail and set RS bit on every packet.
+ * if xmit_more is false and last_xmit_more was true
+ * update tail and set RS bit.
+ * else (kernel < 3.18)
+ * if every packet spanned less than 4 desc
+ * then set RS bit on 4th packet and update tail
+ * on every packet
+ * else
+ * set RS bit on EOP for every packet and update tail
+ *
+ * Optimization: wmb to be issued only in case of tail update.
+ * Also optimize the Descriptor WB path for RS bit with the same
+ * algorithm.
+ *
+ * Note: If there are less than 4 packets
+ * pending and interrupts were disabled the service task will
+ * trigger a force WB.
+ */
+ if (skb->xmit_more &&
+ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index))) {
+ tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+ tail_bump = false;
+ } else if (!skb->xmit_more &&
+ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index)) &&
+ (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+ (tx_ring->packet_stride < WB_STRIDE) &&
+ (desc_count < WB_STRIDE)) {
+ tx_ring->packet_stride++;
+ } else {
+ tx_ring->packet_stride = 0;
+ tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+ do_rs = true;
+ }
+ if (do_rs)
+ tx_ring->packet_stride = 0;
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+ I40E_TX_DESC_CMD_EOP) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+
/* notify HW of packet */
- if (!skb->xmit_more ||
- netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)))
- writel(i, tx_ring->tail);
- else
+ if (!tail_bump)
prefetchw(tx_desc + 1);
+ if (tail_bump) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, tx_ring->tail);
+ }
+
return;
dma_error:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index ebc1bf77f036..998976844e4e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -267,6 +267,8 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u8 packet_stride;
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 4790437a50ac..2ac62efc36f7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -477,54 +477,30 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
- default:
+ else
return -EINVAL;
- }
break;
case TCP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
- default:
+ else
return -EINVAL;
- }
break;
case UDP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- default:
+ } else {
return -EINVAL;
}
break;
case UDP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
+ } else {
return -EINVAL;
}
break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 99d2cffae0cd..5f03ab3dfa19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1864,6 +1864,9 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
{
int i;
+ if (!adapter->tx_rings)
+ return;
+
for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->tx_rings[i]->desc)
i40evf_free_tx_resources(adapter->tx_rings[i]);
@@ -1932,6 +1935,9 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
{
int i;
+ if (!adapter->rx_rings)
+ return;
+
for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->rx_rings[i]->desc)
i40evf_free_rx_resources(adapter->rx_rings[i]);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 32e620e1eb5c..5de3f52fd31f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -391,6 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
struct i40e_virtchnl_ether_addr_list *veal;
int len, i = 0, count = 0;
struct i40evf_mac_filter *f;
+ bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -415,7 +416,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
- len = I40EVF_MAX_AQ_BUF_SIZE;
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ more = true;
}
veal = kzalloc(len, GFP_ATOMIC);
@@ -431,7 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
f->add = false;
}
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ if (!more)
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
(u8 *)veal, len);
kfree(veal);
@@ -450,6 +454,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
struct i40e_virtchnl_ether_addr_list *veal;
struct i40evf_mac_filter *f, *ftmp;
int len, i = 0, count = 0;
+ bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -474,7 +479,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
- len = I40EVF_MAX_AQ_BUF_SIZE;
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ more = true;
}
veal = kzalloc(len, GFP_ATOMIC);
if (!veal)
@@ -490,7 +497,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
kfree(f);
}
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ if (!more)
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
(u8 *)veal, len);
kfree(veal);
@@ -509,6 +517,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
struct i40e_virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
struct i40evf_vlan_filter *f;
+ bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -534,7 +543,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = I40EVF_MAX_AQ_BUF_SIZE;
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ more = true;
}
vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl)
@@ -549,7 +560,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
f->add = false;
}
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ if (!more)
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -567,6 +579,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
struct i40e_virtchnl_vlan_filter_list *vvfl;
struct i40evf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
+ bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -592,7 +605,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = I40EVF_MAX_AQ_BUF_SIZE;
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ more = true;
}
vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl)
@@ -608,7 +623,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
kfree(f);
}
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ if (!more)
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7a73510e547c..97bf0c3d5c69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
case I210_I_PHY_ID:
phy->type = e1000_phy_i210;
phy->ops.check_polarity = igb_check_polarity_m88;
+ phy->ops.get_cfg_done = igb_get_cfg_done_i210;
phy->ops.get_phy_info = igb_get_phy_info_m88;
phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 65d931669f81..29f59c76878a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -900,3 +900,30 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
wr32(E1000_MDICNFG, mdicnfg);
return ret_val;
}
+
+/**
+ * igb_get_cfg_done_i210 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so an error is *ONLY* logged and 0 is
+ * returned. If we were to return an error, EEPROM-less silicon
+ * would not be able to be reset or change link.
+ **/
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ while (timeout) {
+ if (rd32(E1000_EEMNGCTL_I210) & mask)
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ if (!timeout)
+ hw_dbg("MNG configuration cycle has not completed.\n");
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 3442b6357d01..eaa68a50cb3b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
bool igb_get_flash_presence_i210(struct e1000_hw *hw);
s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 4af2870e49f8..0fdcd4d1b982 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */
#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 1a2f1cc44b28..e3cb93bdb21a 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -389,6 +389,8 @@ struct igb_adapter {
u16 link_speed;
u16 link_duplex;
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+
struct work_struct reset_task;
struct work_struct watchdog_task;
bool fc_autoneg;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ea7b09887245..fa3b4cbea23b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2294,9 +2294,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
err = -EIO;
- hw->hw_addr = pci_iomap(pdev, 0, 0);
- if (!hw->hw_addr)
+ adapter->io_addr = pci_iomap(pdev, 0, 0);
+ if (!adapter->io_addr)
goto err_ioremap;
+ /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
+ hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igb_netdev_ops;
igb_set_ethtool_ops(netdev);
@@ -2656,7 +2658,7 @@ err_sw_init:
#ifdef CONFIG_PCI_IOV
igb_disable_sriov(pdev);
#endif
- pci_iounmap(pdev, hw->hw_addr);
+ pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -2823,7 +2825,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_clear_interrupt_scheme(adapter);
- pci_iounmap(pdev, hw->hw_addr);
+ pci_iounmap(pdev, adapter->io_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev,
@@ -2856,6 +2858,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
return;
+ /* Of the below we really only want the effect of getting
+ * IGB_FLAG_HAS_MSIX set (if available), without which
+ * igb_enable_sriov() has no effect.
+ */
+ igb_set_interrupt_capability(adapter, true);
+ igb_reset_interrupt_capability(adapter);
+
pci_sriov_set_totalvfs(pdev, 7);
igb_enable_sriov(pdev, max_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index aed8d029b23d..cd9b284bc83b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
ixgbe_for_each_ring(ring, q_vector->tx)
clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
- if (!ixgbe_qv_lock_napi(q_vector))
+ /* Exit if we are called by netpoll or busy polling is active */
+ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
return budget;
/* attempt to distribute budget to each queue fairly, but don't allow
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a4ac6fedac75..71ec9cb08e06 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -226,7 +226,7 @@
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 2e022e900939..7cc9df717323 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -399,6 +399,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ return -ENOTSUPP;
+
coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
@@ -416,11 +419,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
int tc;
int i;
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -ENOTSUPP;
+
+ mutex_lock(&priv->state_lock);
priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
for (i = 0; i < priv->params.num_channels; ++i) {
c = priv->channel[i];
@@ -436,6 +446,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
coal->rx_max_coalesced_frames);
}
+out:
+ mutex_unlock(&priv->state_lock);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cbd17e25beeb..90e876ecc720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -863,12 +863,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
if (err)
goto err_destroy_cq;
- err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation_usecs,
- moderation_frames);
- if (err)
- goto err_destroy_cq;
-
+ if (MLX5_CAP_GEN(mdev, cq_moderation))
+ mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
return 0;
err_destroy_cq:
@@ -1963,6 +1961,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
}
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ mlx5_core_warn(mdev, "CQ moderation is not supported\n");
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 3dd548ab8df1..40365cb1abe6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -794,13 +794,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
* in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
- bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+ __set_bit(pos, p_spq->p_comp_bitmap);
while (test_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap)) {
- bitmap_clear(p_spq->p_comp_bitmap,
- p_spq->comp_bitmap_idx,
- SPQ_RING_SIZE);
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
p_spq->comp_bitmap_idx++;
qed_chain_return_produced(&p_spq->chain);
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 174e06ec7c2f..e5bb870b5461 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2390,8 +2390,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@@ -2984,6 +2982,9 @@ ppp_disconnect_channel(struct channel *pch)
*/
static void ppp_destroy_channel(struct channel *pch)
{
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
+
atomic_dec(&channel_count);
if (!pch->file.dead) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index 7e74ac3ad815..bcf29bf6f727 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -3401,10 +3401,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
goto err;
}
- /* Allow full data communication using DPC from now on. */
- brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
- bcmerror = 0;
-
err:
brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
sdio_release_host(bus->sdiodev->func[1]);
@@ -4112,6 +4108,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
}
if (err == 0) {
+ /* Allow full data communication using DPC from now on. */
+ brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
+
err = brcmf_sdiod_intr_register(sdiodev);
if (err != 0)
brcmf_err("intr register failed:%d\n", err);
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index 1e56d445c6e1..f53ed2693879 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -2472,7 +2472,7 @@ void *cnss_pci_get_virt_ramdump_mem(unsigned long *size)
void cnss_pci_device_crashed(void)
{
if (penv && penv->subsys) {
- subsys_set_crash_status(penv->subsys, true);
+ subsys_set_crash_status(penv->subsys, CRASH_STATUS_ERR_FATAL);
subsystem_restart_dev(penv->subsys);
}
}
@@ -2491,7 +2491,7 @@ EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
void cnss_device_crashed(void)
{
if (penv && penv->subsys) {
- subsys_set_crash_status(penv->subsys, true);
+ subsys_set_crash_status(penv->subsys, CRASH_STATUS_ERR_FATAL);
subsystem_restart_dev(penv->subsys);
}
}
diff --git a/drivers/net/wireless/cnss/cnss_sdio.c b/drivers/net/wireless/cnss/cnss_sdio.c
index 01b969ec627f..ce7dbc64c4c3 100644
--- a/drivers/net/wireless/cnss/cnss_sdio.c
+++ b/drivers/net/wireless/cnss/cnss_sdio.c
@@ -605,7 +605,8 @@ void cnss_sdio_device_crashed(void)
return;
ssr_info = &cnss_pdata->ssr_info;
if (ssr_info->subsys) {
- subsys_set_crash_status(ssr_info->subsys, true);
+ subsys_set_crash_status(ssr_info->subsys,
+ CRASH_STATUS_ERR_FATAL);
subsystem_restart_dev(ssr_info->subsys);
}
}
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 2dd18dd78677..17b6d1aea4c7 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -47,6 +47,7 @@ MODULE_DEVICE_TABLE(of, msm_match_table);
#define MAX_BUFFER_SIZE (320)
#define WAKEUP_SRC_TIMEOUT (2000)
+#define MAX_RETRY_COUNT 3
struct nqx_dev {
wait_queue_head_t read_wq;
@@ -264,6 +265,35 @@ out:
return ret;
}
+/**
+ * nqx_standby_write()
+ * @buf: pointer to data buffer
+ * @len: # of bytes need to transfer
+ *
+ * write data buffer over I2C and retry
+ * if NFCC is in stand by mode
+ *
+ * Return: # of bytes written or -ve value in case of error
+ */
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+ const unsigned char *buf, size_t len)
+{
+ int ret = -EINVAL;
+ int retry_cnt;
+
+ for (retry_cnt = 1; retry_cnt <= MAX_RETRY_COUNT; retry_cnt++) {
+ ret = i2c_master_send(nqx_dev->client, buf, len);
+ if (ret < 0) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: write failed, Maybe in Standby Mode - Retry(%d)\n",
+ __func__, retry_cnt);
+ usleep_range(1000, 1100);
+ } else if (ret == len)
+ break;
+ }
+ return ret;
+}
+
/*
* Power management of the eSE
* NFC & eSE ON : NFC_EN high and eSE_pwr_req high.
@@ -273,39 +303,95 @@ out:
static int nqx_ese_pwr(struct nqx_dev *nqx_dev, unsigned long int arg)
{
int r = -1;
+ const unsigned char svdd_off_cmd_warn[] = {0x2F, 0x31, 0x01, 0x01};
+ const unsigned char svdd_off_cmd_done[] = {0x2F, 0x31, 0x01, 0x00};
+
+ if (!gpio_is_valid(nqx_dev->ese_gpio)) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: ese_gpio is not valid\n", __func__);
+ return -EINVAL;
+ }
- /* Let's store the NFC_EN pin state */
if (arg == 0) {
/*
* We want to power on the eSE and to do so we need the
* eSE_pwr_req pin and the NFC_EN pin to be high
*/
- nqx_dev->nfc_ven_enabled = gpio_get_value(nqx_dev->en_gpio);
- if (!nqx_dev->nfc_ven_enabled) {
- gpio_set_value(nqx_dev->en_gpio, 1);
- /* hardware dependent delay */
- usleep_range(1000, 1100);
- }
- if (gpio_is_valid(nqx_dev->ese_gpio)) {
+ if (gpio_get_value(nqx_dev->ese_gpio)) {
+ dev_dbg(&nqx_dev->client->dev, "ese_gpio is already high\n");
+ r = 0;
+ } else {
+ /**
+ * Let's store the NFC_EN pin state
+ * only if the eSE is not yet on
+ */
+ nqx_dev->nfc_ven_enabled =
+ gpio_get_value(nqx_dev->en_gpio);
+ if (!nqx_dev->nfc_ven_enabled) {
+ gpio_set_value(nqx_dev->en_gpio, 1);
+ /* hardware dependent delay */
+ usleep_range(1000, 1100);
+ }
+ gpio_set_value(nqx_dev->ese_gpio, 1);
if (gpio_get_value(nqx_dev->ese_gpio)) {
- dev_dbg(&nqx_dev->client->dev, "ese_gpio is already high\n");
+ dev_dbg(&nqx_dev->client->dev, "ese_gpio is enabled\n");
r = 0;
- } else {
- gpio_set_value(nqx_dev->ese_gpio, 1);
- if (gpio_get_value(nqx_dev->ese_gpio)) {
- dev_dbg(&nqx_dev->client->dev, "ese_gpio is enabled\n");
- r = 0;
- }
}
}
} else if (arg == 1) {
- if (gpio_is_valid(nqx_dev->ese_gpio)) {
+ if (nqx_dev->nfc_ven_enabled &&
+ ((nqx_dev->nqx_info.info.chip_type == NFCC_NQ_220) ||
+ (nqx_dev->nqx_info.info.chip_type == NFCC_PN66T))) {
+ /**
+ * Let's inform the CLF we're
+ * powering off the eSE
+ */
+ r = nqx_standby_write(nqx_dev, svdd_off_cmd_warn,
+ sizeof(svdd_off_cmd_warn));
+ if (r < 0) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: write failed after max retry\n",
+ __func__);
+ return -ENXIO;
+ }
+ dev_dbg(&nqx_dev->client->dev,
+ "%s: svdd_off_cmd_warn sent\n", __func__);
+
+ /* let's power down the eSE */
gpio_set_value(nqx_dev->ese_gpio, 0);
- if (!gpio_get_value(nqx_dev->ese_gpio)) {
- dev_dbg(&nqx_dev->client->dev, "ese_gpio is disabled\n");
- r = 0;
+ dev_dbg(&nqx_dev->client->dev,
+ "%s: nqx_dev->ese_gpio set to 0\n", __func__);
+
+ /**
+ * Time needed for the SVDD capacitor
+ * to get discharged
+ */
+ usleep_range(8000, 8100);
+
+ /* Let's inform the CLF the eSE is now off */
+ r = nqx_standby_write(nqx_dev, svdd_off_cmd_done,
+ sizeof(svdd_off_cmd_done));
+ if (r < 0) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: write failed after max retry\n",
+ __func__);
+ return -ENXIO;
}
+ dev_dbg(&nqx_dev->client->dev,
+ "%s: svdd_off_cmd_done sent\n", __func__);
+ } else {
+ /**
+ * In case the NFC is off,
+ * there's no need to send the i2c commands
+ */
+ gpio_set_value(nqx_dev->ese_gpio, 0);
}
+
+ if (!gpio_get_value(nqx_dev->ese_gpio)) {
+ dev_dbg(&nqx_dev->client->dev, "ese_gpio is disabled\n");
+ r = 0;
+ }
+
if (!nqx_dev->nfc_ven_enabled) {
/* hardware dependent delay */
usleep_range(1000, 1100);
@@ -313,12 +399,7 @@ static int nqx_ese_pwr(struct nqx_dev *nqx_dev, unsigned long int arg)
gpio_set_value(nqx_dev->en_gpio, 0);
}
} else if (arg == 3) {
- if (!nqx_dev->nfc_ven_enabled)
- r = 0;
- else {
- if (gpio_is_valid(nqx_dev->ese_gpio))
- r = gpio_get_value(nqx_dev->ese_gpio);
- }
+ r = gpio_get_value(nqx_dev->ese_gpio);
}
return r;
}
@@ -624,6 +705,10 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
dev_dbg(&client->dev,
"%s: ## NFCC == NQ330 ##\n", __func__);
break;
+ case NFCC_PN66T:
+ dev_dbg(&client->dev,
+ "%s: ## NFCC == PN66T ##\n", __func__);
+ break;
default:
dev_err(&client->dev,
"%s: - NFCC HW not Supported\n", __func__);
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
index c635e818b1f3..f34c4d987143 100644
--- a/drivers/nfc/nq-nci.h
+++ b/drivers/nfc/nq-nci.h
@@ -48,7 +48,7 @@ enum nfcc_chip_variant {
NFCC_NQ_220 = 0x58, /**< NFCC NQ220 */
NFCC_NQ_310 = 0x40, /**< NFCC NQ310 */
NFCC_NQ_330 = 0x51, /**< NFCC NQ330 */
+ NFCC_PN66T = 0x18, /**< NFCC PN66T */
NFCC_NOT_SUPPORTED = 0xFF /**< NFCC is not supported */
};
-
#endif
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0c67b57be83c..c851bc53831c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2672,10 +2672,10 @@ static int nvme_dev_add(struct nvme_dev *dev)
return 0;
}
-static int nvme_dev_map(struct nvme_dev *dev)
+static int nvme_pci_enable(struct nvme_dev *dev)
{
u64 cap;
- int bars, result = -ENOMEM;
+ int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pci_enable_device_mem(pdev))
@@ -2683,24 +2683,14 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (!bars)
- goto disable_pci;
-
- if (pci_request_selected_regions(pdev, bars, "nvme"))
- goto disable_pci;
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
goto disable;
- dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
- if (!dev->bar)
- goto disable;
-
if (readl(&dev->bar->csts) == -1) {
result = -ENODEV;
- goto unmap;
+ goto disable;
}
/*
@@ -2710,7 +2700,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!pdev->irq) {
result = pci_enable_msix(pdev, dev->entry, 1);
if (result < 0)
- goto unmap;
+ goto disable;
}
cap = lo_hi_readq(&dev->bar->cap);
@@ -2734,18 +2724,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
return 0;
- unmap:
- iounmap(dev->bar);
- dev->bar = NULL;
disable:
- pci_release_regions(pdev);
- disable_pci:
pci_disable_device(pdev);
+
return result;
}
static void nvme_dev_unmap(struct nvme_dev *dev)
{
+ if (dev->bar)
+ iounmap(dev->bar);
+ pci_release_regions(to_pci_dev(dev->dev));
+}
+
+static void nvme_pci_disable(struct nvme_dev *dev)
+{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pdev->msi_enabled)
@@ -2753,12 +2746,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
else if (pdev->msix_enabled)
pci_disable_msix(pdev);
- if (dev->bar) {
- iounmap(dev->bar);
- dev->bar = NULL;
- pci_release_regions(pdev);
- }
-
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
}
@@ -2962,7 +2949,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_list_remove(dev);
- if (dev->bar) {
+ if (pci_is_enabled(to_pci_dev(dev->dev))) {
nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
}
@@ -2976,7 +2963,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_shutdown_ctrl(dev);
nvme_disable_queue(dev, 0);
}
- nvme_dev_unmap(dev);
+ nvme_pci_disable(dev);
for (i = dev->queue_count - 1; i >= 0; i--)
nvme_clear_queue(dev->queues[i]);
@@ -3136,7 +3123,7 @@ static void nvme_probe_work(struct work_struct *work)
bool start_thread = false;
int result;
- result = nvme_dev_map(dev);
+ result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -3292,6 +3279,27 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+ int bars;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (!bars)
+ return -ENODEV;
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ return -ENODEV;
+
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar)
+ goto release;
+
+ return 0;
+release:
+ pci_release_regions(pdev);
+ return -ENODEV;
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int node, result = -ENOMEM;
@@ -3317,6 +3325,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&dev->reset_work, nvme_reset_work);
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
+
+ result = nvme_dev_map(dev);
+ if (result)
+ goto free;
+
result = nvme_set_instance(dev);
if (result)
goto put_pci;
@@ -3355,6 +3368,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_release_instance(dev);
put_pci:
put_device(dev->dev);
+ nvme_dev_unmap(dev);
free:
kfree(dev->queues);
kfree(dev->entry);
@@ -3398,6 +3412,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_free_queues(dev, 0);
nvme_release_cmb(dev);
nvme_release_prp_pools(dev);
+ nvme_dev_unmap(dev);
kref_put(&dev->kref, nvme_free_dev);
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 017dd94f16ea..31341290cd91 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -112,6 +112,7 @@ static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
}
+/* always return newly allocated name, caller must free after use */
static const char *safe_name(struct kobject *kobj, const char *orig_name)
{
const char *name = orig_name;
@@ -126,9 +127,12 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name)
name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
}
- if (name != orig_name)
+ if (name == orig_name) {
+ name = kstrdup(orig_name, GFP_KERNEL);
+ } else {
pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
kobject_name(kobj), name);
+ }
return name;
}
@@ -159,6 +163,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
int __of_attach_node_sysfs(struct device_node *np)
{
const char *name;
+ struct kobject *parent;
struct property *pp;
int rc;
@@ -171,15 +176,16 @@ int __of_attach_node_sysfs(struct device_node *np)
np->kobj.kset = of_kset;
if (!np->parent) {
/* Nodes without parents are new top level trees */
- rc = kobject_add(&np->kobj, NULL, "%s",
- safe_name(&of_kset->kobj, "base"));
+ name = safe_name(&of_kset->kobj, "base");
+ parent = NULL;
} else {
name = safe_name(&np->parent->kobj, kbasename(np->full_name));
- if (!name || !name[0])
- return -EINVAL;
-
- rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name);
+ parent = &np->parent->kobj;
}
+ if (!name)
+ return -ENOMEM;
+ rc = kobject_add(&np->kobj, parent, "%s", name);
+ kfree(name);
if (rc)
return rc;
@@ -1753,6 +1759,12 @@ int __of_remove_property(struct device_node *np, struct property *prop)
return 0;
}
+void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
+{
+ sysfs_remove_bin_file(&np->kobj, &prop->attr);
+ kfree(prop->attr.attr.name);
+}
+
void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
{
if (!IS_ENABLED(CONFIG_SYSFS))
@@ -1760,7 +1772,7 @@ void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
/* at early boot, bail here and defer setup to of_init() */
if (of_kset && of_node_is_attached(np))
- sysfs_remove_bin_file(&np->kobj, &prop->attr);
+ __of_sysfs_remove_bin_file(np, prop);
}
/**
@@ -1830,7 +1842,7 @@ void __of_update_property_sysfs(struct device_node *np, struct property *newprop
return;
if (oldprop)
- sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
+ __of_sysfs_remove_bin_file(np, oldprop);
__of_add_property_sysfs(np, newprop);
}
@@ -2241,20 +2253,13 @@ struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
{
struct of_endpoint endpoint;
- struct device_node *node, *prev_node = NULL;
-
- while (1) {
- node = of_graph_get_next_endpoint(parent, prev_node);
- of_node_put(prev_node);
- if (!node)
- break;
+ struct device_node *node = NULL;
+ for_each_endpoint_of_node(parent, node) {
of_graph_parse_endpoint(node, &endpoint);
if (((port_reg == -1) || (endpoint.port == port_reg)) &&
((reg == -1) || (endpoint.id == reg)))
return node;
-
- prev_node = node;
}
return NULL;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 53826b84e0ec..2d72ddcf534f 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -55,7 +55,7 @@ void __of_detach_node_sysfs(struct device_node *np)
/* only remove properties if on sysfs */
if (of_node_is_attached(np)) {
for_each_property_of_node(np, pp)
- sysfs_remove_bin_file(&np->kobj, &pp->attr);
+ __of_sysfs_remove_bin_file(np, pp);
kobject_del(&np->kobj);
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 8e882e706cd8..46ddbee22ce3 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -81,6 +81,9 @@ extern int __of_attach_node_sysfs(struct device_node *np);
extern void __of_detach_node(struct device_node *np);
extern void __of_detach_node_sysfs(struct device_node *np);
+extern void __of_sysfs_remove_bin_file(struct device_node *np,
+ struct property *prop);
+
/* iterators for transactions, used for overlays */
/* forward iterator */
#define for_each_transaction_entry(_oft, _te) \
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7eaa4c87fec7..10a6a8e5db88 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1278,6 +1278,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ACTIVATE_EARLY;
+
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index eead54cd01b2..d7508704c992 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1372,10 +1372,10 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
if (!sysfs_initialized)
return -EACCES;
- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
- retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
- else
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+ else
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
if (retval)
goto err;
@@ -1427,10 +1427,10 @@ err_rom_file:
err_resource_files:
pci_remove_resource_files(pdev);
err_config_file:
- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
- else
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+ else
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
err:
return retval;
}
@@ -1464,10 +1464,10 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
pci_remove_capabilities_sysfs(pdev);
- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
- else
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+ else
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
pci_remove_resource_files(pdev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7e327309cf69..42774bc39786 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -287,6 +287,18 @@ static void quirk_citrine(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
+/*
+ * This chip can cause bus lockups if config addresses above 0x600
+ * are read or written.
+ */
+static void quirk_nfp6000(struct pci_dev *dev)
+{
+ dev->cfg_size = 0x600;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
+
/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
@@ -3115,13 +3127,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
}
/*
- * Atheros AR93xx chips do not behave after a bus reset. The device will
- * throw a Link Down error on AER-capable systems and regardless of AER,
- * config space of the device is never accessible again and typically
- * causes the system to hang or reset when access is attempted.
+ * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
+ * The device will throw a Link Down error on AER-capable systems and
+ * regardless of AER, config space of the device is never accessible again
+ * and typically causes the system to hang or reset when access is attempted.
* http://www.spinics.net/lists/linux-pci/msg34797.html
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 70cabc7080b3..223339ff119d 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-v3.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qrbtc-v2.o
+obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-v3-falcon.o
obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
obj-$(CONFIG_PHY_BRCMSTB_SATA) += phy-brcmstb-sata.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3-falcon.c b/drivers/phy/phy-qcom-ufs-qmp-v3-falcon.c
new file mode 100644
index 000000000000..e88c00e01e0b
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3-falcon.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v3-falcon.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v3_falcon"
+
+/*
+ * ufs_qcom_phy_qmp_v3_falcon_phy_calibrate() - program PHY calibration tables
+ * @ufs_qcom_phy: common QCOM UFS PHY structure (carries host controller rev)
+ * @is_rate_B: true to also apply the HS rate-B overlay table
+ *
+ * The rate-A table is selected by the UFS host controller revision; only
+ * revision 3.1.1 has a table here, so any other revision is rejected.
+ *
+ * Return: 0 on success, -ENODEV for an unknown PHY/host revision, or the
+ * error propagated from ufs_qcom_phy_calibrate().
+ */
+static
+int ufs_qcom_phy_qmp_v3_falcon_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+				bool is_rate_B)
+{
+	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+
+	/* The rate-B table is revision independent */
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	if ((major == 0x3) && (minor == 0x001) && (step == 0x001)) {
+		tbl_A = phy_cal_table_rate_A_3_1_1;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_1);
+	} else {
+		dev_err(ufs_qcom_phy->dev,
+			"%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+			__func__, major, minor, step);
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+
+out:
+	return err;
+}
+
+/*
+ * phy_ops .init callback: acquire the clocks and voltage regulators the
+ * PHY needs. Returns 0 on success or the error from the failing helper.
+ */
+static int ufs_qcom_phy_qmp_v3_falcon_init(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy_qmp_v3_falcon *phy = phy_get_drvdata(generic_phy);
+	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+	int err;
+
+	err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+/*
+ * Assert or deassert the PHY analog power collapse via the
+ * UFS_PHY_POWER_DOWN_CONTROL register.
+ * @power_ctrl: true to power the analog rail ON, false to collapse it.
+ */
+static
+void ufs_qcom_phy_qmp_v3_falcon_power_control(struct ufs_qcom_phy *phy,
+					bool power_ctrl)
+{
+	if (!power_ctrl) {
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+		/*
+		 * Before any transactions involving PHY, ensure PHY knows
+		 * that it's analog rail is powered ON.
+		 */
+		mb();
+	}
+}
+
+/* Intentional no-op: see comment in the body. */
+static inline
+void ufs_qcom_phy_qmp_v3_falcon_set_tx_lane_enable(struct ufs_qcom_phy *phy,
+					u32 val)
+{
+	/*
+	 * v3 PHY does not have TX_LANE_ENABLE register.
+	 * Implement this function so as not to propagate error to caller.
+	 */
+}
+
+/*
+ * Enable or disable RX LineCfg by toggling the disable bit in the
+ * UFS_PHY_LINECFG_DISABLE register.
+ * @ctrl: true enables RX LineCfg (clears the disable bit), false disables it.
+ */
+static
+void ufs_qcom_phy_qmp_v3_falcon_ctrl_rx_linecfg(struct ufs_qcom_phy *phy,
+					bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* Make sure that RX LineCfg config applied before we return */
+	mb();
+}
+
+/* Kick off the SERDES by setting the start bit in UFS_PHY_PHY_START. */
+static inline void ufs_qcom_phy_qmp_v3_falcon_start_serdes(
+					struct ufs_qcom_phy *phy)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	tmp &= ~MASK_SERDES_START;
+	tmp |= (1 << OFFSET_SERDES_START);
+	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+	/* Ensure register value is committed */
+	mb();
+}
+
+/*
+ * Poll UFS_PHY_PCS_READY_STATUS for the PCS-ready bit, sampling every
+ * 10us for up to 1s. Returns 0 when ready, or the poll-timeout error.
+ */
+static int ufs_qcom_phy_qmp_v3_falcon_is_pcs_ready(
+				struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+			val, (val & MASK_PCS_READY), 10, 1000000);
+	if (err)
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+	return err;
+}
+
+/*
+ * Debug helper: dump the QSERDES COM, PCS (PHY), RX0 and TX0 register
+ * regions through ufs_qcom_phy_dump_regs().
+ */
+static void ufs_qcom_phy_qmp_v3_falcon_dbg_register_dump(
+					struct ufs_qcom_phy *phy)
+{
+	ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+					"PHY QSERDES COM Registers ");
+	ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+					"PHY Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE, RX_SIZE,
+					"PHY RX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE, TX_SIZE,
+					"PHY TX0 Registers ");
+}
+
+/* Generic PHY framework callbacks for this PHY variant */
+struct phy_ops ufs_qcom_phy_qmp_v3_falcon_phy_ops = {
+	.init		= ufs_qcom_phy_qmp_v3_falcon_init,
+	.exit		= ufs_qcom_phy_exit,
+	.power_on	= ufs_qcom_phy_power_on,
+	.power_off	= ufs_qcom_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+/* QCOM UFS PHY variant-specific operations, consumed by the common layer */
+struct ufs_qcom_phy_specific_ops phy_v3_falcon_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qmp_v3_falcon_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qmp_v3_falcon_start_serdes,
+	.is_physical_coding_sublayer_ready =
+				ufs_qcom_phy_qmp_v3_falcon_is_pcs_ready,
+	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v3_falcon_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v3_falcon_ctrl_rx_linecfg,
+	.power_control		= ufs_qcom_phy_qmp_v3_falcon_power_control,
+	.dbg_register_dump	= ufs_qcom_phy_qmp_v3_falcon_dbg_register_dump,
+};
+
+/*
+ * Platform driver probe: allocate the variant structure (devm-managed),
+ * register it with the common QCOM UFS PHY layer, and stash it as the
+ * generic PHY's drvdata. Returns 0 on success or a negative errno.
+ */
+static int ufs_qcom_phy_qmp_v3_falcon_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qmp_v3_falcon *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+				&ufs_qcom_phy_qmp_v3_falcon_phy_ops,
+				&phy_v3_falcon_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+/*
+ * Platform driver remove: tear down the PHY through the common
+ * ufs_qcom_phy_remove() helper and report any failure.
+ */
+static int ufs_qcom_phy_qmp_v3_falcon_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy = to_phy(dev);
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int err = 0;
+
+	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+	if (err)
+		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+			__func__, err);
+
+	return err;
+}
+
+/* Device-tree match table binding this driver to the falcon v3 UFS PHY */
+static const struct of_device_id ufs_qcom_phy_qmp_v3_falcon_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qmp-v3-falcon"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v3_falcon_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v3_falcon_driver = {
+	.probe = ufs_qcom_phy_qmp_v3_falcon_probe,
+	.remove = ufs_qcom_phy_qmp_v3_falcon_remove,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qmp_v3_falcon_of_match,
+		.name = "ufs_qcom_phy_qmp_v3_falcon",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v3_falcon_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v3 falcon");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3-falcon.h b/drivers/phy/phy-qcom-ufs-qmp-v3-falcon.h
new file mode 100644
index 000000000000..e64601cc6b22
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3-falcon.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V3_FALCON_H_
+#define UFS_QCOM_PHY_QMP_V3_FALCON_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_BASE 0x000
+#define COM_OFF(x) (COM_BASE + x)
+#define COM_SIZE 0x1C0
+
+#define TX_BASE 0x400
+#define TX_OFF(x) (TX_BASE + x)
+#define TX_SIZE 0x128
+
+#define RX_BASE 0x600
+#define RX_OFF(x) (RX_BASE + x)
+#define RX_SIZE 0x1FC
+
+#define PHY_BASE 0xC00
+#define PHY_OFF(x) (PHY_BASE + x)
+#define PHY_SIZE 0x1B4
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1 COM_OFF(0x00)
+#define QSERDES_COM_ATB_SEL2 COM_OFF(0x04)
+#define QSERDES_COM_FREQ_UPDATE COM_OFF(0x08)
+#define QSERDES_COM_BG_TIMER COM_OFF(0x0C)
+#define QSERDES_COM_SSC_EN_CENTER COM_OFF(0x10)
+#define QSERDES_COM_SSC_ADJ_PER1 COM_OFF(0x14)
+#define QSERDES_COM_SSC_ADJ_PER2 COM_OFF(0x18)
+#define QSERDES_COM_SSC_PER1 COM_OFF(0x1C)
+#define QSERDES_COM_SSC_PER2 COM_OFF(0x20)
+#define QSERDES_COM_SSC_STEP_SIZE1 COM_OFF(0x24)
+#define QSERDES_COM_SSC_STEP_SIZE2 COM_OFF(0x28)
+#define QSERDES_COM_POST_DIV COM_OFF(0x2C)
+#define QSERDES_COM_POST_DIV_MUX COM_OFF(0x30)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x34)
+#define QSERDES_COM_CLK_ENABLE1 COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE COM_OFF(0x40)
+#define QSERDES_COM_PLL_EN COM_OFF(0x44)
+#define QSERDES_COM_PLL_IVCO COM_OFF(0x48)
+#define QSERDES_COM_LOCK_CMP1_MODE0 COM_OFF(0X4C)
+#define QSERDES_COM_LOCK_CMP2_MODE0 COM_OFF(0X50)
+#define QSERDES_COM_LOCK_CMP3_MODE0 COM_OFF(0X54)
+#define QSERDES_COM_LOCK_CMP1_MODE1 COM_OFF(0X58)
+#define QSERDES_COM_LOCK_CMP2_MODE1 COM_OFF(0X5C)
+#define QSERDES_COM_LOCK_CMP3_MODE1 COM_OFF(0X60)
+#define QSERDES_COM_CMD_RSVD0 COM_OFF(0x64)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL COM_OFF(0x68)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS COM_OFF(0x6C)
+#define QSERDES_COM_BG_TRIM COM_OFF(0x70)
+#define QSERDES_COM_CLK_EP_DIV COM_OFF(0x74)
+#define QSERDES_COM_CP_CTRL_MODE0 COM_OFF(0x78)
+#define QSERDES_COM_CP_CTRL_MODE1 COM_OFF(0x7C)
+#define QSERDES_COM_CMN_RSVD1 COM_OFF(0x80)
+#define QSERDES_COM_PLL_RCTRL_MODE0 COM_OFF(0x84)
+#define QSERDES_COM_PLL_RCTRL_MODE1 COM_OFF(0x88)
+#define QSERDES_COM_CMN_RSVD2 COM_OFF(0x8C)
+#define QSERDES_COM_PLL_CCTRL_MODE0 COM_OFF(0x90)
+#define QSERDES_COM_PLL_CCTRL_MODE1 COM_OFF(0x94)
+#define QSERDES_COM_CMN_RSVD3 COM_OFF(0x98)
+#define QSERDES_COM_PLL_CNTRL COM_OFF(0x9C)
+#define QSERDES_COM_PHASE_SEL_CTRL COM_OFF(0xA0)
+#define QSERDES_COM_PHASE_SEL_DC COM_OFF(0xA4)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM COM_OFF(0xA8)
+#define QSERDES_COM_SYSCLK_EN_SEL COM_OFF(0xAC)
+#define QSERDES_COM_CML_SYSCLK_SEL COM_OFF(0xB0)
+#define QSERDES_COM_RESETSM_CNTRL COM_OFF(0xB4)
+#define QSERDES_COM_RESETSM_CNTRL2 COM_OFF(0xB8)
+#define QSERDES_COM_RESTRIM_CTRL COM_OFF(0xBC)
+#define QSERDES_COM_RESTRIM_CTRL2 COM_OFF(0xC0)
+#define QSERDES_COM_LOCK_CMP_EN COM_OFF(0xC8)
+#define QSERDES_COM_LOCK_CMP_CFG COM_OFF(0xCC)
+#define QSERDES_COM_DEC_START_MODE0 COM_OFF(0xD0)
+#define QSERDES_COM_DEC_START_MODE1 COM_OFF(0xD4)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL COM_OFF(0xD8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 COM_OFF(0xE0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 COM_OFF(0xE4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 COM_OFF(0xE8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 COM_OFF(0xEC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 COM_OFF(0xF0)
+#define QSERDES_COM_VCO_TUNE_MINVAL1 COM_OFF(0xF4)
+#define QSERDES_COM_VCO_TUNE_MINVAL2 COM_OFF(0xF8)
+#define QSERDES_COM_CMN_RSVD4 COM_OFF(0xFC)
+#define QSERDES_COM_INTEGLOOP_INITVAL COM_OFF(0x100)
+#define QSERDES_COM_INTEGLOOP_EN COM_OFF(0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 COM_OFF(0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 COM_OFF(0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 COM_OFF(0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1 COM_OFF(0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2 COM_OFF(0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2 COM_OFF(0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL COM_OFF(0x124)
+#define QSERDES_COM_VCO_TUNE_MAP COM_OFF(0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0 COM_OFF(0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0 COM_OFF(0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1 COM_OFF(0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1 COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_INITVAL1 COM_OFF(0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL2 COM_OFF(0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1 COM_OFF(0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2 COM_OFF(0x148)
+#define QSERDES_COM_SAR COM_OFF(0x14C)
+#define QSERDES_COM_SAR_CLK COM_OFF(0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS COM_OFF(0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS COM_OFF(0x158)
+#define QSERDES_COM_CMN_STATUS COM_OFF(0x15C)
+#define QSERDES_COM_RESET_SM_STATUS COM_OFF(0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS COM_OFF(0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS COM_OFF(0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS COM_OFF(0x16C)
+#define QSERDES_COM_BG_CTRL COM_OFF(0x170)
+#define QSERDES_COM_CLK_SELECT COM_OFF(0x174)
+#define QSERDES_COM_HSCLK_SEL COM_OFF(0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS COM_OFF(0x17C)
+#define QSERDES_COM_PLL_ANALOG COM_OFF(0x180)
+#define QSERDES_COM_CORECLK_DIV COM_OFF(0x184)
+#define QSERDES_COM_SW_RESET COM_OFF(0x188)
+#define QSERDES_COM_CORE_CLK_EN COM_OFF(0x18C)
+#define QSERDES_COM_C_READY_STATUS COM_OFF(0x190)
+#define QSERDES_COM_CMN_CONFIG COM_OFF(0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE COM_OFF(0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL COM_OFF(0x19C)
+#define QSERDES_COM_DEBUG_BUS0 COM_OFF(0x1A0)
+#define QSERDES_COM_DEBUG_BUS1 COM_OFF(0x1A4)
+#define QSERDES_COM_DEBUG_BUS2 COM_OFF(0x1A8)
+#define QSERDES_COM_DEBUG_BUS3 COM_OFF(0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL COM_OFF(0x1B0)
+#define QSERDES_COM_CMN_MISC1 COM_OFF(0x1B4)
+#define QSERDES_COM_CORECLK_DIV_MODE1 COM_OFF(0x1BC)
+#define QSERDES_COM_CMN_RSVD5 COM_OFF(0x1C0)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x04)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL PHY_OFF(0x34)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL PHY_OFF(0x3C)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP PHY_OFF(0xCC)
+#define UFS_PHY_LINECFG_DISABLE PHY_OFF(0x138)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL PHY_OFF(0x13C)
+#define UFS_PHY_RX_SIGDET_CTRL2 PHY_OFF(0x148)
+#define UFS_PHY_RX_PWM_GEAR_BAND PHY_OFF(0x154)
+#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x168)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN TX_OFF(0x68)
+#define QSERDES_TX_LANE_MODE TX_OFF(0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF RX_OFF(0x30)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER RX_OFF(0x34)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH RX_OFF(0x38)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN RX_OFF(0x3C)
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN RX_OFF(0x40)
+#define QSERDES_RX_UCDR_SO_SATURATION_ENABLE RX_OFF(0x48)
+#define QSERDES_RX_RX_TERM_BW RX_OFF(0x90)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB RX_OFF(0xC4)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB RX_OFF(0xC8)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB RX_OFF(0xCC)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB RX_OFF(0xD0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 RX_OFF(0xD8)
+#define QSERDES_RX_SIGDET_CNTRL RX_OFF(0x114)
+#define QSERDES_RX_SIGDET_LVL RX_OFF(0x118)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL RX_OFF(0x11C)
+#define QSERDES_RX_RX_INTERFACE_MODE RX_OFF(0x12C)
+
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT BIT(1)
+
+/*
+ * This structure represents the v3 falcon specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_v3_falcon {
+	struct ufs_qcom_phy common_cfg;	/* must be first; see comment above */
+};
+
+/*
+ * HS rate-A calibration sequence for UFS host controller revision 3.1.1.
+ * Register/value pairs are written in order by ufs_qcom_phy_calibrate().
+ */
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_1[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	/* PLL MODE0 (HS series A) dividers, loop gains and lock comparators */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	/* PLL MODE1 (HS series B) dividers, loop gains and lock comparators */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	/* TX lane settings */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	/* RX lane settings: signal detect, termination and equalizer gains */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	/* Remaining common, PWM-gear and PCS-level settings */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+};
+
+/* HS rate-B overlay: applied after the rate-A table when is_rate_B is set */
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+#endif
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 1029aa7889b5..398ec45aadef 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -207,9 +207,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
pin_reg = &info->pin_regs[pin_id];
if (pin_reg->mux_reg == -1) {
- dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+ dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
info->pins[pin_id].name);
- return -EINVAL;
+ continue;
}
if (info->flags & SHARE_MUX_CONF_REG) {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 84936bae6e5e..4e377599d266 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -160,7 +160,6 @@ struct chv_pin_context {
* @pctldev: Pointer to the pin controller device
* @chip: GPIO chip in this pin controller
* @regs: MMIO registers
- * @lock: Lock to serialize register accesses
* @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
* offset (in GPIO number space)
* @community: Community this pinctrl instance represents
@@ -174,7 +173,6 @@ struct chv_pinctrl {
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
void __iomem *regs;
- raw_spinlock_t lock;
unsigned intr_lines[16];
const struct chv_community *community;
u32 saved_intmask;
@@ -659,6 +657,17 @@ static const struct chv_community *chv_communities[] = {
&southeast_community,
};
+/*
+ * Lock to serialize register accesses
+ *
+ * Due to a silicon issue, a shared lock must be used to prevent
+ * concurrent accesses across the 4 GPIO controllers.
+ *
+ * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
+ * errata #CHT34, for further information.
+ */
+static DEFINE_RAW_SPINLOCK(chv_lock);
+
static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
unsigned reg)
{
@@ -720,13 +729,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
u32 ctrl0, ctrl1;
bool locked;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
locked = chv_pad_locked(pctrl, offset);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
seq_puts(s, "GPIO ");
@@ -789,14 +798,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
grp = &pctrl->community->groups[group];
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
/* Check first that the pad is not locked */
for (i = 0; i < grp->npins; i++) {
if (chv_pad_locked(pctrl, grp->pins[i])) {
dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
grp->pins[i]);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EBUSY;
}
}
@@ -839,7 +848,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
}
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -853,13 +862,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
void __iomem *reg;
u32 value;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
if (chv_pad_locked(pctrl, offset)) {
value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
if (!(value & CHV_PADCTRL0_GPIOEN)) {
/* Locked so cannot enable */
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EBUSY;
}
} else {
@@ -899,7 +908,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
chv_writel(value, reg);
}
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -913,13 +922,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
void __iomem *reg;
u32 value;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
chv_writel(value, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
}
static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -931,7 +940,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned long flags;
u32 ctrl0;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
if (input)
@@ -940,7 +949,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
chv_writel(ctrl0, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -965,10 +974,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
u16 arg = 0;
u32 term;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
@@ -1042,7 +1051,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
unsigned long flags;
u32 ctrl0, pull;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(reg);
switch (param) {
@@ -1065,7 +1074,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
break;
default:
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
@@ -1083,7 +1092,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
break;
default:
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
@@ -1091,12 +1100,12 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
break;
default:
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return -EINVAL;
}
chv_writel(ctrl0, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -1162,9 +1171,9 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
u32 ctrl0, cfg;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1182,7 +1191,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
void __iomem *reg;
u32 ctrl0;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
ctrl0 = readl(reg);
@@ -1194,7 +1203,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
chv_writel(ctrl0, reg);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
}
static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -1204,9 +1213,9 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
u32 ctrl0, direction;
unsigned long flags;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1244,14 +1253,14 @@ static void chv_gpio_irq_ack(struct irq_data *d)
int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
u32 intr_line;
- raw_spin_lock(&pctrl->lock);
+ raw_spin_lock(&chv_lock);
intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
- raw_spin_unlock(&pctrl->lock);
+ raw_spin_unlock(&chv_lock);
}
static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
@@ -1262,7 +1271,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
u32 value, intr_line;
unsigned long flags;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
@@ -1275,7 +1284,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
value |= BIT(intr_line);
chv_writel(value, pctrl->regs + CHV_INTMASK);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
}
static void chv_gpio_irq_mask(struct irq_data *d)
@@ -1309,7 +1318,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
unsigned long flags;
u32 intsel, value;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
@@ -1324,7 +1333,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
irq_set_handler_locked(d, handler);
pctrl->intr_lines[intsel] = offset;
}
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
}
chv_gpio_irq_unmask(d);
@@ -1340,7 +1349,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&chv_lock, flags);
/*
* Pins which can be used as shared interrupt are configured in
@@ -1389,7 +1398,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
return 0;
}
@@ -1501,7 +1510,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
if (i == ARRAY_SIZE(chv_communities))
return -ENODEV;
- raw_spin_lock_init(&pctrl->lock);
pctrl->dev = &pdev->dev;
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 3318f1d6193c..7340ff78839a 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -48,17 +48,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
- /*
- * Suppose BIOS or Bootloader sets specific debounce for the
- * GPIO. if not, set debounce to be 2.75ms and remove glitch.
- */
- if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
- pin_reg |= 0xf;
- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
- pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
- }
-
pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -331,15 +320,6 @@ static void amd_gpio_irq_enable(struct irq_data *d)
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
- /*
- Suppose BIOS or Bootloader sets specific debounce for the
- GPIO. if not, set debounce to be 2.75ms.
- */
- if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
- pin_reg |= 0xf;
- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
- pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
- }
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 23b6b8c29a99..73d8d47ea465 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1576,6 +1576,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
else
mask &= ~soc_mask;
pcs->write(mask, pcswi->reg);
+
+ /* flush posted write */
+ mask = pcs->read(pcswi->reg);
raw_spin_unlock(&pcs->lock);
}
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index d45cd254ed1c..2b331d5b9e79 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -147,13 +147,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
goto exit;
}
+ if (u_cmd.outsize != s_cmd->outsize ||
+ u_cmd.insize != s_cmd->insize) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
- if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+ if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 990308ca384f..92430f781eb7 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -380,3 +380,20 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
return ret;
}
EXPORT_SYMBOL(cros_ec_cmd_xfer);
+
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ int ret;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret < 0) {
+ dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret);
+ } else if (msg->result != EC_RES_SUCCESS) {
+ dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result);
+ return -EPROTO;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 159452c6e3bd..4dc3910737e1 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1634,6 +1634,25 @@ u16 ipa_get_smem_restr_bytes(void)
EXPORT_SYMBOL(ipa_get_smem_restr_bytes);
/**
+ * ipa_broadcast_wdi_quota_reach_ind() - broadcast a quota-reached indication
+ * @fid: [in] input netdev ID
+ * @num_bytes: [in] number of bytes already used
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+ uint64_t num_bytes)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_broadcast_wdi_quota_reach_ind,
+ fid, num_bytes);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_broadcast_wdi_quota_reach_ind);
+
+/**
* ipa_uc_wdi_get_dbpa() - To retrieve
* doorbell physical address of wlan pipes
* @param: [in/out] input/output parameters
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 78fcdeb4b7a0..69bc4ae1fa6a 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -183,6 +183,9 @@ struct ipa_api_controller {
u16 (*ipa_get_smem_restr_bytes)(void);
+ int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
+ uint64_t num_bytes);
+
int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out);
int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index d51e9ac97fe0..a7f1f9a040f9 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -2328,6 +2328,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
unsigned int used = *(unsigned int *)skb->cb;
unsigned int used_align = ALIGN(used, 32);
unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+ u32 skb2_len;
IPA_DUMP_BUFF(skb->data, 0, skb->len);
@@ -2510,8 +2511,9 @@ begin:
sys->drop_packet = true;
}
- skb2 = ipa_skb_copy_for_client(skb,
- status->pkt_len + IPA_PKT_STATUS_SIZE);
+ skb2_len = status->pkt_len + IPA_PKT_STATUS_SIZE;
+ skb2_len = min(skb2_len, skb->len);
+ skb2 = ipa_skb_copy_for_client(skb, skb2_len);
if (likely(skb2)) {
if (skb->len < len + IPA_PKT_STATUS_SIZE) {
IPADBG("SPL skb len %d len %d\n",
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 3a666419385e..b4f447f56d1c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -963,6 +963,10 @@ struct ipa3_uc_wdi_ctx {
struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
void *priv;
ipa_uc_ready_cb uc_ready_cb;
+ /* for AP+STA stats update */
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+ ipa_wdi_meter_notifier_cb stats_notify;
+#endif
};
/**
@@ -1674,6 +1678,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl);
int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
@@ -1714,6 +1719,9 @@ enum ipacm_client_enum ipa3_get_client(int pipe_idx);
bool ipa3_get_client_uplink(int pipe_idx);
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
+
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
/*
* IPADMA
*/
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index a6b075583162..bf794faa7f6a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -880,7 +880,8 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
qmi_ind.apn.mux_id,
(unsigned long int) qmi_ind.apn.num_Mbytes);
- ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+ ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
+ IPA_UPSTEAM_MODEM);
}
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index e0126ec392c3..044200485878 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -181,7 +181,8 @@ int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats
int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
-void ipa3_broadcast_quota_reach_ind(uint32_t mux_id);
+void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+ enum ipa_upstream_type upstream_type);
int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
*data);
@@ -189,6 +190,8 @@ int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
bool reset);
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+
int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
struct ipa_get_data_stats_resp_msg_v01 *resp);
@@ -283,7 +286,8 @@ static inline int rmnet_ipa3_set_data_quota(
return -EPERM;
}
-static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id) { }
+static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+ enum ipa_upstream_type upstream_type) { }
static inline int ipa3_qmi_get_data_stats(
struct ipa_get_data_stats_req_msg_v01 *req,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index e38f6f2860cf..8cb6935cd720 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -13,6 +13,7 @@
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include "ipa_qmi_service.h"
#define IPA_HOLB_TMR_DIS 0x0
@@ -1185,6 +1186,12 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
ep->client_notify = in->sys.notify;
ep->priv = in->sys.priv;
+ /* for AP+STA stats update */
+ if (in->wdi_notify)
+ ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify;
+ else
+ IPADBG("in->wdi_notify is null\n");
+
if (!ep->skip_ep_cfg) {
if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
IPAERR("fail to configure EP.\n");
@@ -1276,6 +1283,12 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+ /* for AP+STA stats update */
+ if (ipa3_ctx->uc_wdi_ctx.stats_notify)
+ ipa3_ctx->uc_wdi_ctx.stats_notify = NULL;
+ else
+ IPADBG("uc_wdi_ctx.stats_notify already null\n");
+
uc_timeout:
return result;
}
@@ -1618,6 +1631,23 @@ uc_timeout:
return result;
}
+/**
+ * ipa3_broadcast_wdi_quota_reach_ind() - broadcast a quota-reached indication
+ * @fid: [in] input netdev ID
+ * @num_bytes: [in] number of bytes already used
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid,
+ uint64_t num_bytes)
+{
+ IPAERR("Quota reached indication on fid(%u) Mbytes(%lu)\n",
+ fid,
+ (unsigned long int) num_bytes);
+ ipa3_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN);
+ return 0;
+}
+
int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
{
int result = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index c5b5d1892485..683777d4cacd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1000,11 +1000,39 @@ void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
}
}
+/* ipa3_get_wlan_stats() - get ipa wifi stats via the registered callback
+ *
+ * Return value: 0 on success, -EFAULT if no stats callback is registered
+ */
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
+{
+ if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+ ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
+ wdi_sap_stats);
+ } else {
+ IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
+{
+ if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+ ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
+ wdi_quota);
+ } else {
+ IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
/**
* ipa3_get_client() - provide client mapping
* @client: client type
*
- * Return value: none
+ * Return value: client mapping enum
*/
enum ipacm_client_enum ipa3_get_client(int pipe_idx)
{
@@ -3122,6 +3150,8 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
+ api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
+ ipa3_broadcast_wdi_quota_reach_ind;
api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 0419249890e9..ff197705d845 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -52,6 +52,7 @@
#define DEFAULT_OUTSTANDING_LOW 32
#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
@@ -783,6 +784,22 @@ static int find_vchannel_name_index(const char *vchannel_name)
return MAX_NUM_OF_MUX_CHANNEL;
}
+static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+ if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+ upstreamIface) == 0)
+ return IPA_UPSTEAM_MODEM;
+ }
+
+ if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
+ return IPA_UPSTEAM_WLAN;
+ else
+ return IPA_UPSTEAM_MAX;
+}
+
static int ipa3_wwan_register_to_ipa(int index)
{
struct ipa_tx_intf tx_properties = {0};
@@ -2598,10 +2615,10 @@ int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
}
/**
- * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
* @data - IOCTL data
*
- * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
* It translates the given interface name to the Modem MUX ID and
* sends the request of the quota to the IPA Modem driver via QMI.
*
@@ -2610,12 +2627,17 @@ int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
* -EFAULT: Invalid interface name provided
* other: See ipa_qmi_set_data_quota
*/
-int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+static int rmnet_ipa3_set_data_quota_modem(
+ struct wan_ioctl_set_data_quota *data)
{
u32 mux_id;
int index;
struct ipa_set_data_usage_quota_req_msg_v01 req;
+ /* stop quota */
+ if (!data->set_quota)
+ ipa3_qmi_stop_data_qouta();
+
index = find_vchannel_name_index(data->interface_name);
IPAWANERR("iface name %s, quota %lu\n",
data->interface_name,
@@ -2639,6 +2661,64 @@ int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
return ipa3_qmi_set_data_quota(&req);
}
+static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
+{
+ struct ipa_set_wifi_quota wifi_quota;
+ int rc = 0;
+
+ memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
+ wifi_quota.set_quota = data->set_quota;
+ wifi_quota.quota_bytes = data->quota_mbytes;
+ IPAWANERR("iface name %s, quota %lu\n",
+ data->interface_name,
+ (unsigned long int) data->quota_mbytes);
+
+ rc = ipa3_set_wlan_quota(&wifi_quota);
+ /* check if wlan-fw takes this quota-set */
+ if (!wifi_quota.set_valid)
+ rc = -EFAULT;
+ return rc;
+}
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the request of the quota to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->interface_name);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR("Wrong interface_name name %s\n",
+ data->interface_name);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ rc = rmnet_ipa3_set_data_quota_wifi(data);
+ if (rc) {
+ IPAWANERR("set quota on wifi failed\n");
+ return rc;
+ }
+ } else {
+ rc = rmnet_ipa3_set_data_quota_modem(data);
+ if (rc) {
+ IPAWANERR("set quota on modem failed\n");
+ return rc;
+ }
+ }
+ return rc;
+}
/* rmnet_ipa_set_tether_client_pipe() -
* @data - IOCTL data
*
@@ -2686,8 +2766,61 @@ int rmnet_ipa3_set_tether_client_pipe(
return 0;
}
-int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
- bool reset)
+static int rmnet_ipa3_query_tethering_stats_wifi(
+ struct wan_ioctl_query_tether_stats *data, bool reset)
+{
+ struct ipa_get_wdi_sap_stats *sap_stats;
+ int rc;
+
+ sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
+ GFP_KERNEL);
+ if (!sap_stats) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ return -ENOMEM;
+ }
+ memset(sap_stats, 0, sizeof(struct ipa_get_wdi_sap_stats));
+
+ sap_stats->reset_stats = reset;
+ IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
+
+ rc = ipa3_get_wlan_stats(sap_stats);
+ if (rc) {
+ IPAWANERR("can't get ipa3_get_wlan_stats\n");
+ kfree(sap_stats);
+ return rc;
+ } else if (reset) {
+ kfree(sap_stats);
+ return 0;
+ }
+
+ if (sap_stats->stats_valid) {
+ data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
+ data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
+ data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
+ data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
+ data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
+ data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
+ data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
+ data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
+ }
+
+ IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+ (unsigned long int) data->ipv4_rx_packets,
+ (unsigned long int) data->ipv6_rx_packets,
+ (unsigned long int) data->ipv4_rx_bytes,
+ (unsigned long int) data->ipv6_rx_bytes);
+ IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+ (unsigned long int) data->ipv4_tx_packets,
+ (unsigned long int) data->ipv6_tx_packets,
+ (unsigned long int) data->ipv4_tx_bytes,
+ (unsigned long int) data->ipv6_tx_bytes);
+
+ kfree(sap_stats);
+ return rc;
+}
+
+static int rmnet_ipa3_query_tethering_stats_modem(
+ struct wan_ioctl_query_tether_stats *data, bool reset)
{
struct ipa_get_data_stats_req_msg_v01 *req;
struct ipa_get_data_stats_resp_msg_v01 *resp;
@@ -2774,7 +2907,7 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
}
}
}
- IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+ IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
(unsigned long int) data->ipv4_rx_packets,
(unsigned long int) data->ipv6_rx_packets,
(unsigned long int) data->ipv4_rx_bytes,
@@ -2824,7 +2957,7 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
}
}
}
- IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+ IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
(unsigned long int) data->ipv4_tx_packets,
(unsigned long int) data->ipv6_tx_packets,
(unsigned long int) data->ipv4_tx_bytes,
@@ -2834,6 +2967,69 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
return 0;
}
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+ bool reset)
+{
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->upstreamIface);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR(" Wrong upstreamIface name %s\n",
+ data->upstreamIface);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ IPAWANDBG_LOW(" query wifi-backhaul stats\n");
+ rc = rmnet_ipa3_query_tethering_stats_wifi(
+ data, false);
+ if (rc) {
+ IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ } else {
+ IPAWANDBG_LOW(" query modem-backhaul stats\n");
+ rc = rmnet_ipa3_query_tethering_stats_modem(
+ data, false);
+ if (rc) {
+ IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
+{
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->upstreamIface);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR(" Wrong upstreamIface name %s\n",
+ data->upstreamIface);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ IPAWANERR(" reset wifi-backhaul stats\n");
+ rc = rmnet_ipa3_query_tethering_stats_wifi(
+ NULL, true);
+ if (rc) {
+ IPAWANERR("reset WLAN stats failed\n");
+ return rc;
+ }
+ } else {
+ IPAWANERR(" reset modem-backhaul stats\n");
+ rc = rmnet_ipa3_query_tethering_stats_modem(
+ NULL, true);
+ if (rc) {
+ IPAWANERR("reset MODEM stats failed\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
/**
* ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
* @mux_id - The MUX ID on which the quota has been reached
@@ -2843,23 +3039,28 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
* on the specific interface which matches the mux_id has been reached.
*
*/
-void ipa3_broadcast_quota_reach_ind(u32 mux_id)
+void ipa3_broadcast_quota_reach_ind(u32 mux_id,
+ enum ipa_upstream_type upstream_type)
{
char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
char *envp[IPA_UEVENT_NUM_EVNP] = {
- alert_msg, iface_name_l, iface_name_m, NULL };
+ alert_msg, iface_name_l, iface_name_m, NULL};
int res;
int index;
- index = ipa3_find_mux_channel_index(mux_id);
-
- if (index == MAX_NUM_OF_MUX_CHANNEL) {
- IPAWANERR("%u is an mux ID\n", mux_id);
+ /* check upstream_type */
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR(" Wrong upstreamIface type %d\n", upstream_type);
return;
+ } else if (upstream_type == IPA_UPSTEAM_MODEM) {
+ index = ipa3_find_mux_channel_index(mux_id);
+ if (index == MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("%u is an mux ID\n", mux_id);
+ return;
+ }
}
-
res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
"ALERT_NAME=%s", "quotaReachedAlert");
if (IPA_QUOTA_REACH_ALERT_MAX_SIZE <= res) {
@@ -2867,15 +3068,25 @@ void ipa3_broadcast_quota_reach_ind(u32 mux_id)
return;
}
/* posting msg for L-release for CNE */
+ if (upstream_type == IPA_UPSTEAM_MODEM) {
res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
"UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+ } else {
+ res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+ }
if (IPA_QUOTA_REACH_IF_NAME_MAX_SIZE <= res) {
IPAWANERR("message too long (%d)", res);
return;
}
/* posting msg for M-release for CNE */
+ if (upstream_type == IPA_UPSTEAM_MODEM) {
res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
"INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+ } else {
+ res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+ }
if (IPA_QUOTA_REACH_IF_NAME_MAX_SIZE <= res) {
IPAWANERR("message too long (%d)", res);
return;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 92636cba0f1c..ffd127a21ed1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -279,8 +279,9 @@ static long ipa3_wan_ioctl(struct file *filp,
break;
}
- if (rmnet_ipa3_query_tethering_stats(NULL, true)) {
- IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+ if (rmnet_ipa3_reset_tethering_stats(
+ (struct wan_ioctl_reset_tether_stats *)param)) {
+ IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n");
retval = -EFAULT;
break;
}
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index fb4dd7b3ee71..af2046c87806 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -723,6 +723,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
if (err)
return err;
+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
+ sizeof(wireless), 0);
+ if (err)
+ return err;
+
if (wireless & 0x1) {
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
RFKILL_TYPE_WLAN,
@@ -910,7 +915,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
gps_rfkill = NULL;
rfkill2_count = 0;
- if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
+ if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
err = device_create_file(&device->dev, &dev_attr_display);
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 943c1cb9566c..d28e3ab9479c 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -342,7 +342,9 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
/* Device IDs of parts that have 32KB MCH space */
static const unsigned int mch_quirk_devices[] = {
0x0154, /* Ivy Bridge */
+ 0x0a04, /* Haswell-ULT */
0x0c00, /* Haswell */
+ 0x1604, /* Broadwell */
};
static struct pci_dev *get_intel_host(void)
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 456987c88baa..b13cd074c52a 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
WARN_ON(tzd == NULL);
psy = tzd->devdata;
- ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ if (ret)
+ return ret;
/* Convert tenths of degree Celsius to milli degree Celsius. */
- if (!ret)
- *temp = val.intval * 100;
+ *temp = val.intval * 100;
return ret;
}
@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
diff --git a/drivers/power/qcom-charger/fg-core.h b/drivers/power/qcom-charger/fg-core.h
index 9ddd18800f9d..d8b6754a465f 100644
--- a/drivers/power/qcom-charger/fg-core.h
+++ b/drivers/power/qcom-charger/fg-core.h
@@ -39,6 +39,12 @@
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
+#define is_between(left, right, value) \
+ (((left) >= (right) && (left) >= (value) \
+ && (value) >= (right)) \
+ || ((left) <= (right) && (left) <= (value) \
+ && (value) <= (right)))
+
/* Awake votable reasons */
#define SRAM_READ "fg_sram_read"
#define SRAM_WRITE "fg_sram_write"
@@ -205,10 +211,10 @@ struct fg_dt_props {
int recharge_soc_thr;
int recharge_volt_thr_mv;
int rsense_sel;
- int jeita_thresholds[NUM_JEITA_LEVELS];
int esr_timer_charging;
int esr_timer_awake;
int esr_timer_asleep;
+ int rconn_mohms;
int cl_start_soc;
int cl_max_temp;
int cl_min_temp;
@@ -218,6 +224,7 @@ struct fg_dt_props {
int cl_min_cap_limit;
int jeita_hyst_temp;
int batt_temp_delta;
+ int jeita_thresholds[NUM_JEITA_LEVELS];
int ki_coeff_soc[KI_COEFF_SOC_LEVELS];
int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
int ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS];
@@ -310,8 +317,9 @@ struct fg_chip {
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
+ u32 rradc_base;
u32 wa_flags;
- int batt_id_kohms;
+ int batt_id_ohms;
int charge_status;
int prev_charge_status;
int charge_done;
diff --git a/drivers/power/qcom-charger/fg-reg.h b/drivers/power/qcom-charger/fg-reg.h
index ffc46f328f91..7ad26215e469 100644
--- a/drivers/power/qcom-charger/fg-reg.h
+++ b/drivers/power/qcom-charger/fg-reg.h
@@ -13,6 +13,10 @@
#ifndef __FG_REG_H__
#define __FG_REG_H__
+/* FG_ADC_RR register definitions used only for READ */
+#define ADC_RR_FAKE_BATT_LOW_LSB(chip) (chip->rradc_base + 0x58)
+#define ADC_RR_FAKE_BATT_HIGH_LSB(chip) (chip->rradc_base + 0x5A)
+
/* FG_BATT_SOC register definitions */
#define BATT_SOC_FG_ALG_STS(chip) (chip->batt_soc_base + 0x06)
#define BATT_SOC_FG_ALG_AUX_STS0(chip) (chip->batt_soc_base + 0x07)
diff --git a/drivers/power/qcom-charger/qpnp-fg-gen3.c b/drivers/power/qcom-charger/qpnp-fg-gen3.c
index 84204163f076..22025ac27ffa 100644
--- a/drivers/power/qcom-charger/qpnp-fg-gen3.c
+++ b/drivers/power/qcom-charger/qpnp-fg-gen3.c
@@ -62,6 +62,10 @@
#define ESR_TIMER_CHG_INIT_OFFSET 2
#define PROFILE_LOAD_WORD 24
#define PROFILE_LOAD_OFFSET 0
+#define ESR_RSLOW_DISCHG_WORD 34
+#define ESR_RSLOW_DISCHG_OFFSET 0
+#define ESR_RSLOW_CHG_WORD 51
+#define ESR_RSLOW_CHG_OFFSET 0
#define NOM_CAP_WORD 58
#define NOM_CAP_OFFSET 0
#define ACT_BATT_CAP_BKUP_WORD 74
@@ -69,6 +73,7 @@
#define CYCLE_COUNT_WORD 75
#define CYCLE_COUNT_OFFSET 0
#define PROFILE_INTEGRITY_WORD 79
+#define SW_CONFIG_OFFSET 0
#define PROFILE_INTEGRITY_OFFSET 3
#define BATT_SOC_WORD 91
#define BATT_SOC_OFFSET 0
@@ -564,21 +569,21 @@ static int fg_get_battery_esr(struct fg_chip *chip, int *val)
static int fg_get_battery_resistance(struct fg_chip *chip, int *val)
{
- int rc, esr, rslow;
+ int rc, esr_uohms, rslow_uohms;
- rc = fg_get_battery_esr(chip, &esr);
+ rc = fg_get_battery_esr(chip, &esr_uohms);
if (rc < 0) {
pr_err("failed to get ESR, rc=%d\n", rc);
return rc;
}
- rc = fg_get_sram_prop(chip, FG_SRAM_RSLOW, &rslow);
+ rc = fg_get_sram_prop(chip, FG_SRAM_RSLOW, &rslow_uohms);
if (rc < 0) {
pr_err("failed to get Rslow, rc=%d\n", rc);
return rc;
}
- *val = esr + rslow;
+ *val = esr_uohms + rslow_uohms;
return 0;
}
@@ -693,18 +698,57 @@ static bool is_batt_empty(struct fg_chip *chip)
return ((vbatt_uv < chip->dt.cutoff_volt_mv * 1000) ? true : false);
}
-#define DEBUG_BATT_ID_KOHMS 7
+static int fg_get_debug_batt_id(struct fg_chip *chip, int *batt_id)
+{
+ int rc;
+ u64 temp;
+ u8 buf[2];
+
+ rc = fg_read(chip, ADC_RR_FAKE_BATT_LOW_LSB(chip), buf, 2);
+ if (rc < 0) {
+ pr_err("failed to read addr=0x%04x, rc=%d\n",
+ ADC_RR_FAKE_BATT_LOW_LSB(chip), rc);
+ return rc;
+ }
+
+ /*
+ * Fake battery threshold is encoded in the following format.
+ * Threshold (code) = (battery_id in Ohms) * 0.00015 * 2^10 / 2.5
+ */
+ temp = (buf[1] << 8 | buf[0]) * 2500000;
+ do_div(temp, 150 * 1024);
+ batt_id[0] = temp;
+ rc = fg_read(chip, ADC_RR_FAKE_BATT_HIGH_LSB(chip), buf, 2);
+ if (rc < 0) {
+ pr_err("failed to read addr=0x%04x, rc=%d\n",
+ ADC_RR_FAKE_BATT_HIGH_LSB(chip), rc);
+ return rc;
+ }
+
+ temp = (buf[1] << 8 | buf[0]) * 2500000;
+ do_div(temp, 150 * 1024);
+ batt_id[1] = temp;
+ pr_debug("debug batt_id range: [%d %d]\n", batt_id[0], batt_id[1]);
+ return 0;
+}
+
static bool is_debug_batt_id(struct fg_chip *chip)
{
- int batt_id_delta = 0;
+ int debug_batt_id[2], rc;
- if (!chip->batt_id_kohms)
+ if (!chip->batt_id_ohms)
return false;
- batt_id_delta = abs(chip->batt_id_kohms - DEBUG_BATT_ID_KOHMS);
- if (batt_id_delta <= 1) {
- fg_dbg(chip, FG_POWER_SUPPLY, "Debug battery id: %dKohms\n",
- chip->batt_id_kohms);
+ rc = fg_get_debug_batt_id(chip, debug_batt_id);
+ if (rc < 0) {
+ pr_err("Failed to get debug batt_id, rc=%d\n", rc);
+ return false;
+ }
+
+ if (is_between(debug_batt_id[0], debug_batt_id[1],
+ chip->batt_id_ohms)) {
+ fg_dbg(chip, FG_POWER_SUPPLY, "Debug battery id: %dohms\n",
+ chip->batt_id_ohms);
return true;
}
@@ -792,8 +836,8 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return rc;
}
+ chip->batt_id_ohms = batt_id;
batt_id /= 1000;
- chip->batt_id_kohms = batt_id;
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_err("Batterydata not available\n");
@@ -824,10 +868,10 @@ static int fg_get_batt_profile(struct fg_chip *chip)
chip->bp.float_volt_uv = -EINVAL;
}
- rc = of_property_read_u32(profile_node, "qcom,nom-batt-capacity-mah",
+ rc = of_property_read_u32(profile_node, "qcom,fastchg-current-ma",
&chip->bp.fastchg_curr_ma);
if (rc < 0) {
- pr_err("battery nominal capacity unavailable, rc:%d\n", rc);
+ pr_err("battery fastchg current unavailable, rc:%d\n", rc);
chip->bp.fastchg_curr_ma = -EINVAL;
}
@@ -1368,6 +1412,80 @@ static int fg_charge_full_update(struct fg_chip *chip)
return 0;
}
+#define RCONN_CONFIG_BIT BIT(0)
+static int fg_rconn_config(struct fg_chip *chip)
+{
+ int rc, esr_uohms;
+ u64 scaling_factor;
+ u32 val = 0;
+
+ rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+ SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading SW_CONFIG_OFFSET, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (val & RCONN_CONFIG_BIT) {
+ fg_dbg(chip, FG_STATUS, "Rconn already configured: %x\n", val);
+ return 0;
+ }
+
+ rc = fg_get_battery_esr(chip, &esr_uohms);
+ if (rc < 0) {
+ pr_err("failed to get ESR, rc=%d\n", rc);
+ return rc;
+ }
+
+ scaling_factor = div64_u64((u64)esr_uohms * 1000,
+ esr_uohms + (chip->dt.rconn_mohms * 1000));
+
+ rc = fg_sram_read(chip, ESR_RSLOW_CHG_WORD,
+ ESR_RSLOW_CHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+ return rc;
+ }
+
+ val *= scaling_factor;
+ do_div(val, 1000);
+ rc = fg_sram_write(chip, ESR_RSLOW_CHG_WORD,
+ ESR_RSLOW_CHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+ return rc;
+ }
+ fg_dbg(chip, FG_STATUS, "esr_rslow_chg modified to %x\n", val & 0xFF);
+
+ rc = fg_sram_read(chip, ESR_RSLOW_DISCHG_WORD,
+ ESR_RSLOW_DISCHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+ return rc;
+ }
+
+ val *= scaling_factor;
+ do_div(val, 1000);
+ rc = fg_sram_write(chip, ESR_RSLOW_DISCHG_WORD,
+ ESR_RSLOW_DISCHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+ return rc;
+ }
+ fg_dbg(chip, FG_STATUS, "esr_rslow_dischg modified to %x\n",
+ val & 0xFF);
+
+ val = RCONN_CONFIG_BIT;
+ rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+ SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing SW_CONFIG_OFFSET, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
{
u8 buf[4];
@@ -2330,6 +2448,9 @@ static int fg_notifier_cb(struct notifier_block *nb,
if (event != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
+ if (work_pending(&chip->status_change_work))
+ return NOTIFY_OK;
+
if ((strcmp(psy->desc->name, "battery") == 0)
|| (strcmp(psy->desc->name, "usb") == 0)) {
/*
@@ -2575,6 +2696,14 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
+ if (chip->dt.rconn_mohms > 0) {
+ rc = fg_rconn_config(chip);
+ if (rc < 0) {
+ pr_err("Error in configuring Rconn, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
return 0;
}
@@ -3076,10 +3205,17 @@ static int fg_parse_dt(struct fg_chip *chip)
}
}
+ rc = of_property_read_u32(node, "qcom,rradc-base", &base);
+ if (rc < 0) {
+ dev_err(chip->dev, "rradc-base not specified, rc=%d\n", rc);
+ return rc;
+ }
+ chip->rradc_base = base;
+
rc = fg_get_batt_profile(chip);
if (rc < 0)
pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
- chip->batt_id_kohms, rc);
+ chip->batt_id_ohms / 1000, rc);
/* Read all the optional properties below */
rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
@@ -3236,6 +3372,12 @@ static int fg_parse_dt(struct fg_chip *chip)
if (rc < 0)
pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
+ rc = of_property_read_u32(node, "qcom,fg-rconn-mohms", &temp);
+ if (rc < 0)
+ chip->dt.rconn_mohms = -EINVAL;
+ else
+ chip->dt.rconn_mohms = temp;
+
return 0;
}
@@ -3362,7 +3504,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
if (!rc)
pr_info("battery SOC:%d voltage: %duV temp: %d id: %dKOhms\n",
- msoc, volt_uv, batt_temp, chip->batt_id_kohms);
+ msoc, volt_uv, batt_temp, chip->batt_id_ohms / 1000);
device_init_wakeup(chip->dev, true);
if (chip->profile_available)
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index e73ed2f1d288..543189ae5498 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -210,6 +210,13 @@ static struct smb_params v1_params = {
.max_u = 2000,
.step_u = 200,
},
+ .freq_boost = {
+ .name = "boost switching frequency",
+ .reg = CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG,
+ .min_u = 600,
+ .max_u = 2000,
+ .step_u = 200,
+ },
};
#define STEP_CHARGING_MAX_STEPS 5
@@ -218,6 +225,7 @@ struct smb_dt_props {
int usb_icl_ua;
int otg_cl_ua;
int dc_icl_ua;
+ int boost_threshold_ua;
int fv_uv;
int wipower_max_uw;
u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
@@ -243,6 +251,7 @@ module_param_named(
);
#define MICRO_1P5A 1500000
+#define MICRO_P1A 100000
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -304,6 +313,12 @@ static int smb2_parse_dt(struct smb2 *chip)
if (rc < 0)
chip->dt.dc_icl_ua = -EINVAL;
+ rc = of_property_read_u32(node,
+ "qcom,boost-threshold-ua",
+ &chip->dt.boost_threshold_ua);
+ if (rc < 0)
+ chip->dt.boost_threshold_ua = MICRO_P1A;
+
rc = of_property_read_u32(node, "qcom,wipower-max-uw",
&chip->dt.wipower_max_uw);
if (rc < 0)
@@ -370,6 +385,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PD_ACTIVE,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_BOOST_CURRENT,
POWER_SUPPLY_PROP_PE_START,
};
@@ -436,6 +452,9 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
rc = smblib_get_prop_usb_current_now(chg, val);
break;
+ case POWER_SUPPLY_PROP_BOOST_CURRENT:
+ val->intval = chg->boost_current_ua;
+ break;
case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
rc = smblib_get_prop_pd_in_hard_reset(chg, val);
break;
@@ -490,6 +509,9 @@ static int smb2_usb_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
chg->system_suspend_supported = val->intval;
break;
+ case POWER_SUPPLY_PROP_BOOST_CURRENT:
+ rc = smblib_set_prop_boost_current(chg, val);
+ break;
default:
pr_err("set prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -1073,6 +1095,7 @@ static int smb2_init_hw(struct smb2 *chip)
chg->otg_cl_ua = chip->dt.otg_cl_ua;
chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+ chg->boost_threshold_ua = chip->dt.boost_threshold_ua;
rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
if (rc < 0) {
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 0faf8aee8aa0..9348091ec8dd 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -780,16 +780,6 @@ static int smblib_otg_cl_config(struct smb_charger *chg, int otg_cl_ua)
return rc;
}
- /* configure PFM/PWM mode for OTG regulator */
- rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG3_REG,
- ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT,
- otg_cl_ua > MICRO_250MA ? 1 : 0);
- if (rc < 0) {
- smblib_err(chg,
- "Couldn't write DC_ENG_SSUPPLY_CFG3_REG rc=%d\n", rc);
- return rc;
- }
-
return rc;
}
@@ -1155,32 +1145,9 @@ int smblib_get_prop_batt_capacity(struct smb_charger *chg,
int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val)
{
- union power_supply_propval pval = {0, };
- bool usb_online, dc_online;
u8 stat;
int rc;
- rc = smblib_get_prop_usb_online(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get usb online property rc=%d\n",
- rc);
- return rc;
- }
- usb_online = (bool)pval.intval;
-
- rc = smblib_get_prop_dc_online(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get dc online property rc=%d\n",
- rc);
- return rc;
- }
- dc_online = (bool)pval.intval;
-
- if (!usb_online && !dc_online) {
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- return rc;
- }
-
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
@@ -1498,7 +1465,7 @@ int smblib_get_prop_dc_online(struct smb_charger *chg,
stat);
val->intval = (stat & USE_DCIN_BIT) &&
- (stat & VALID_INPUT_POWER_SOURCE_BIT);
+ (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
return rc;
}
@@ -1511,7 +1478,7 @@ int smblib_get_prop_dc_current_max(struct smb_charger *chg,
}
/*******************
- * USB PSY SETTERS *
+ * DC PSY SETTERS *
* *****************/
int smblib_set_prop_dc_current_max(struct smb_charger *chg,
@@ -1564,7 +1531,7 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
stat);
val->intval = (stat & USE_USBIN_BIT) &&
- (stat & VALID_INPUT_POWER_SOURCE_BIT);
+ (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
return rc;
}
@@ -1893,6 +1860,25 @@ int smblib_set_prop_usb_current_max(struct smb_charger *chg,
return rc;
}
+#define FSW_2MHZ 2000
+#define FSW_800KHZ_RESET 800
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.freq_boost,
+ val->intval <= chg->boost_threshold_ua ?
+ FSW_2MHZ : FSW_800KHZ_RESET);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in setting freq_boost rc=%d\n", rc);
+ return rc;
+ }
+
+ chg->boost_current_ua = val->intval;
+ return rc;
+}
+
int smblib_set_prop_typec_power_role(struct smb_charger *chg,
const union power_supply_propval *val)
{
@@ -2452,12 +2438,9 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
rc);
}
} else {
- if (chg->wa_flags & BOOST_BACK_WA) {
+ if (chg->wa_flags & BOOST_BACK_WA)
vote(chg->usb_suspend_votable,
BOOST_BACK_VOTER, false, 0);
- vote(chg->dc_suspend_votable,
- BOOST_BACK_VOTER, false, 0);
- }
if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
@@ -2766,6 +2749,12 @@ static void typec_sink_insertion(struct smb_charger *chg)
false, 0);
}
+static void typec_sink_removal(struct smb_charger *chg)
+{
+ smblib_set_charge_param(chg, &chg->param.freq_boost, FSW_800KHZ_RESET);
+ chg->boost_current_ua = 0;
+}
+
static void smblib_handle_typec_removal(struct smb_charger *chg)
{
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
@@ -2785,6 +2774,7 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
typec_source_removal(chg);
+ typec_sink_removal(chg);
smblib_update_usb_type(chg);
}
@@ -2802,6 +2792,7 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg,
typec_sink_insertion(chg);
} else {
typec_source_insertion(chg);
+ typec_sink_removal(chg);
}
vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
@@ -2941,19 +2932,30 @@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
get_effective_result(chg->usb_suspend_votable))
return IRQ_HANDLED;
- if ((stat & USE_DCIN_BIT) &&
- get_effective_result(chg->dc_suspend_votable))
+ if (stat & USE_DCIN_BIT)
return IRQ_HANDLED;
if (is_storming(&irq_data->storm_data)) {
- smblib_dbg(chg, PR_MISC, "reverse boost detected; suspending input\n");
+ smblib_err(chg, "Reverse boost detected: suspending input\n");
vote(chg->usb_suspend_votable, BOOST_BACK_VOTER, true, 0);
- vote(chg->dc_suspend_votable, BOOST_BACK_VOTER, true, 0);
}
return IRQ_HANDLED;
}
+irqreturn_t smblib_handle_wdog_bark(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+
+ rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
+
+ return IRQ_HANDLED;
+}
+
/***************
* Work Queues *
***************/
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index b309095b04c1..089c6a6fe1b2 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -110,6 +110,7 @@ struct smb_params {
struct smb_chg_param step_soc;
struct smb_chg_param step_cc_delta[5];
struct smb_chg_param freq_buck;
+ struct smb_chg_param freq_boost;
};
struct parallel_params {
@@ -198,6 +199,7 @@ struct smb_charger {
int voltage_max_uv;
int pd_active;
bool system_suspend_supported;
+ int boost_threshold_ua;
int system_temp_level;
int thermal_levels;
@@ -216,6 +218,7 @@ struct smb_charger {
/* workaround flag */
u32 wa_flags;
enum cc2_sink_type cc2_sink_detach_flag;
+ int boost_current_ua;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -262,6 +265,7 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data);
irqreturn_t smblib_handle_dc_plugin(int irq, void *data);
irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data);
irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data);
+irqreturn_t smblib_handle_wdog_bark(int irq, void *data);
int smblib_get_prop_input_suspend(struct smb_charger *chg,
union power_supply_propval *val);
@@ -346,6 +350,8 @@ int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+ const union power_supply_propval *val);
int smblib_set_prop_typec_power_role(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_pd_active(struct smb_charger *chg,
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index c2a2b0c86d73..d9d12c9d7cf6 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -897,7 +897,7 @@ enum {
#define BITE_WDOG_INT_EN_BIT BIT(5)
#define SFT_AFTER_WDOG_IRQ_MASK GENMASK(4, 3)
#define WDOG_IRQ_SFT_BIT BIT(2)
-#define WDOG_OPTION_BIT BIT(1)
+#define WDOG_TIMER_EN_ON_PLUGIN_BIT BIT(1)
#define WDOG_TIMER_EN_BIT BIT(0)
#define MISC_CFG_REG (MISC_BASE + 0x52)
@@ -1000,5 +1000,6 @@ enum {
#define SYSOK_OPTIONS_MASK GENMASK(2, 0)
#define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG (MISC_BASE + 0xA0)
+#define CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG (MISC_BASE + 0xA1)
#endif /* __SMB2_CHARGER_REG_H */
diff --git a/drivers/power/qcom-charger/smb1351-charger.c b/drivers/power/qcom-charger/smb1351-charger.c
index 79fbe33acf5d..e9d8c0e08447 100644
--- a/drivers/power/qcom-charger/smb1351-charger.c
+++ b/drivers/power/qcom-charger/smb1351-charger.c
@@ -3087,7 +3087,7 @@ static int smb1351_main_charger_probe(struct i2c_client *client,
chip->adc_param.low_temp = chip->batt_cool_decidegc;
chip->adc_param.high_temp = chip->batt_warm_decidegc;
}
- chip->adc_param.timer_interval = ADC_MEAS2_INTERVAL_1S;
+ chip->adc_param.timer_interval = ADC_MEAS1_INTERVAL_500MS;
chip->adc_param.state_request = ADC_TM_WARM_COOL_THR_ENABLE;
chip->adc_param.btm_ctx = chip;
chip->adc_param.threshold_notification =
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 4255958de300..70d935e9d1df 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -192,11 +192,13 @@ static int smb138x_usb_get_prop(struct power_supply *psy,
pr_err("get prop %d is not supported\n", prop);
return -EINVAL;
}
+
if (rc < 0) {
pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
return -ENODATA;
}
- return 0;
+
+ return rc;
}
static int smb138x_usb_set_prop(struct power_supply *psy,
@@ -319,11 +321,13 @@ static int smb138x_batt_get_prop(struct power_supply *psy,
pr_err("batt power supply get prop %d not supported\n", prop);
return -EINVAL;
}
+
if (rc < 0) {
pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
return -ENODATA;
}
- return 0;
+
+ return rc;
}
static int smb138x_batt_set_prop(struct power_supply *psy,
@@ -457,11 +461,37 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
prop);
return -EINVAL;
}
+
if (rc < 0) {
pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
return -ENODATA;
}
- return 0;
+
+ return rc;
+}
+
+static int smb138x_set_parallel_suspend(struct smb138x *chip, bool suspend)
+{
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT,
+ suspend ? 0 : WDOG_TIMER_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't %s watchdog rc=%d\n",
+ suspend ? "disable" : "enable", rc);
+ suspend = true;
+ }
+
+ rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+ suspend ? USBIN_SUSPEND_BIT : 0);
+ if (rc < 0) {
+ pr_err("Couldn't %s parallel charger rc=%d\n",
+ suspend ? "suspend" : "resume", rc);
+ return rc;
+ }
+
+ return rc;
}
static int smb138x_parallel_set_prop(struct power_supply *psy,
@@ -474,7 +504,7 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
switch (prop) {
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
- rc = smblib_set_usb_suspend(chg, val->intval);
+ rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
@@ -620,7 +650,7 @@ static int smb138x_init_vconn_regulator(struct smb138x *chip)
static int smb138x_init_hw(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
- int rc;
+ int rc = 0;
/* votes must be cast before configuring software control */
vote(chg->usb_suspend_votable,
@@ -772,6 +802,7 @@ static int smb138x_determine_initial_status(struct smb138x *chip)
struct smb138x_irq_info {
const char *name;
const irq_handler_t handler;
+ const bool wake;
const struct storm_watch storm_data;
};
@@ -908,7 +939,8 @@ static const struct smb138x_irq_info smb138x_irqs[] = {
},
{
.name = "wdog-bark",
- .handler = smblib_handle_debug,
+ .handler = smblib_handle_wdog_bark,
+ .wake = true,
},
{
.name = "aicl-fail",
@@ -953,7 +985,7 @@ static int smb138x_request_interrupt(struct smb138x *chip,
const char *irq_name)
{
struct smb_charger *chg = &chip->chg;
- int rc, irq, irq_index;
+ int rc = 0, irq, irq_index;
struct smb_irq_data *irq_data;
irq = of_irq_get_byname(node, irq_name);
@@ -968,6 +1000,9 @@ static int smb138x_request_interrupt(struct smb138x *chip,
return irq_index;
}
+ if (!smb138x_irqs[irq_index].handler)
+ return 0;
+
irq_data = devm_kzalloc(chg->dev, sizeof(*irq_data), GFP_KERNEL);
if (!irq_data)
return -ENOMEM;
@@ -984,6 +1019,9 @@ static int smb138x_request_interrupt(struct smb138x *chip,
return rc;
}
+ if (smb138x_irqs[irq_index].wake)
+ enable_irq_wake(irq);
+
return rc;
}
@@ -1001,7 +1039,7 @@ static int smb138x_request_interrupts(struct smb138x *chip)
prop, name) {
rc = smb138x_request_interrupt(chip, child, name);
if (rc < 0) {
- pr_err("Coudn't request interrupt %s rc=%d\n",
+ pr_err("Couldn't request interrupt %s rc=%d\n",
name, rc);
return rc;
}
@@ -1092,7 +1130,7 @@ static int smb138x_slave_probe(struct smb138x *chip)
rc = smblib_init(chg);
if (rc < 0) {
pr_err("Couldn't initialize smblib rc=%d\n", rc);
- return rc;
+ goto cleanup;
}
if (chip->wa_flags & OOB_COMP_WA_BIT) {
@@ -1102,7 +1140,7 @@ static int smb138x_slave_probe(struct smb138x *chip)
if (rc < 0) {
dev_err(chg->dev,
"Couldn't configure the oob comp threh rc = %d\n", rc);
- return rc;
+ goto cleanup;
}
rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG6,
@@ -1110,22 +1148,41 @@ static int smb138x_slave_probe(struct smb138x *chip)
if (rc < 0) {
dev_err(chg->dev,
"Couldn't configure the sdcdc cfg 6 reg rc = %d\n", rc);
- return rc;
+ goto cleanup;
}
}
- /* suspend usb input */
- rc = smblib_set_usb_suspend(chg, true);
+ /* enable watchdog bark and bite interrupts, and disable the watchdog */
+ rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
+ | WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
+ | BARK_WDOG_INT_EN_BIT,
+ BITE_WDOG_INT_EN_BIT | BARK_WDOG_INT_EN_BIT);
if (rc < 0) {
- pr_err("Couldn't suspend USB input rc=%d\n", rc);
- return rc;
+ pr_err("Couldn't configure the watchdog rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ /* disable charging when watchdog bites */
+ rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+ BITE_WDOG_DISABLE_CHARGING_CFG_BIT,
+ BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't configure the watchdog bite rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ /* suspend parallel charging */
+ rc = smb138x_set_parallel_suspend(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't suspend parallel charging rc=%d\n", rc);
+ goto cleanup;
}
/* initialize FCC to 0 */
rc = smblib_set_charge_param(chg, &chg->param.fcc, 0);
if (rc < 0) {
pr_err("Couldn't set 0 FCC rc=%d\n", rc);
- return rc;
+ goto cleanup;
}
/* enable the charging path */
@@ -1134,7 +1191,7 @@ static int smb138x_slave_probe(struct smb138x *chip)
CHARGING_ENABLE_CMD_BIT);
if (rc < 0) {
dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
- return rc;
+ goto cleanup;
}
/* configure charge enable for software control; active high */
@@ -1143,7 +1200,7 @@ static int smb138x_slave_probe(struct smb138x *chip)
if (rc < 0) {
dev_err(chg->dev, "Couldn't configure charge enable source rc=%d\n",
rc);
- return rc;
+ goto cleanup;
}
/* enable parallel current sensing */
@@ -1152,17 +1209,28 @@ static int smb138x_slave_probe(struct smb138x *chip)
if (rc < 0) {
dev_err(chg->dev, "Couldn't enable parallel current sensing rc=%d\n",
rc);
- return rc;
+ goto cleanup;
}
- /* keep at the end of probe, ready to serve before notifying others */
rc = smb138x_init_parallel_psy(chip);
if (rc < 0) {
pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
- return rc;
+ goto cleanup;
+ }
+
+ rc = smb138x_request_interrupts(chip);
+ if (rc < 0) {
+ pr_err("Couldn't request interrupts rc=%d\n", rc);
+ goto cleanup;
}
return rc;
+
+cleanup:
+ smblib_deinit(chg);
+ if (chip->parallel_psy)
+ power_supply_unregister(chip->parallel_psy);
+ return rc;
}
static const struct of_device_id match_table[] = {
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 38a8bbe74810..83797d89c30f 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
struct pps_client_pp *device;
/* FIXME: oooh, this is ugly! */
- if (strcmp(pardev->name, KBUILD_MODNAME))
+ if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
/* not our port */
return;
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index f9dfc8b6407a..7225ac6b3df5 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -80,7 +80,6 @@ struct fsl_pwm_chip {
struct mutex lock;
- unsigned int use_count;
unsigned int cnt_select;
unsigned int clk_ps;
@@ -300,9 +299,6 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
{
int ret;
- if (fpc->use_count++ != 0)
- return 0;
-
/* select counter clock source */
regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
FTM_SC_CLK(fpc->cnt_select));
@@ -334,25 +330,6 @@ static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return ret;
}
-static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
-{
- /*
- * already disabled, do nothing
- */
- if (fpc->use_count == 0)
- return;
-
- /* there are still users, so can't disable yet */
- if (--fpc->use_count > 0)
- return;
-
- /* no users left, disable PWM counter clock */
- regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, 0);
-
- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
- clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
-}
-
static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
@@ -362,7 +339,8 @@ static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
BIT(pwm->hwpwm));
- fsl_counter_clock_disable(fpc);
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
regmap_read(fpc->regmap, FTM_OUTMASK, &val);
if ((val & 0xFF) == 0xFF)
@@ -492,17 +470,24 @@ static int fsl_pwm_remove(struct platform_device *pdev)
static int fsl_pwm_suspend(struct device *dev)
{
struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
- u32 val;
+ int i;
regcache_cache_only(fpc->regmap, true);
regcache_mark_dirty(fpc->regmap);
- /* read from cache */
- regmap_read(fpc->regmap, FTM_OUTMASK, &val);
- if ((val & 0xFF) != 0xFF) {
+ for (i = 0; i < fpc->chip.npwm; i++) {
+ struct pwm_device *pwm = &fpc->chip.pwms[i];
+
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+ continue;
+
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+
+ if (!pwm_is_enabled(pwm))
+ continue;
+
clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
}
return 0;
@@ -511,12 +496,19 @@ static int fsl_pwm_suspend(struct device *dev)
static int fsl_pwm_resume(struct device *dev)
{
struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
- u32 val;
+ int i;
+
+ for (i = 0; i < fpc->chip.npwm; i++) {
+ struct pwm_device *pwm = &fpc->chip.pwms[i];
+
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+ continue;
- /* read from cache */
- regmap_read(fpc->regmap, FTM_OUTMASK, &val);
- if ((val & 0xFF) != 0xFF) {
clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+
+ if (!pwm_is_enabled(pwm))
+ continue;
+
clk_prepare_enable(fpc->clk[fpc->cnt_select]);
clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
}
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 9fde60ce8e7b..6e203a65effb 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -24,9 +24,7 @@ struct lpc32xx_pwm_chip {
void __iomem *base;
};
-#define PWM_ENABLE (1 << 31)
-#define PWM_RELOADV(x) (((x) & 0xFF) << 8)
-#define PWM_DUTY(x) ((x) & 0xFF)
+#define PWM_ENABLE BIT(31)
#define to_lpc32xx_pwm_chip(_chip) \
container_of(_chip, struct lpc32xx_pwm_chip, chip)
@@ -38,40 +36,27 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long long c;
int period_cycles, duty_cycles;
u32 val;
-
- c = clk_get_rate(lpc32xx->clk) / 256;
- c = c * period_ns;
- do_div(c, NSEC_PER_SEC);
-
- /* Handle high and low extremes */
- if (c == 0)
- c = 1;
- if (c > 255)
- c = 0; /* 0 set division by 256 */
- period_cycles = c;
-
- /* The duty-cycle value is as follows:
- *
- * DUTY-CYCLE HIGH LEVEL
- * 1 99.9%
- * 25 90.0%
- * 128 50.0%
- * 220 10.0%
- * 255 0.1%
- * 0 0.0%
- *
- * In other words, the register value is duty-cycle % 256 with
- * duty-cycle in the range 1-256.
- */
- c = 256 * duty_ns;
- do_div(c, period_ns);
- if (c > 255)
- c = 255;
- duty_cycles = 256 - c;
+ c = clk_get_rate(lpc32xx->clk);
+
+ /* The highest acceptable divisor is 256, which is represented by 0 */
+ period_cycles = div64_u64(c * period_ns,
+ (unsigned long long)NSEC_PER_SEC * 256);
+ if (!period_cycles)
+ period_cycles = 1;
+ if (period_cycles > 255)
+ period_cycles = 0;
+
+ /* Compute 256 x #duty/period value and care for corner cases */
+ duty_cycles = div64_u64((unsigned long long)(period_ns - duty_ns) * 256,
+ period_ns);
+ if (!duty_cycles)
+ duty_cycles = 1;
+ if (duty_cycles > 255)
+ duty_cycles = 255;
val = readl(lpc32xx->base + (pwm->hwpwm << 2));
val &= ~0xFFFF;
- val |= PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles);
+ val |= (period_cycles << 8) | duty_cycles;
writel(val, lpc32xx->base + (pwm->hwpwm << 2));
return 0;
@@ -134,7 +119,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.dev = &pdev->dev;
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
- lpc32xx->chip.npwm = 2;
+ lpc32xx->chip.npwm = 1;
lpc32xx->chip.base = -1;
ret = pwmchip_add(&lpc32xx->chip);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 63cd5e68c864..3a6d0290c54c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
sreg->sel = 22;
- if (!sreg->sel) {
+ if (!sreg->bypass && !sreg->sel) {
dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
return -EINVAL;
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9e03d158f411..4f7ce0097191 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1239,11 +1239,6 @@ int rproc_add(struct rproc *rproc)
if (ret < 0)
return ret;
- /* expose to rproc_get_by_phandle users */
- mutex_lock(&rproc_list_mutex);
- list_add(&rproc->node, &rproc_list);
- mutex_unlock(&rproc_list_mutex);
-
dev_info(dev, "%s is available\n", rproc->name);
dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
@@ -1251,8 +1246,16 @@ int rproc_add(struct rproc *rproc)
/* create debugfs entries */
rproc_create_debug_dir(rproc);
+ ret = rproc_add_virtio_devices(rproc);
+ if (ret < 0)
+ return ret;
- return rproc_add_virtio_devices(rproc);
+ /* expose to rproc_get_by_phandle users */
+ mutex_lock(&rproc_list_mutex);
+ list_add(&rproc->node, &rproc_list);
+ mutex_unlock(&rproc_list_mutex);
+
+ return 0;
}
EXPORT_SYMBOL(rproc_add);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index ffb860d18701..f92528822f06 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -149,12 +149,14 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
if (!is_power_of_2(freq))
return -EINVAL;
+ s3c_rtc_enable_clk(info);
spin_lock_irq(&info->pie_lock);
if (info->data->set_freq)
info->data->set_freq(info, freq);
spin_unlock_irq(&info->pie_lock);
+ s3c_rtc_disable_clk(info);
return 0;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4abfbdb285ec..84c13dffa3a8 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1584,9 +1584,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
unsigned long long now;
int expires;
+ cqr = (struct dasd_ccw_req *) intparm;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
+ if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
+ device = (struct dasd_device *) cqr->startdev;
+ cqr->status = DASD_CQR_CLEARED;
+ dasd_device_clear_timer(device);
+ wake_up(&dasd_flush_wq);
+ dasd_schedule_device_bh(device);
+ return;
+ }
break;
case -ETIMEDOUT:
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
@@ -1602,7 +1611,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
now = get_tod_clock();
- cqr = (struct dasd_ccw_req *) intparm;
/* check for conditions that should be handled immediately */
if (!cqr ||
!(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
+ unsigned long copied;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+ copied = PAGE_SIZE -
+ copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+ if (offsetof(struct sccb_header, length) +
+ sizeof(sccb->length) > copied || sccb->length > copied) {
rc = -EFAULT;
goto out_free;
}
- if (sccb->length > PAGE_SIZE || sccb->length < 8)
- return -EINVAL;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
- rc = -EFAULT;
+ if (sccb->length < 8) {
+ rc = -EINVAL;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index c692dfebd0ba..50597f9522fe 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
device = container_of(kobj, struct device, kobj);
chp = to_channelpath(device);
- if (!chp->cmg_chars)
+ if (chp->cmg == -1)
return 0;
- return memory_read_from_buffer(buf, count, &off,
- chp->cmg_chars, sizeof(struct cmg_chars));
+ return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
+ sizeof(chp->cmg_chars));
}
static struct bin_attribute chp_measurement_chars_attr = {
@@ -416,7 +416,8 @@ static void chp_release(struct device *dev)
* chp_update_desc - update channel-path description
* @chp - channel-path
*
- * Update the channel-path description of the specified channel-path.
+ * Update the channel-path description of the specified channel-path
+ * including channel measurement related information.
* Return zero on success, non-zero otherwise.
*/
int chp_update_desc(struct channel_path *chp)
@@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp)
return rc;
rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+ if (rc)
+ return rc;
- return rc;
+ return chsc_get_channel_measurement_chars(chp);
}
/**
@@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid)
ret = -ENODEV;
goto out_free;
}
- /* Get channel-measurement characteristics. */
- if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
- ret = chsc_get_channel_measurement_chars(chp);
- if (ret)
- goto out_free;
- } else {
- chp->cmg = -1;
- }
dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
/* make it known to the system */
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 4efd5b867cc3..af0232290dc4 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -48,7 +48,7 @@ struct channel_path {
/* Channel-measurement related stuff: */
int cmg;
int shared;
- void *cmg_chars;
+ struct cmg_chars cmg_chars;
};
/* Return channel_path struct for given chpid. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a831d18596a5..c424c0c7367e 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <asm/cio.h>
@@ -224,8 +225,9 @@ out_unreg:
void chsc_chp_offline(struct chp_id chpid)
{
- char dbf_txt[15];
+ struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
+ char dbf_txt[15];
sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
@@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid)
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
+
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
@@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
void chsc_chp_online(struct chp_id chpid)
{
- char dbf_txt[15];
+ struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
+ char dbf_txt[15];
sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
@@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid)
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
+
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
css_schedule_reprobe();
@@ -967,22 +980,19 @@ static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
- struct cmg_chars *cmg_chars;
int i, mask;
- cmg_chars = chp->cmg_chars;
for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
mask = 0x80 >> (i + 3);
if (cmcv & mask)
- cmg_chars->values[i] = chars->values[i];
+ chp->cmg_chars.values[i] = chars->values[i];
else
- cmg_chars->values[i] = 0;
+ chp->cmg_chars.values[i] = 0;
}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
- struct cmg_chars *cmg_chars;
int ccode, ret;
struct {
@@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 data[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed)) *scmc_area;
- chp->cmg_chars = NULL;
- cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
- if (!cmg_chars)
- return -ENOMEM;
+ chp->shared = -1;
+ chp->cmg = -1;
+
+ if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
+ return 0;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
@@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
scmc_area->response.code);
goto out;
}
- if (scmc_area->not_valid) {
- chp->cmg = -1;
- chp->shared = -1;
+ if (scmc_area->not_valid)
goto out;
- }
+
chp->cmg = scmc_area->cmg;
chp->shared = scmc_area->shared;
if (chp->cmg != 2 && chp->cmg != 3) {
/* No cmg-dependent data. */
goto out;
}
- chp->cmg_chars = cmg_chars;
chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *) &scmc_area->data);
out:
spin_unlock_irq(&chsc_page_lock);
- if (!chp->cmg_chars)
- kfree(cmg_chars);
-
return ret;
}
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b2afad5a5682..2a34eb5f6161 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -753,6 +753,17 @@ static void reset_cmb(struct ccw_device *cdev)
cmf_generic_reset(cdev);
}
+static int cmf_enabled(struct ccw_device *cdev)
+{
+ int enabled;
+
+ spin_lock_irq(cdev->ccwlock);
+ enabled = !!cdev->private->cmb;
+ spin_unlock_irq(cdev->ccwlock);
+
+ return enabled;
+}
+
static struct attribute_group cmf_attr_group;
static struct cmb_operations cmbops_basic = {
@@ -1153,13 +1164,8 @@ static ssize_t cmb_enable_show(struct device *dev,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
- int enabled;
- spin_lock_irq(cdev->ccwlock);
- enabled = !!cdev->private->cmb;
- spin_unlock_irq(cdev->ccwlock);
-
- return sprintf(buf, "%d\n", enabled);
+ return sprintf(buf, "%d\n", cmf_enabled(cdev));
}
static ssize_t cmb_enable_store(struct device *dev,
@@ -1199,15 +1205,20 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable)
* @cdev: The ccw device to be enabled
*
* Returns %0 for success or a negative error value.
- *
+ * Note: If this is called on a device for which channel measurement is already
+ * enabled a reset of the measurement data is triggered.
* Context:
* non-atomic
*/
int enable_cmf(struct ccw_device *cdev)
{
- int ret;
+ int ret = 0;
device_lock(&cdev->dev);
+ if (cmf_enabled(cdev)) {
+ cmbops->reset(cdev);
+ goto out_unlock;
+ }
get_device(&cdev->dev);
ret = cmbops->alloc(cdev);
if (ret)
@@ -1226,7 +1237,7 @@ int enable_cmf(struct ccw_device *cdev)
out:
if (ret)
put_device(&cdev->dev);
-
+out_unlock:
device_unlock(&cdev->dev);
return ret;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8f1b091e1732..df036b872b05 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
@@ -1126,6 +1127,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
qeth_l2_request_initial_mac(card);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
+ netif_carrier_off(card->dev);
return register_netdev(card->dev);
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 543960e96b42..cc4d3c3d8cc5 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3220,6 +3220,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
+ netif_carrier_off(card->dev);
return register_netdev(card->dev);
}
@@ -3246,6 +3247,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 54195a117f72..f78cc943d230 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
struct fib *fibptr;
struct hw_fib * hw_fib = (struct hw_fib *)0;
dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
- unsigned size;
+ unsigned int size, osize;
int retval;
if (dev->in_reset) {
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
* will not overrun the buffer when we copy the memory. Return
* an error if we would.
*/
- size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
+ osize = size = le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr);
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
goto cleanup;
}
+ /* Sanity check the second copy */
+ if ((osize != le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr))
+ || (size < le16_to_cpu(kfib->header.SenderSize))) {
+ retval = -EINVAL;
+ goto cleanup;
+ }
+
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 333db5953607..41f9a00e4f74 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2664,7 +2664,7 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
- return false;
+ goto err_free_dma;
}
count = 8;
while (count){
@@ -2694,19 +2694,23 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
acb->firm_model,
acb->firm_version);
- acb->signature = readl(&reg->message_rwbuffer[1]);
+ acb->signature = readl(&reg->message_rwbuffer[0]);
/*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
+ acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
/*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
+ acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
/*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
+ acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
/*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
+ acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
/*firm_ide_channels,4,16-19*/
acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
/*firm_ide_channels,4,16-19*/
return true;
+err_free_dma:
+ dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+ acb->dma_coherent2, acb->dma_coherent_handle2);
+ return false;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
@@ -2880,15 +2884,15 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
iop_device_map++;
count--;
}
- acb->signature = readl(&reg->msgcode_rwbuffer[1]);
+ acb->signature = readl(&reg->msgcode_rwbuffer[0]);
/*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
+ acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
/*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
+ acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
/*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
+ acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
/*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
+ acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
/*firm_hd_channels,4,16-19*/
acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index fa09d4be2b53..2b456ca69d5c 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1181,8 +1181,9 @@ static const char * const snstext[] = {
/* Get sense key string or NULL if not available */
const char *
-scsi_sense_key_string(unsigned char key) {
- if (key <= 0xE)
+scsi_sense_key_string(unsigned char key)
+{
+ if (key < ARRAY_SIZE(snstext))
return snstext[key];
return NULL;
}
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index c11cd193f896..5ada9268a450 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -165,6 +165,8 @@ struct afu {
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
+ struct kref mapcount;
+
ctx_hndl_t ctx_hndl; /* master's context handle */
u64 *hrrq_start;
u64 *hrrq_end;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 1e5bf0ca81da..c86847c68448 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
atomic64_set(&afu->room, room);
if (room)
goto write_rrin;
- udelay(nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
if (rrin != 0x1)
break;
/* Double delay each time */
- udelay(2 << nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
}
@@ -338,7 +338,7 @@ retry:
atomic64_set(&afu->room, room);
if (room)
goto write_ioarrin;
- udelay(nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
* afu->room.
*/
if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(nretry);
+ udelay(1 << nretry);
goto retry;
}
@@ -368,6 +368,7 @@ out:
no_room:
afu->read_room = true;
+ kref_get(&cfg->afu->mapcount);
schedule_work(&cfg->work_q);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
@@ -473,6 +474,16 @@ out:
return rc;
}
+static void afu_unmap(struct kref *ref)
+{
+ struct afu *afu = container_of(ref, struct afu, mapcount);
+
+ if (likely(afu->afu_map)) {
+ cxl_psa_unmap((void __iomem *)afu->afu_map);
+ afu->afu_map = NULL;
+ }
+}
+
/**
* cxlflash_driver_info() - information handler for this host driver
* @host: SCSI host associated with device.
@@ -503,6 +514,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
ulong lock_flags;
short lflag = 0;
int rc = 0;
+ int kref_got = 0;
dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
"cdb=(%08X-%08X-%08X-%08X)\n",
@@ -547,6 +559,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
goto out;
}
+ kref_get(&cfg->afu->mapcount);
+ kref_got = 1;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
@@ -587,6 +602,8 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
}
out:
+ if (kref_got)
+ kref_put(&afu->mapcount, afu_unmap);
pr_devel("%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -632,20 +649,36 @@ static void free_mem(struct cxlflash_cfg *cfg)
* @cfg: Internal structure associated with the host.
*
* Safe to call with AFU in a partially allocated/initialized state.
+ *
+ * Cleans up all state associated with the command queue, and unmaps
+ * the MMIO space.
+ *
+ * - complete() will take care of commands we initiated (they'll be checked
+ * in as part of the cleanup that occurs after the completion)
+ *
+ * - cmd_checkin() will take care of entries that we did not initiate and that
+ * have not (and will not) complete because they are sitting on a [now stale]
+ * hardware queue
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
int i;
struct afu *afu = cfg->afu;
+ struct afu_cmd *cmd;
if (likely(afu)) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
- complete(&afu->cmd[i].cevent);
+ for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+ cmd = &afu->cmd[i];
+ complete(&cmd->cevent);
+ if (!atomic_read(&cmd->free))
+ cmd_checkin(cmd);
+ }
if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
}
+ kref_put(&afu->mapcount, afu_unmap);
}
}
@@ -731,8 +764,8 @@ static void cxlflash_remove(struct pci_dev *pdev)
scsi_remove_host(cfg->host);
/* fall through */
case INIT_STATE_AFU:
- term_afu(cfg);
cancel_work_sync(&cfg->work_q);
+ term_afu(cfg);
case INIT_STATE_PCI:
pci_release_regions(cfg->dev);
pci_disable_device(pdev);
@@ -1108,7 +1141,7 @@ static const struct asyc_intr_info ainfo[] = {
{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
- {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
+ {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
@@ -1316,6 +1349,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
__func__, port);
cfg->lr_state = LINK_RESET_REQUIRED;
cfg->lr_port = port;
+ kref_get(&cfg->afu->mapcount);
schedule_work(&cfg->work_q);
}
@@ -1336,6 +1370,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
if (info->action & SCAN_HOST) {
atomic_inc(&cfg->scan_host_needed);
+ kref_get(&cfg->afu->mapcount);
schedule_work(&cfg->work_q);
}
}
@@ -1731,6 +1766,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
rc = -ENOMEM;
goto err1;
}
+ kref_init(&afu->mapcount);
/* No byte reverse on reading afu_version or string will be backwards */
reg = readq(&afu->afu_map->global.regs.afu_version);
@@ -1765,8 +1801,7 @@ out:
return rc;
err2:
- cxl_psa_unmap((void __iomem *)afu->afu_map);
- afu->afu_map = NULL;
+ kref_put(&afu->mapcount, afu_unmap);
err1:
term_mc(cfg, UNDO_START);
goto out;
@@ -2114,6 +2149,16 @@ static ssize_t lun_mode_store(struct device *dev,
rc = kstrtouint(buf, 10, &lun_mode);
if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
afu->internal_lun = lun_mode;
+
+ /*
+ * When configured for internal LUN, there is only one channel,
+ * channel number 0, else there will be 2 (default).
+ */
+ if (afu->internal_lun)
+ shost->max_channel = 0;
+ else
+ shost->max_channel = NUM_FC_PORTS - 1;
+
afu_reset(cfg);
scsi_scan_host(cfg->host);
}
@@ -2274,6 +2319,7 @@ static struct scsi_host_template driver_template = {
* Device dependent values
*/
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
+static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };
/*
* PCI device binding table
@@ -2281,6 +2327,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
static struct pci_device_id cxlflash_pci_table[] = {
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
+ {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
{}
};
@@ -2339,6 +2387,7 @@ static void cxlflash_worker_thread(struct work_struct *work)
if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
scsi_scan_host(cfg->host);
+ kref_put(&afu->mapcount, afu_unmap);
}
/**
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 60324566c14f..3d2d606fafb3 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -24,8 +24,8 @@
#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
#define CXLFLASH_DRIVER_DATE "(August 13, 2015)"
-#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
-#define CXLFLASH_SUBS_DEV_ID 0x04F0
+#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
+#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
/* Since there is only one target, make it 0 */
#define CXLFLASH_TARGET 0
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index cac2e6a50efd..babe7ccc1777 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1380,7 +1380,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
}
ctxid = cxl_process_element(ctx);
- if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
+ if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
goto err2;
@@ -1508,7 +1508,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
}
ctxid = cxl_process_element(ctx);
- if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
+ if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
goto err1;
@@ -1590,6 +1590,13 @@ err1:
* place at the same time and the failure was due to CXL services being
* unable to keep up.
*
+ * As this routine is called on ioctl context, it holds the ioctl r/w
+ * semaphore that is used to drain ioctls in recovery scenarios. The
+ * implementation to achieve the pacing described above (a local mutex)
+ * requires that the ioctl r/w semaphore be dropped and reacquired to
+ * avoid a 3-way deadlock when multiple process recoveries operate in
+ * parallel.
+ *
* Because a user can detect an error condition before the kernel, it is
* quite possible for this routine to act as the kernel's EEH detection
* source (MMIO read of mbox_r). Because of this, there is a window of
@@ -1617,9 +1624,17 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
int rc = 0;
atomic_inc(&cfg->recovery_threads);
+ up_read(&cfg->ioctl_rwsem);
rc = mutex_lock_interruptible(mutex);
+ down_read(&cfg->ioctl_rwsem);
if (rc)
goto out;
+ rc = check_state(cfg);
+ if (rc) {
+ dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
+ rc = -ENODEV;
+ goto out;
+ }
dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
__func__, recover->reason, rctxid);
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index a53f583e2d7b..50f8e9300770 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -1008,6 +1008,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
virt->last_lba = last_lba;
virt->rsrc_handle = rsrc_handle;
+ if (lli->port_sel == BOTH_PORTS)
+ virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
out:
if (likely(ctxi))
put_context(ctxi);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 43ac62623bf2..7a58128a0000 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -10095,6 +10095,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->intr_flag = IPR_USE_MSI;
else {
ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->clear_isr = 1;
ioa_cfg->nvectors = 1;
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b0e6fe46448d..80d3c740a8a8 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -72,6 +72,7 @@ void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b6fa257ea3e0..59ced8864b2f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -455,9 +455,9 @@ int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- LPFC_MBOXQ_t *mboxq;
+ LPFC_MBOXQ_t *mboxq = NULL;
struct lpfc_nodelist *ndlp;
- struct lpfc_dmabuf *dmabuf;
+ struct lpfc_dmabuf *dmabuf = NULL;
int rc = 0;
/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
@@ -471,25 +471,33 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
}
}
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!dmabuf) {
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
rc = -ENOMEM;
goto fail;
}
- dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
- if (!dmabuf->virt) {
- rc = -ENOMEM;
- goto fail_free_dmabuf;
- }
- mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq) {
- rc = -ENOMEM;
- goto fail_free_coherent;
+ /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
+ if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+ if (!dmabuf->virt) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ memcpy(dmabuf->virt, &phba->fc_fabparam,
+ sizeof(struct serv_parm));
}
+
vport->port_state = LPFC_FABRIC_CFG_LINK;
- memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
- lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+ if (dmabuf)
+ lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+ else
+ lpfc_reg_vfi(mboxq, vport, 0);
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
@@ -497,17 +505,19 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = -ENXIO;
- goto fail_free_mbox;
+ goto fail;
}
return 0;
-fail_free_mbox:
- mempool_free(mboxq, phba->mbox_mem_pool);
-fail_free_coherent:
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
-fail_free_dmabuf:
- kfree(dmabuf);
fail:
+ if (mboxq)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (dmabuf) {
+ if (dmabuf->virt)
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0289 Issue Register VFI failed: Err %d\n", rc);
@@ -711,9 +721,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* For FC we need to do some special processing because of the SLI
* Port's default settings of the Common Service Parameters.
*/
- if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
- if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
+ if (fabric_param_changed)
lpfc_unregister_fcf_prep(phba);
/* This should just update the VFI CSPs*/
@@ -824,13 +835,21 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
- phba->fc_edtov = FF_DEF_EDTOV;
- phba->fc_ratov = FF_DEF_RATOV;
+ /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+ if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+ lpfc_unregister_fcf_prep(phba);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ phba->fc_topology_changed = 0;
+ }
+
rc = memcmp(&vport->fc_portname, &sp->portName,
sizeof(vport->fc_portname));
- memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
if (rc >= 0) {
/* This side will initiate the PLOGI */
@@ -839,38 +858,14 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
/*
- * N_Port ID cannot be 0, set our to LocalID the other
- * side will be RemoteID.
+ * N_Port ID cannot be 0, set our Id to LocalID
+ * the other side will be RemoteID.
*/
/* not equal */
if (rc)
vport->fc_myDID = PT2PT_LocalID;
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
- }
-
- /*
- * For SLI4, the VFI/VPI are registered AFTER the
- * Nport with the higher WWPN sends the PLOGI with
- * an assigned NPortId.
- */
-
- /* not equal */
- if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
- lpfc_issue_reg_vfi(vport);
-
/* Decrement ndlp reference count indicating that ndlp can be
* safely released when other references to it are done.
*/
@@ -912,29 +907,20 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* If we are pt2pt with another NPort, force NPIV off! */
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PT2PT;
- spin_unlock_irq(shost->host_lock);
- /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
- if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
- lpfc_unregister_fcf_prep(phba);
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
- /* The FC_VFI_REGISTERED flag will get clear in the cmpl
- * handler for unreg_vfi, but if we don't force the
- * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
- * built with the update bit set instead of just the vp bit to
- * change the Nport ID. We need to have the vp set and the
- * Upd cleared on topology changes.
- */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
- phba->fc_topology_changed = 0;
- lpfc_issue_reg_vfi(vport);
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
}
- /* Start discovery - this should just do CLEAR_LA */
- lpfc_disc_start(vport);
return 0;
fail:
return -ENXIO;
@@ -1157,6 +1143,7 @@ flogifail:
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
+
lpfc_nlp_put(ndlp);
if (!lpfc_error_lost_link(irsp)) {
@@ -3792,14 +3779,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_REG_LOGIN_ISSUE);
}
+
+ ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
!= MBX_NOT_FINISHED)
goto out;
- else
- /* Decrement the ndlp reference count we
- * set for this failed mailbox command.
- */
- lpfc_nlp_put(ndlp);
+
+ /* Decrement the ndlp reference count we
+ * set for this failed mailbox command.
+ */
+ lpfc_nlp_put(ndlp);
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
/* ELS rsp: Cannot issue reg_login for <NPortid> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -3856,6 +3846,7 @@ out:
* the routine lpfc_els_free_iocb.
*/
cmdiocb->context1 = NULL;
+
}
lpfc_els_free_iocb(phba, cmdiocb);
@@ -3898,6 +3889,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
+ struct serv_parm *sp;
uint16_t cmdsize;
int rc;
ELS_PKT *els_pkt_ptr;
@@ -3927,6 +3919,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
"Issue ACC: did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
+ case ELS_CMD_FLOGI:
case ELS_CMD_PLOGI:
cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
@@ -3944,10 +3937,34 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
- memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+ sp = (struct serv_parm *)pcmd;
+
+ if (flag == ELS_CMD_FLOGI) {
+ /* Copy the received service parameters back */
+ memcpy(sp, &phba->fc_fabparam,
+ sizeof(struct serv_parm));
+
+ /* Clear the F_Port bit */
+ sp->cmn.fPort = 0;
+
+ /* Mark all class service parameters as invalid */
+ sp->cls1.classValid = 0;
+ sp->cls2.classValid = 0;
+ sp->cls3.classValid = 0;
+ sp->cls4.classValid = 0;
+
+ /* Copy our worldwide names */
+ memcpy(&sp->portName, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ } else {
+ memcpy(pcmd, &vport->fc_sparam,
+ sizeof(struct serv_parm));
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC PLOGI: did:x%x flg:x%x",
+ "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
case ELS_CMD_PRLO:
@@ -4681,28 +4698,25 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
- switch (phba->sli4_hba.link_state.speed) {
- case LPFC_FC_LA_SPEED_1G:
+ switch (phba->fc_linkspeed) {
+ case LPFC_LINK_SPEED_1GHZ:
rdp_speed = RDP_PS_1GB;
break;
- case LPFC_FC_LA_SPEED_2G:
+ case LPFC_LINK_SPEED_2GHZ:
rdp_speed = RDP_PS_2GB;
break;
- case LPFC_FC_LA_SPEED_4G:
+ case LPFC_LINK_SPEED_4GHZ:
rdp_speed = RDP_PS_4GB;
break;
- case LPFC_FC_LA_SPEED_8G:
+ case LPFC_LINK_SPEED_8GHZ:
rdp_speed = RDP_PS_8GB;
break;
- case LPFC_FC_LA_SPEED_10G:
+ case LPFC_LINK_SPEED_10GHZ:
rdp_speed = RDP_PS_10GB;
break;
- case LPFC_FC_LA_SPEED_16G:
+ case LPFC_LINK_SPEED_16GHZ:
rdp_speed = RDP_PS_16GB;
break;
- case LPFC_FC_LA_SPEED_32G:
- rdp_speed = RDP_PS_32GB;
- break;
default:
rdp_speed = RDP_PS_UNKNOWN;
break;
@@ -5739,7 +5753,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
IOCB_t *icmd = &cmdiocb->iocb;
struct serv_parm *sp;
LPFC_MBOXQ_t *mbox;
- struct ls_rjt stat;
uint32_t cmd, did;
int rc;
uint32_t fc_flag = 0;
@@ -5765,135 +5778,92 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
}
- if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
- /* For a FLOGI we accept, then if our portname is greater
- * then the remote portname we initiate Nport login.
- */
+ (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
- rc = memcmp(&vport->fc_portname, &sp->portName,
- sizeof(struct lpfc_name));
- if (!rc) {
- if (phba->sli_rev < LPFC_SLI_REV4) {
- mbox = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
- if (!mbox)
- return 1;
- lpfc_linkdown(phba);
- lpfc_init_link(phba, mbox,
- phba->cfg_topology,
- phba->cfg_link_speed);
- mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox,
- MBX_NOWAIT);
- lpfc_set_loopback_flag(phba);
- if (rc == MBX_NOT_FINISHED)
- mempool_free(mbox, phba->mbox_mem_pool);
- return 1;
- } else {
- /* abort the flogi coming back to ourselves
- * due to external loopback on the port.
- */
- lpfc_els_abort_flogi(phba);
- return 0;
- }
- } else if (rc > 0) { /* greater than */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(shost->host_lock);
+ /*
+ * If our portname is greater than the remote portname,
+ * then we initiate Nport login.
+ */
- /* If we have the high WWPN we can assign our own
- * myDID; otherwise, we have to WAIT for a PLOGI
- * from the remote NPort to find out what it
- * will be.
- */
- vport->fc_myDID = PT2PT_LocalID;
- } else
- vport->fc_myDID = PT2PT_RemoteID;
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
- /*
- * The vport state should go to LPFC_FLOGI only
- * AFTER we issue a FLOGI, not receive one.
+ if (!rc) {
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mbox)
+ return 1;
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT);
+ lpfc_set_loopback_flag(phba);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+
+ /* abort the flogi coming back to ourselves
+ * due to external loopback on the port.
*/
+ lpfc_els_abort_flogi(phba);
+ return 0;
+
+ } else if (rc > 0) { /* greater than */
spin_lock_irq(shost->host_lock);
- fc_flag = vport->fc_flag;
- port_state = vport->port_state;
- vport->fc_flag |= FC_PT2PT;
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag |= FC_PT2PT_PLOGI;
spin_unlock_irq(shost->host_lock);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3311 Rcv Flogi PS x%x new PS x%x "
- "fc_flag x%x new fc_flag x%x\n",
- port_state, vport->port_state,
- fc_flag, vport->fc_flag);
- /*
- * We temporarily set fc_myDID to make it look like we are
- * a Fabric. This is done just so we end up with the right
- * did / sid on the FLOGI ACC rsp.
+ /* If we have the high WWPN we can assign our own
+ * myDID; otherwise, we have to WAIT for a PLOGI
+ * from the remote NPort to find out what it
+ * will be.
*/
- did = vport->fc_myDID;
- vport->fc_myDID = Fabric_DID;
-
+ vport->fc_myDID = PT2PT_LocalID;
} else {
- /* Reject this request because invalid parameters */
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
- stat.un.b.vendorUnique = 0;
-
- /*
- * We temporarily set fc_myDID to make it look like we are
- * a Fabric. This is done just so we end up with the right
- * did / sid on the FLOGI LS_RJT rsp.
- */
- did = vport->fc_myDID;
- vport->fc_myDID = Fabric_DID;
-
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
- NULL);
+ vport->fc_myDID = PT2PT_RemoteID;
+ }
- /* Now lets put fc_myDID back to what its supposed to be */
- vport->fc_myDID = did;
+ /*
+ * The vport state should go to LPFC_FLOGI only
+ * AFTER we issue a FLOGI, not receive one.
+ */
+ spin_lock_irq(shost->host_lock);
+ fc_flag = vport->fc_flag;
+ port_state = vport->port_state;
+ vport->fc_flag |= FC_PT2PT;
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3311 Rcv Flogi PS x%x new PS x%x "
+ "fc_flag x%x new fc_flag x%x\n",
+ port_state, vport->port_state,
+ fc_flag, vport->fc_flag);
- return 1;
- }
+ /*
+ * We temporarily set fc_myDID to make it look like we are
+ * a Fabric. This is done just so we end up with the right
+ * did / sid on the FLOGI ACC rsp.
+ */
+ did = vport->fc_myDID;
+ vport->fc_myDID = Fabric_DID;
- /* send our FLOGI first */
- if (vport->port_state < LPFC_FLOGI) {
- vport->fc_myDID = 0;
- lpfc_initial_flogi(vport);
- vport->fc_myDID = Fabric_DID;
- }
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
/* Send back ACC */
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
/* Now lets put fc_myDID back to what its supposed to be */
vport->fc_myDID = did;
- if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
- }
- }
-
return 0;
-fail:
- return 1;
}
/**
@@ -7345,7 +7315,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* reject till our FLOGI completes */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
- (cmd != ELS_CMD_FLOGI)) {
+ (cmd != ELS_CMD_FLOGI)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
@@ -7381,6 +7351,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
+
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bfc2442dd74a..d3668aa555d5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1083,7 +1083,7 @@ out:
}
-static void
+void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
@@ -1113,8 +1113,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
+ if (vport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(vport);
+ else if (vport->fc_flag & FC_PT2PT)
+ lpfc_disc_start(vport);
return;
out:
@@ -2963,8 +2965,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
out_free_mem:
mempool_free(mboxq, phba->mbox_mem_pool);
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
+ if (dmabuf) {
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
return;
}
@@ -3448,10 +3452,10 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
spin_unlock_irq(shost->host_lock);
- } else
- /* Good status, call state machine */
- lpfc_disc_state_machine(vport, ndlp, pmb,
- NLP_EVT_CMPL_REG_LOGIN);
+ }
+
+ /* Call state machine */
+ lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b0d92b84bcdc..c14ab6c3ae40 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8834,9 +8834,12 @@ found:
* already mapped to this phys_id.
*/
if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
- chann[saved_chann] =
- cpup->channel_id;
- saved_chann++;
+ if (saved_chann <=
+ LPFC_FCP_IO_CHAN_MAX) {
+ chann[saved_chann] =
+ cpup->channel_id;
+ saved_chann++;
+ }
goto out;
}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f87f90e9b7df..1e34b5408a29 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2145,10 +2145,12 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
reg_vfi->e_d_tov = phba->fc_edtov;
reg_vfi->r_a_tov = phba->fc_ratov;
- reg_vfi->bde.addrHigh = putPaddrHigh(phys);
- reg_vfi->bde.addrLow = putPaddrLow(phys);
- reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
- reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ if (phys) {
+ reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+ reg_vfi->bde.addrLow = putPaddrLow(phys);
+ reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+ reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ }
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
/* Only FC supports upd bit */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index ed9a2c80c4aa..193733e8c823 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -280,38 +280,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t *lp;
IOCB_t *icmd;
struct serv_parm *sp;
+ uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
struct ls_rjt stat;
int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
- if (vport->port_state <= LPFC_FDISC) {
- /* Before responding to PLOGI, check for pt2pt mode.
- * If we are pt2pt, with an outstanding FLOGI, abort
- * the FLOGI and resend it first.
- */
- if (vport->fc_flag & FC_PT2PT) {
- lpfc_els_abort_flogi(phba);
- if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
- /* If the other side is supposed to initiate
- * the PLOGI anyway, just ACC it now and
- * move on with discovery.
- */
- phba->fc_edtov = FF_DEF_EDTOV;
- phba->fc_ratov = FF_DEF_RATOV;
- /* Start discovery - this should just do
- CLEAR_LA */
- lpfc_disc_start(vport);
- } else
- lpfc_initial_flogi(vport);
- } else {
- stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, NULL);
- return 0;
- }
- }
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
@@ -404,30 +378,46 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
-
/* rcv'ed PLOGI decides what our NPortId will be */
vport->fc_myDID = icmd->un.rcvels.parmRo;
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
- goto out;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
+
+ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+ if (sp->cmn.edtovResolution) {
+ /* E_D_TOV ticks are in nanoseconds */
+ ed_tov = (phba->fc_edtov + 999999) / 1000000;
}
+
/*
- * For SLI4, the VFI/VPI are registered AFTER the
- * Nport with the higher WWPN sends us a PLOGI with
- * our assigned NPortId.
+ * For pt-to-pt, use the larger EDTOV
+ * RATOV = 2 * EDTOV
*/
+ if (ed_tov > phba->fc_edtov)
+ phba->fc_edtov = ed_tov;
+ phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
+
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ /* Issue config_link / reg_vfi to account for updated TOV's */
+
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
+ else {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox == NULL)
+ goto out;
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ }
lpfc_can_disctmo(vport);
}
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto out;
@@ -1038,7 +1028,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
uint32_t *lp;
IOCB_t *irsp;
struct serv_parm *sp;
+ uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
+ int rc;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1094,18 +1086,63 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+ if ((vport->fc_flag & FC_PT2PT) &&
+ (vport->fc_flag & FC_PT2PT_PLOGI)) {
+ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+ if (sp->cmn.edtovResolution) {
+ /* E_D_TOV ticks are in nanoseconds */
+ ed_tov = (phba->fc_edtov + 999999) / 1000000;
+ }
+
+ /*
+ * Use the larger EDTOV
+ * RATOV = 2 * EDTOV for pt-to-pt
+ */
+ if (ed_tov > phba->fc_edtov)
+ phba->fc_edtov = ed_tov;
+ phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
+
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ /* Issue config_link / reg_vfi to account for updated TOV's */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_issue_reg_vfi(vport);
+ } else {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0133 PLOGI: no memory "
+ "for config_link "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+ goto out;
+ }
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ }
+ }
+
+ lpfc_unreg_rpi(vport, ndlp);
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0133 PLOGI: no memory for reg_login "
- "Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_state,
- ndlp->nlp_flag, ndlp->nlp_rpi);
+ "0018 PLOGI: no memory for reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
goto out;
}
- lpfc_unreg_rpi(vport, ndlp);
-
if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
(uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
switch (ndlp->nlp_DID) {
@@ -2299,6 +2336,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
if (vport->phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+ lpfc_unreg_rpi(vport, ndlp);
+ }
} else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
lpfc_drop_node(vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4679ed4444a7..bae36cc3740b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3859,7 +3859,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
uint32_t tag;
uint16_t hwq;
- if (shost_use_blk_mq(cmnd->device->host)) {
+ if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
tag = blk_mq_unique_tag(cmnd->request);
hwq = blk_mq_unique_tag_to_hwq(tag);
@@ -3908,9 +3908,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t logit = LOG_FCP;
/* Sanity check on return of outstanding command */
- if (!(lpfc_cmd->pCmd))
- return;
cmd = lpfc_cmd->pCmd;
+ if (!cmd)
+ return;
shost = cmd->device->host;
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f9585cdd8933..92dfd6a5178c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -14842,10 +14842,12 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
struct lpfc_dmabuf *h_buf;
struct hbq_dmabuf *seq_dmabuf = NULL;
struct hbq_dmabuf *temp_dmabuf = NULL;
+ uint8_t found = 0;
INIT_LIST_HEAD(&dmabuf->dbuf.list);
dmabuf->time_stamp = jiffies;
new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+
/* Use the hdr_buf to find the sequence that this frame belongs to */
list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
temp_hdr = (struct fc_frame_header *)h_buf->virt;
@@ -14885,7 +14887,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
return seq_dmabuf;
}
/* find the correct place in the sequence to insert this frame */
- list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+ d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
+ while (!found) {
temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
/*
@@ -14895,9 +14898,17 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
if (be16_to_cpu(new_hdr->fh_seq_cnt) >
be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
- return seq_dmabuf;
+ found = 1;
+ break;
}
+
+ if (&d_buf->list == &seq_dmabuf->dbuf.list)
+ break;
+ d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
}
+
+ if (found)
+ return seq_dmabuf;
return NULL;
}
@@ -16173,7 +16184,7 @@ fail_fcf_read:
}
/**
- * lpfc_check_next_fcf_pri
+ * lpfc_check_next_fcf_pri_level
* phba pointer to the lpfc_hba struct for this port.
* This routine is called from the lpfc_sli4_fcf_rr_next_index_get
* routine when the rr_bmask is empty. The FCF indecies are put into the
@@ -16329,8 +16340,12 @@ next_priority:
if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
- LPFC_FCF_FLOGI_FAILED)
+ LPFC_FCF_FLOGI_FAILED) {
+ if (list_is_singular(&phba->fcf.fcf_pri_list))
+ return LPFC_FCOE_FCF_NEXT_NONE;
+
goto next_priority;
+ }
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2845 Get next roundrobin failover FCF (x%x)\n",
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index c0f7c8ce54aa..ef4ff03242ea 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1083,6 +1083,8 @@ struct megasas_ctrl_info {
#define VD_EXT_DEBUG 0
+#define SCAN_PD_CHANNEL 0x1
+#define SCAN_VD_CHANNEL 0x2
enum MR_SCSI_CMD_TYPE {
READ_WRITE_LDIO = 0,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 00ce3e269a43..3f8d357b1bac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -735,6 +735,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
&(regs)->inbound_high_queue_port);
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
+ mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -4669,7 +4670,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
- if (pci_request_selected_regions(instance->pdev, instance->bar,
+ if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
"megasas: LSI")) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
return -EBUSY;
@@ -4960,7 +4961,7 @@ fail_ready_state:
iounmap(instance->reg_set);
fail_ioremap:
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
return -EINVAL;
}
@@ -4981,7 +4982,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}
/**
@@ -5476,7 +5477,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->aen_mutex);
mutex_init(&instance->reset_mutex);
/*
@@ -6443,10 +6443,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
- mutex_lock(&instance->aen_mutex);
+ mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
aen.class_locale_word);
- mutex_unlock(&instance->aen_mutex);
+ mutex_unlock(&instance->reset_mutex);
return error;
}
@@ -6477,9 +6477,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
int i;
int error = 0;
compat_uptr_t ptr;
- unsigned long local_raw_ptr;
u32 local_sense_off;
u32 local_sense_len;
+ u32 user_sense_off;
if (clear_user(ioc, sizeof(*ioc)))
return -EFAULT;
@@ -6497,17 +6497,16 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
* sense_len is not null, so prepare the 64bit value under
* the same condition.
*/
- if (get_user(local_raw_ptr, ioc->frame.raw) ||
- get_user(local_sense_off, &ioc->sense_off) ||
- get_user(local_sense_len, &ioc->sense_len))
+ if (get_user(local_sense_off, &ioc->sense_off) ||
+ get_user(local_sense_len, &ioc->sense_len) ||
+ get_user(user_sense_off, &cioc->sense_off))
return -EFAULT;
-
if (local_sense_len) {
void __user **sense_ioc_ptr =
- (void __user **)((u8*)local_raw_ptr + local_sense_off);
+ (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
compat_uptr_t *sense_cioc_ptr =
- (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
if (get_user(ptr, sense_cioc_ptr) ||
put_user(compat_ptr(ptr), sense_ioc_ptr))
return -EFAULT;
@@ -6648,6 +6647,7 @@ megasas_aen_polling(struct work_struct *work)
int i, j, doscan = 0;
u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
int error;
+ u8 dcmd_ret = 0;
if (!instance) {
printk(KERN_ERR "invalid instance!\n");
@@ -6660,16 +6660,7 @@ megasas_aen_polling(struct work_struct *work)
wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
/* Don't run the event workqueue thread if OCR is running */
- for (i = 0; i < wait_time; i++) {
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
- break;
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "%s waiting for "
- "controller reset to finish for scsi%d\n",
- __func__, instance->host->host_no);
- }
- msleep(1000);
- }
+ mutex_lock(&instance->reset_mutex);
instance->ev = NULL;
host = instance->host;
@@ -6677,212 +6668,127 @@ megasas_aen_polling(struct work_struct *work)
megasas_decode_evt(instance);
switch (le32_to_cpu(instance->evt_detail->code)) {
- case MR_EVT_PD_INSERTED:
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- pd_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, i, j, 0);
-
- if (instance->pd_list[pd_index].driveState
- == MR_PD_STATE_SYSTEM) {
- if (!sdev1)
- scsi_add_device(host, i, j, 0);
-
- if (sdev1)
- scsi_device_put(sdev1);
- }
- }
- }
- }
- doscan = 0;
- break;
+ case MR_EVT_PD_INSERTED:
case MR_EVT_PD_REMOVED:
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- pd_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, i, j, 0);
-
- if (instance->pd_list[pd_index].driveState
- == MR_PD_STATE_SYSTEM) {
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
- }
- }
- }
- }
- doscan = 0;
+ dcmd_ret = megasas_get_pd_list(instance);
+ if (dcmd_ret == 0)
+ doscan = SCAN_PD_CHANNEL;
break;
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
- if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
- }
- }
- }
- doscan = 0;
- }
- break;
case MR_EVT_LD_CREATED:
if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- }
- }
- doscan = 0;
- }
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+ if (dcmd_ret == 0)
+ doscan = SCAN_VD_CHANNEL;
+
break;
+
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
- doscan = 1;
+ dcmd_ret = megasas_get_pd_list(instance);
+
+ if (dcmd_ret != 0)
+ break;
+
+ if (!instance->requestorId ||
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+ if (dcmd_ret != 0)
+ break;
+
+ doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
+ dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+ instance->host->host_no);
break;
+
case MR_EVT_CTRL_PROP_CHANGED:
- megasas_get_ctrl_info(instance);
- break;
+ dcmd_ret = megasas_get_ctrl_info(instance);
+ break;
default:
doscan = 0;
break;
}
} else {
dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
+ mutex_unlock(&instance->reset_mutex);
kfree(ev);
return;
}
- if (doscan) {
- dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
- instance->host->host_no);
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
- sdev1 = scsi_device_lookup(host, i, j, 0);
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- if (!sdev1) {
- scsi_add_device(host, i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ mutex_unlock(&instance->reset_mutex);
+
+ if (doscan & SCAN_PD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1)
+ scsi_add_device(host, i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
}
}
}
}
+ }
- if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- else
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ if (doscan & SCAN_VD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index] != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
}
}
}
}
}
- if (instance->aen_cmd != NULL) {
- kfree(ev);
- return ;
- }
-
- seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+ if (dcmd_ret == 0)
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+ else
+ seq_num = instance->last_seq_num;
/* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
- mutex_lock(&instance->aen_mutex);
+
+ if (instance->aen_cmd != NULL) {
+ kfree(ev);
+ return;
+ }
+
+ mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, seq_num,
class_locale.word);
- mutex_unlock(&instance->aen_mutex);
-
if (error)
- dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
+ dev_err(&instance->pdev->dev,
+ "register aen failed error %x\n", error);
+ mutex_unlock(&instance->reset_mutex);
kfree(ev);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 8d630a552b07..021b994fdae8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -201,6 +201,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
&instance->reg_set->inbound_low_queue_port);
writel(le32_to_cpu(req_desc->u.high),
&instance->reg_set->inbound_high_queue_port);
+ mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
@@ -2437,7 +2438,7 @@ megasas_release_fusion(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 11393ebf1a68..5b2c37f1e908 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2020,8 +2020,10 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->msix96_vector)
+ if (ioc->msix96_vector) {
kfree(ioc->replyPostRegisterIndex);
+ ioc->replyPostRegisterIndex = NULL;
+ }
if (ioc->chip_phys) {
iounmap(ioc->chip);
@@ -2155,6 +2157,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
} else
ioc->msix96_vector = 0;
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+ &ioc->chip->ReplyPostHostIndex;
+
+ for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+ ioc->reply_post_host_index[i] =
+ (resource_size_t __iomem *)
+ ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+ * 4)));
+ }
+
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -2229,6 +2242,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
}
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
/**
* mpt3sas_base_get_smid - obtain a free smid from internal queue
* @ioc: per adapter object
@@ -2289,6 +2308,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
request->scmd = scmd;
request->cb_idx = cb_idx;
smid = request->smid;
+ request->msix_io = _base_get_msix_index(ioc);
list_del(&request->tracker_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return smid;
@@ -2411,12 +2431,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
}
#endif
-static inline u8
-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
-{
- return ioc->cpu_msix_table[raw_smp_processor_id()];
-}
-
/**
* mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
@@ -2470,18 +2484,19 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
* @ioc: per adapter object
* @smid: system request message index
- *
+ * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
* Return nothing.
*/
void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
descriptor.HighPriority.RequestFlags =
MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.MSIxIndex = msix_task;
descriptor.HighPriority.SMID = cpu_to_le16(smid);
descriptor.HighPriority.LMID = 0;
descriptor.HighPriority.Reserved1 = 0;
@@ -5201,17 +5216,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_free_resources;
- if (ioc->is_warpdrive) {
- ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
- &ioc->chip->ReplyPostHostIndex;
-
- for (i = 1; i < ioc->cpu_msix_table_sz; i++)
- ioc->reply_post_host_index[i] =
- (resource_size_t __iomem *)
- ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
- * 4)));
- }
-
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
if (r)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 5ad271efbd45..92648a5ea2d2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -643,6 +643,7 @@ struct chain_tracker {
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
* @tracker_list: list of free request (ioc->free_list)
+ * @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
@@ -651,6 +652,7 @@ struct scsiio_tracker {
u8 direct_io;
struct list_head chain_list;
struct list_head tracker_list;
+ u16 msix_io;
};
/**
@@ -1213,7 +1215,8 @@ void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 msix_task);
void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d8366b056b70..4ccde5a05b70 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -817,7 +817,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 9ab77b06434d..6180f7970bbf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2193,6 +2193,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
unsigned long timeleft;
struct scsiio_tracker *scsi_lookup = NULL;
int rc;
+ u16 msix_task = 0;
if (m_type == TM_MUTEX_ON)
mutex_lock(&ioc->tm_cmds.mutex);
@@ -2256,7 +2257,12 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
+ (scsi_lookup->msix_io < ioc->reply_queue_count))
+ msix_task = scsi_lookup->msix_io;
+ else
+ msix_task = 0;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -3151,7 +3157,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3332,7 +3338,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
}
/**
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 75514a15bea0..f57d96984ae4 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1578,7 +1578,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
0, 0, 0, 0, 0, 0);
else {
- if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
else
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 93cbefa75b26..11cdb172cfaf 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -426,7 +426,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* here, and we don't know what device it is
* trying to work with, leave it as-is.
*/
- vmax = 8; /* max length of vendor */
+ vmax = sizeof(devinfo->vendor);
vskip = vendor;
while (vmax > 0 && *vskip == ' ') {
vmax--;
@@ -436,7 +436,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
while (vmax > 0 && vskip[vmax - 1] == ' ')
--vmax;
- mmax = 16; /* max length of model */
+ mmax = sizeof(devinfo->model);
mskip = model;
while (mmax > 0 && *mskip == ' ') {
mmax--;
@@ -452,10 +452,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* Behave like the older version of get_device_flags.
*/
if (memcmp(devinfo->vendor, vskip, vmax) ||
- devinfo->vendor[vmax])
+ (vmax < sizeof(devinfo->vendor) &&
+ devinfo->vendor[vmax]))
continue;
if (memcmp(devinfo->model, mskip, mmax) ||
- devinfo->model[mmax])
+ (mmax < sizeof(devinfo->model) &&
+ devinfo->model[mmax]))
continue;
return devinfo;
} else {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d47624000edf..98cabf409bf0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1059,11 +1059,12 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
}
error = scsi_dh_add_device(sdev);
- if (error) {
+ if (error)
+ /*
+ * device_handler is optional, so any error can be ignored
+ */
sdev_printk(KERN_INFO, sdev,
"failed to add device handler: %d\n", error);
- return error;
- }
device_enable_async_suspend(&sdev->sdev_dev);
error = device_add(&sdev->sdev_dev);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 99fd079fcefa..6e88e4b11273 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -42,6 +42,8 @@
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
+#include <linux/blkdev.h>
+
#include "ufshcd.h"
#include "ufshci.h"
#include "ufs_quirks.h"
@@ -350,7 +352,7 @@ static inline bool ufshcd_is_valid_pm_lvl(int lvl)
}
static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -2843,6 +2845,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Vote PM QoS for the request */
ufshcd_vops_pm_qos_req_start(hba, cmd->request);
+ /* IO svc time latency histogram */
+ if (hba != NULL && cmd->request != NULL) {
+ if (hba->latency_hist_enabled &&
+ (cmd->request->cmd_type == REQ_TYPE_FS)) {
+ cmd->request->lat_hist_io_start = ktime_get();
+ cmd->request->lat_hist_enabled = 1;
+ } else
+ cmd->request->lat_hist_enabled = 0;
+ }
+
+ WARN_ON(hba->clk_gating.state != CLKS_ON);
+
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -5225,19 +5239,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
+ retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
complete(hba->uic_async_done);
+ retval = IRQ_HANDLED;
+ }
+ return retval;
}
/**
@@ -5310,6 +5334,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
struct scsi_cmnd *cmd;
int result;
int index;
+ struct request *req;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
@@ -5339,6 +5364,23 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp, cmd->request);
}
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ req = cmd->request;
+ if (req) {
+ /* Update IO svc time latency histogram */
+ if (req->lat_hist_enabled) {
+ ktime_t completion;
+ u_int64_t delta_us;
+
+ completion = ktime_get();
+ delta_us = ktime_us_delta(completion,
+ req->lat_hist_io_start);
+ /* rq_data_dir() => true if WRITE */
+ blk_update_latency_hist(&hba->io_lat_s,
+ (rq_data_dir(req) == READ),
+ delta_us);
+ }
+ }
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
@@ -5364,8 +5406,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs;
u32 tr_doorbell;
@@ -5383,7 +5429,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ if (completed_reqs) {
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
@@ -5961,15 +6012,20 @@ static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ irqreturn_t retval = IRQ_NONE;
/* PHY layer lane error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
@@ -5991,61 +6047,79 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
}
}
}
+ retval |= IRQ_HANDLED;
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg)
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- } else if (hba->dev_quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ } else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg &
+ UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+ retval |= IRQ_HANDLED;
}
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg) {
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg) {
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg) {
+ if ((reg & UIC_DME_ERROR) &&
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ retval |= IRQ_HANDLED;
}
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
+ return retval;
}
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
queue_eh_work = true;
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
- ufshcd_update_uic_error(hba);
+ retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
@@ -6070,6 +6144,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
hba->ufshcd_state = UFSHCD_STATE_ERROR;
schedule_work(&hba->eh_work);
}
+ retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
@@ -6077,28 +6152,44 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
u32 tm_doorbell;
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up(&hba->tm_wq);
+ if (hba->tm_condition) {
+ wake_up(&hba->tm_wq);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
ufsdbg_error_inject_dispatcher(hba,
ERR_INJECT_INTR, intr_status, &intr_status);
@@ -6106,16 +6197,18 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (hba->errors || hba->ce_error)
- ufshcd_check_errors(hba);
+ retval |= ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
- ufshcd_uic_cmd_compl(hba, intr_status);
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
- ufshcd_tmc_handler(hba);
+ retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_transfer_req_compl(hba);
+
+ return retval;
}
/**
@@ -6123,27 +6216,44 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ /*
+ * There could be max of hba->nutrs reqs in flight and in worst case
+ * if the reqs get finished 1 by 1 after the interrupt status is
+ * read, make sure we handle them by checking the interrupt status
+ * again in a loop until we process all of the reqs before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
+ if (retval == IRQ_NONE) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+ __func__, intr_status);
+ ufshcd_hex_dump("host regs: ", hba->mmio_base,
+ UFSHCI_REG_SPACE_SIZE);
}
+
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -8832,6 +8942,54 @@ out:
}
EXPORT_SYMBOL(ufshcd_shutdown);
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ long value;
+
+ if (kstrtol(buf, 0, &value))
+ return -EINVAL;
+ if (value == BLK_IO_LAT_HIST_ZERO)
+ blk_zero_latency_hist(&hba->io_lat_s);
+ else if (value == BLK_IO_LAT_HIST_ENABLE ||
+ value == BLK_IO_LAT_HIST_DISABLE)
+ hba->latency_hist_enabled = value;
+ return count;
+}
+
+static ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return blk_latency_hist_show(&hba->io_lat_s, buf);
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+ latency_hist_show, latency_hist_store);
+
+static void
+ufshcd_init_latency_hist(struct ufs_hba *hba)
+{
+ if (device_create_file(hba->dev, &dev_attr_latency_hist))
+ dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
+}
+
+static void
+ufshcd_exit_latency_hist(struct ufs_hba *hba)
+{
+ device_remove_file(hba->dev, &dev_attr_latency_hist);
+}
+
/**
* ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
@@ -8848,6 +9006,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_exit_hibern8_on_idle(hba);
if (ufshcd_is_clkscaling_supported(hba)) {
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ ufshcd_exit_latency_hist(hba);
devfreq_remove_device(hba->devfreq);
}
ufshcd_hba_exit(hba);
@@ -9511,6 +9670,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
+ ufshcd_init_latency_hist(hba);
+
/*
* We are assuming that device wasn't put in sleep/power-down
* state exclusively during the boot stage before kernel.
@@ -9531,6 +9692,7 @@ out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
ufshcd_exit_clk_gating(hba);
+ ufshcd_exit_latency_hist(hba);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 81eab2cbb6cb..b20afd85beab 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -907,6 +907,9 @@ struct ufs_hba {
bool full_init_linereset;
struct pinctrl *pctrl;
+
+ int latency_hist_enabled;
+ struct io_latency_state io_lat_s;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 21ff179b7c0b..65ce7d791a47 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1670,6 +1670,14 @@ void ch_purge_intent_lists(struct channel_ctx *ctx)
}
spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+ spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ list_for_each_entry_safe(tx_info, tx_info_temp,
+ &ctx->tx_pending_remote_done, list_done) {
+ ctx->notify_tx_abort(ctx, ctx->user_priv, tx_info->pkt_priv);
+ rwref_put(&tx_info->pkt_ref);
+ }
+ spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
list_for_each_entry_safe(ptr_intent, tmp_intent,
&ctx->local_rx_intent_list, list) {
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 37204dcbc065..5f1064201b3a 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2822,7 +2822,8 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
icnss_pr_dbg("Modem-Notify: event %lu\n", code);
- if (code == SUBSYS_AFTER_SHUTDOWN) {
+ if (code == SUBSYS_AFTER_SHUTDOWN &&
+ notif->crashed != CRASH_STATUS_WDOG_BITE) {
icnss_remove_msa_permissions(priv);
icnss_pr_info("Collecting msa0 segment dump\n");
icnss_msa0_ramdump(priv);
@@ -3557,18 +3558,16 @@ unsigned int icnss_socinfo_get_serial_number(struct device *dev)
}
EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
-int icnss_set_wlan_mac_address(struct device *dev,
- const u8 *in, uint32_t len)
+int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len)
{
- struct icnss_priv *priv = dev_get_drvdata(dev);
+ struct icnss_priv *priv = penv;
uint32_t no_of_mac_addr;
struct icnss_wlan_mac_addr *addr = NULL;
int iter;
u8 *temp = NULL;
- if (priv->magic != ICNSS_MAGIC) {
- icnss_pr_err("Invalid drvdata: dev %p, data %p, magic 0x%x\n",
- dev, priv, priv->magic);
+ if (!priv) {
+ icnss_pr_err("Priv data is NULL\n");
return -EINVAL;
}
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 3c3ae693f615..bbd77d8e0650 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -278,6 +278,7 @@ int pil_mss_shutdown(struct pil_desc *pil)
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
int ret = 0;
+ dev_info(pil->dev, "MSS is shutting down\n");
if (drv->axi_halt_base) {
pil_q6v5_halt_axi_port(pil,
drv->axi_halt_base + MSS_Q6_HALT_BASE);
@@ -542,7 +543,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
struct modem_data *md = dev_get_drvdata(pil->dev);
- const struct firmware *fw, *dp_fw;
+ const struct firmware *fw, *dp_fw = NULL;
char fw_name_legacy[10] = "mba.b00";
char fw_name[10] = "mba.mbn";
char *dp_name = "msadp";
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index dc803bdfd554..8ed98e2cbd5e 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -82,7 +82,7 @@ static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
return IRQ_HANDLED;
pr_err("Fatal error on the modem.\n");
- subsys_set_crash_status(drv->subsys, true);
+ subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
restart_modem(drv);
return IRQ_HANDLED;
}
@@ -193,7 +193,7 @@ static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
!gpio_get_value(drv->subsys_desc.err_fatal_gpio))
panic("%s: System ramdump requested. Triggering device restart!\n",
__func__);
- subsys_set_crash_status(drv->subsys, true);
+ subsys_set_crash_status(drv->subsys, CRASH_STATUS_WDOG_BITE);
restart_modem(drv);
return IRQ_HANDLED;
}
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index a1cd3b1eeaff..a39c2b6aa672 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -149,7 +149,7 @@ err_vreg_pll:
err_cx_enable:
regulator_set_load(drv->vreg_cx, 0);
err_cx_mode:
- regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE, uv);
+ regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE, INT_MAX);
err_cx_voltage:
clk_disable_unprepare(drv->qdss_clk);
err_qdss_vote:
@@ -179,7 +179,7 @@ void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
}
regulator_disable(drv->vreg_cx);
regulator_set_load(drv->vreg_cx, 0);
- regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE, uv);
+ regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE, INT_MAX);
clk_disable_unprepare(drv->xo);
clk_disable_unprepare(drv->pnoc_clk);
clk_disable_unprepare(drv->qdss_clk);
@@ -198,9 +198,9 @@ void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
ret = readl_poll_timeout(halt_base + AXI_HALTACK,
status, status != 0, 50, HALT_ACK_TIMEOUT_US);
if (ret)
- dev_warn(pil->dev, "Port %p halt timeout\n", halt_base);
+ dev_warn(pil->dev, "Port %pK halt timeout\n", halt_base);
else if (!readl_relaxed(halt_base + AXI_IDLE))
- dev_warn(pil->dev, "Port %p halt failed\n", halt_base);
+ dev_warn(pil->dev, "Port %pK halt failed\n", halt_base);
/* Clear halt request (port will remain halted until reset) */
writel_relaxed(0, halt_base + AXI_HALTREQ);
@@ -512,6 +512,8 @@ static int __pil_q6v55_reset(struct pil_desc *pil)
val |= BIT(i);
writel_relaxed(val, drv->reg_base +
QDSP6V6SS_MEM_PWR_CTL);
+ val = readl_relaxed(drv->reg_base +
+ QDSP6V6SS_MEM_PWR_CTL);
/*
* Wait for 1us for both memory peripheral and
* data array to turn on.
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index f2216f968319..045a5001fc9f 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -134,6 +134,7 @@ struct scm_response {
#define R3_STR "r3"
#define R4_STR "r4"
#define R5_STR "r5"
+#define R6_STR "r6"
#endif
@@ -481,6 +482,7 @@ static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
register u32 r3 asm("r3") = w3;
register u32 r4 asm("r4") = w4;
register u32 r5 asm("r5") = w5;
+ register u32 r6 asm("r6") = 0;
do {
asm volatile(
@@ -494,13 +496,14 @@ static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
__asmeq("%7", R3_STR)
__asmeq("%8", R4_STR)
__asmeq("%9", R5_STR)
+ __asmeq("%10", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
- "r" (r5));
+ "r" (r5), "r" (r6));
} while (r0 == SCM_INTERRUPTED);
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index 6de73217bf86..d4be8c641ad8 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -235,7 +235,7 @@ static int marshal_out(void *buf, uint32_t buf_size,
pr_err("%s: buffer overflow detected\n", __func__);
goto out;
}
- if (copy_to_user((void __user *)(args_buf[i].b.addr),
+ if (copy_to_user((void __user *)(uintptr_t)(args_buf[i].b.addr),
(uint8_t *)(buf) + tz_args->b.offset,
tz_args->b.size)) {
pr_err("Error %d copying ctxt to user\n", ret);
@@ -320,7 +320,7 @@ static int marshal_in(const struct smcinvoke_cmd_req *req,
tz_args++;
if (copy_from_user(buf+offset,
- (void __user *)(args_buf[i].b.addr),
+ (void __user *)(uintptr_t)(args_buf[i].b.addr),
args_buf[i].b.size))
goto out;
@@ -389,7 +389,7 @@ long smcinvoke_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
}
ret = copy_from_user(args_buf,
- (void __user *)(req.args),
+ (void __user *)(uintptr_t)(req.args),
nr_args * req.argsize);
if (ret) {
@@ -424,8 +424,8 @@ long smcinvoke_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
ret = marshal_out(in_msg, inmsg_size, &req, args_buf);
- ret |= copy_to_user((void __user *)(req.args), args_buf,
- nr_args * req.argsize);
+ ret |= copy_to_user((void __user *)(uintptr_t)(req.args),
+ args_buf, nr_args * req.argsize);
ret |= copy_to_user((void __user *)arg, &req, sizeof(req));
if (ret)
goto out;
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 0c5f3b84162b..5fee83f8ada4 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -352,11 +352,11 @@ static void spcom_link_state_notif_cb(struct glink_link_state_cb_info *cb_info,
switch (cb_info->link_state) {
case GLINK_LINK_STATE_UP:
- pr_debug("GLINK_LINK_STATE_UP.\n");
+ pr_info("GLINK_LINK_STATE_UP.\n");
spcom_create_predefined_channels_chardev();
break;
case GLINK_LINK_STATE_DOWN:
- pr_debug("GLINK_LINK_STATE_DOWN.\n");
+ pr_err("GLINK_LINK_STATE_DOWN.\n");
break;
default:
pr_err("unknown link_state [%d].\n", cb_info->link_state);
@@ -466,7 +466,7 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
* This is not expected on normal operation.
* This may happen upon remote SSR.
*/
- pr_debug("GLINK_REMOTE_DISCONNECTED, ch [%s].\n", ch->name);
+ pr_err("GLINK_REMOTE_DISCONNECTED, ch [%s].\n", ch->name);
/*
* after glink_close(),
* expecting notify GLINK_LOCAL_DISCONNECTED
@@ -731,7 +731,9 @@ static int spcom_close(struct spcom_channel *ch)
ch->glink_handle = NULL;
ch->ref_count = 0;
-
+ ch->rx_abort = false;
+ ch->tx_abort = false;
+ ch->glink_state = GLINK_LOCAL_DISCONNECTED;
ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
ch->pid = 0;
@@ -1333,6 +1335,16 @@ static int spcom_handle_send_command(struct spcom_channel *ch,
pr_debug("send req/resp ch [%s] size [%d] .\n", ch->name, size);
+ /*
+ * check that cmd buf size is at least struct size,
+ * to allow access to struct fields.
+ */
+ if (size < sizeof(*cmd)) {
+ pr_err("ch [%s] invalid cmd buf.\n",
+ ch->name);
+ return -EINVAL;
+ }
+
/* Check if remote side connect */
if (!spcom_is_channel_connected(ch)) {
pr_err("ch [%s] remote side not connect.\n", ch->name);
@@ -1344,6 +1356,18 @@ static int spcom_handle_send_command(struct spcom_channel *ch,
buf_size = cmd->buf_size;
timeout_msec = cmd->timeout_msec;
+ /* Check param validity */
+ if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
+ pr_err("ch [%s] invalid buf size [%d].\n",
+ ch->name, buf_size);
+ return -EINVAL;
+ }
+ if (size != sizeof(*cmd) + buf_size) {
+ pr_err("ch [%s] invalid cmd size [%d].\n",
+ ch->name, size);
+ return -EINVAL;
+ }
+
/* Allocate Buffers*/
tx_buf_size = sizeof(*hdr) + buf_size;
tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
@@ -1407,6 +1431,11 @@ static int modify_ion_addr(void *buf,
return -ENODEV;
}
+ if (buf_size < sizeof(uint64_t)) {
+ pr_err("buf size too small [%d].\n", buf_size);
+ return -ENODEV;
+ }
+
if (buf_offset > buf_size - sizeof(uint64_t)) {
pr_err("invalid buf_offset [%d].\n", buf_offset);
return -ENODEV;
@@ -1469,6 +1498,16 @@ static int spcom_handle_send_modified_command(struct spcom_channel *ch,
pr_debug("send req/resp ch [%s] size [%d] .\n", ch->name, size);
+ /*
+ * check that cmd buf size is at least struct size,
+ * to allow access to struct fields.
+ */
+ if (size < sizeof(*cmd)) {
+ pr_err("ch [%s] invalid cmd buf.\n",
+ ch->name);
+ return -EINVAL;
+ }
+
/* Check if remote side connect */
if (!spcom_is_channel_connected(ch)) {
pr_err("ch [%s] remote side not connect.\n", ch->name);
@@ -1481,6 +1520,18 @@ static int spcom_handle_send_modified_command(struct spcom_channel *ch,
timeout_msec = cmd->timeout_msec;
memcpy(ion_info, cmd->ion_info, sizeof(ion_info));
+ /* Check param validity */
+ if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
+ pr_err("ch [%s] invalid buf size [%d].\n",
+ ch->name, buf_size);
+ return -EINVAL;
+ }
+ if (size != sizeof(*cmd) + buf_size) {
+ pr_err("ch [%s] invalid cmd size [%d].\n",
+ ch->name, size);
+ return -EINVAL;
+ }
+
/* Allocate Buffers*/
tx_buf_size = sizeof(*hdr) + buf_size;
tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
@@ -1539,13 +1590,18 @@ static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
struct ion_handle *ion_handle;
int i;
+ if (size != sizeof(*cmd)) {
+ pr_err("cmd size [%d] , expected [%d].\n",
+ (int) size, (int) sizeof(*cmd));
+ return -EINVAL;
+ }
+
/* Check ION client */
if (spcom_dev->ion_client == NULL) {
pr_err("invalid ion client.\n");
return -ENODEV;
}
-
/* Get ION handle from fd - this increments the ref count */
ion_handle = ion_import_dma_buf(spcom_dev->ion_client, fd);
if (ion_handle == NULL) {
@@ -1591,6 +1647,12 @@ static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
struct ion_client *ion_client = spcom_dev->ion_client;
int i;
+ if (size != sizeof(*cmd)) {
+ pr_err("cmd size [%d] , expected [%d].\n",
+ (int) size, (int) sizeof(*cmd));
+ return -EINVAL;
+ }
+
/* Check ION client */
if (ion_client == NULL) {
pr_err("fail to create ion client.\n");
@@ -1746,6 +1808,13 @@ static int spcom_handle_read_req_resp(struct spcom_channel *ch,
return -ENOTCONN;
}
+ /* Check param validity */
+ if (size > SPCOM_MAX_RESPONSE_SIZE) {
+ pr_err("ch [%s] inavlid size [%d].\n",
+ ch->name, size);
+ return -EINVAL;
+ }
+
/* Allocate Buffers*/
rx_buf_size = sizeof(*hdr) + size;
rx_buf = kzalloc(rx_buf_size, GFP_KERNEL);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index e70a56e7ce2e..b8d096a9c057 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -876,7 +876,7 @@ static irqreturn_t subsys_err_fatal_intr_handler (int irq, void *dev_id)
d->subsys_desc.name);
return IRQ_HANDLED;
}
- subsys_set_crash_status(d->subsys, true);
+ subsys_set_crash_status(d->subsys, CRASH_STATUS_ERR_FATAL);
log_failure_reason(d);
subsystem_restart_dev(d->subsys);
@@ -895,7 +895,7 @@ static irqreturn_t subsys_wdog_bite_irq_handler(int irq, void *dev_id)
!gpio_get_value(d->subsys_desc.err_fatal_gpio))
panic("%s: System ramdump requested. Triggering device restart!\n",
__func__);
- subsys_set_crash_status(d->subsys, true);
+ subsys_set_crash_status(d->subsys, CRASH_STATUS_WDOG_BITE);
log_failure_reason(d);
subsystem_restart_dev(d->subsys);
@@ -952,7 +952,7 @@ static void clear_wdog(struct pil_tz_data *d)
if (!subsys_get_crash_status(d->subsys)) {
pr_err("wdog bite received from %s!\n", d->subsys_desc.name);
__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
- subsys_set_crash_status(d->subsys, true);
+ subsys_set_crash_status(d->subsys, CRASH_STATUS_WDOG_BITE);
log_failure_reason(d);
subsystem_restart_dev(d->subsys);
}
diff --git a/drivers/soc/qcom/subsystem_notif.c b/drivers/soc/qcom/subsystem_notif.c
index 431bbd8cee6f..8b24008b5abe 100644
--- a/drivers/soc/qcom/subsystem_notif.c
+++ b/drivers/soc/qcom/subsystem_notif.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2013, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -196,7 +196,7 @@ static int subsys_notifier_test_call(struct notifier_block *this,
switch (code) {
default:
- printk(KERN_WARNING "%s: Notification %s from subsystem %p\n",
+ pr_warn("%s: Notification %s from subsystem %pK\n",
__func__, notif_to_string(code), data);
break;
@@ -212,7 +212,7 @@ static struct notifier_block nb = {
static void subsys_notif_reg_test_notifier(const char *subsys_name)
{
void *handle = subsys_notif_register_notifier(subsys_name, &nb);
- printk(KERN_WARNING "%s: Registered test notifier, handle=%p",
+ pr_warn("%s: Registered test notifier, handle=%pK",
__func__, handle);
}
#endif
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 015e60ac622c..77362912321d 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -178,7 +178,7 @@ struct subsys_device {
struct cdev char_dev;
dev_t dev_no;
struct completion err_ready;
- bool crashed;
+ enum crash_status crashed;
int notif_state;
struct list_head list;
};
@@ -598,10 +598,11 @@ static void subsystem_shutdown(struct subsys_device *dev, void *data)
{
const char *name = dev->desc->name;
- pr_info("[%p]: Shutting down %s\n", current, name);
+ pr_info("[%s:%d]: Shutting down %s\n",
+ current->comm, current->pid, name);
if (dev->desc->shutdown(dev->desc, true) < 0)
- panic("subsys-restart: [%p]: Failed to shutdown %s!",
- current, name);
+ panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
+ current->comm, current->pid, name);
dev->crash_count++;
subsys_set_state(dev, SUBSYS_OFFLINE);
disable_all_irqs(dev);
@@ -613,7 +614,8 @@ static void subsystem_ramdump(struct subsys_device *dev, void *data)
if (dev->desc->ramdump)
if (dev->desc->ramdump(is_ramdump_enabled(dev), dev->desc) < 0)
- pr_warn("%s[%p]: Ramdump failed.\n", name, current);
+ pr_warn("%s[%s:%d]: Ramdump failed.\n",
+ name, current->comm, current->pid);
dev->do_ramdump_on_put = false;
}
@@ -628,13 +630,14 @@ static void subsystem_powerup(struct subsys_device *dev, void *data)
const char *name = dev->desc->name;
int ret;
- pr_info("[%p]: Powering up %s\n", current, name);
+ pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name);
init_completion(&dev->err_ready);
if (dev->desc->powerup(dev->desc) < 0) {
notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
NULL);
- panic("[%p]: Powerup error: %s!", current, name);
+ panic("[%s:%d]: Powerup error: %s!",
+ current->comm, current->pid, name);
}
enable_all_irqs(dev);
@@ -642,11 +645,11 @@ static void subsystem_powerup(struct subsys_device *dev, void *data)
if (ret) {
notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
NULL);
- panic("[%p]: Timed out waiting for error ready: %s!",
- current, name);
+ panic("[%s:%d]: Timed out waiting for error ready: %s!",
+ current->comm, current->pid, name);
}
subsys_set_state(dev, SUBSYS_ONLINE);
- subsys_set_crash_status(dev, false);
+ subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH);
}
static int __find_subsys(struct device *dev, void *data)
@@ -952,8 +955,8 @@ static void subsystem_restart_wq_func(struct work_struct *work)
*/
mutex_lock(&soc_order_reg_lock);
- pr_debug("[%p]: Starting restart sequence for %s\n", current,
- desc->name);
+ pr_debug("[%s:%d]: Starting restart sequence for %s\n",
+ current->comm, current->pid, desc->name);
notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
for_each_subsys_device(list, count, NULL, subsystem_shutdown);
notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
@@ -974,8 +977,8 @@ static void subsystem_restart_wq_func(struct work_struct *work)
for_each_subsys_device(list, count, NULL, subsystem_powerup);
notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
- pr_info("[%p]: Restart sequence for %s completed.\n",
- current, desc->name);
+ pr_info("[%s:%d]: Restart sequence for %s completed.\n",
+ current->comm, current->pid, desc->name);
mutex_unlock(&soc_order_reg_lock);
mutex_unlock(&track->lock);
@@ -1126,12 +1129,13 @@ int subsystem_crashed(const char *name)
}
EXPORT_SYMBOL(subsystem_crashed);
-void subsys_set_crash_status(struct subsys_device *dev, bool crashed)
+void subsys_set_crash_status(struct subsys_device *dev,
+ enum crash_status crashed)
{
dev->crashed = crashed;
}
-bool subsys_get_crash_status(struct subsys_device *dev)
+enum crash_status subsys_get_crash_status(struct subsys_device *dev)
{
return dev->crashed;
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 73c8ea0b1360..3cac73e4c3e4 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -548,7 +548,14 @@ static void reset_sccr1(struct driver_data *drv_data)
u32 sccr1_reg;
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
- sccr1_reg &= ~SSCR1_RFT;
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
+ break;
+ default:
+ sccr1_reg &= ~SSCR1_RFT;
+ break;
+ }
sccr1_reg |= chip->threshold;
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index fbb0a4d74e91..39d7c7c70112 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -170,13 +170,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
{
struct sun4i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
/* We don't support transfer larger than the FIFO */
if (tfr->len > SUN4I_FIFO_DEPTH)
- return -EINVAL;
+ return -EMSGSIZE;
+
+ if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+ return -EMSGSIZE;
reinit_completion(&sspi->done);
sspi->tx_buf = tfr->tx_buf;
@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
- /* Fill the TX FIFO */
- sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+ /*
+ * Fill the TX FIFO
+ * Filling the FIFO fully causes timeout for some reason
+ * at least on spi2 on A10s
+ */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
/* Enable the interrupts */
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index ac48f59705a8..e77add01b0e9 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 176f22ba570c..c2ef091d72ce 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -157,6 +157,15 @@ int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
}
EXPORT_SYMBOL(msm_ion_do_cache_op);
+int msm_ion_do_cache_offset_op(
+ struct ion_client *client, struct ion_handle *handle,
+ void *vaddr, unsigned int offset, unsigned long len,
+ unsigned int cmd)
+{
+ return ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+}
+EXPORT_SYMBOL(msm_ion_do_cache_offset_op);
+
static int ion_no_pages_cache_ops(struct ion_client *client,
struct ion_handle *handle,
void *vaddr,
@@ -305,13 +314,23 @@ static int ion_pages_cache_ops(struct ion_client *client,
};
for_each_sg(table->sgl, sg, table->nents, i) {
+ unsigned int sg_offset, sg_left, size = 0;
+
len += sg->length;
- if (len < offset)
+ if (len <= offset)
continue;
- __do_cache_ops(sg_page(sg), sg->offset, sg->length, op);
+ sg_left = len - offset;
+ sg_offset = sg->length - sg_left;
+
+ size = (length < sg_left) ? length : sg_left;
+
+ __do_cache_ops(sg_page(sg), sg_offset, size, op);
+
+ offset += size;
+ length -= size;
- if (len > length + offset)
+ if (length == 0)
break;
}
return 0;
diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h
index d8677b2fb55a..098104d56fdb 100644
--- a/drivers/staging/android/ion/msm/msm_ion.h
+++ b/drivers/staging/android/ion/msm/msm_ion.h
@@ -1,3 +1,16 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#ifndef _MSM_MSM_ION_H
#define _MSM_MSM_ION_H
@@ -157,6 +170,11 @@ int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
void *vaddr, unsigned long len, unsigned int cmd);
+int msm_ion_do_cache_offset_op(
+ struct ion_client *client, struct ion_handle *handle,
+ void *vaddr, unsigned int offset, unsigned long len,
+ unsigned int cmd);
+
#else
static inline struct ion_client *msm_ion_client_create(const char *name)
{
@@ -176,6 +194,14 @@ static inline int msm_ion_do_cache_op(struct ion_client *client,
return -ENODEV;
}
+int msm_ion_do_cache_offset_op(
+ struct ion_client *client, struct ion_handle *handle,
+ void *vaddr, unsigned int offset, unsigned long len,
+ unsigned int cmd)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_ION */
#endif
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index a76a7ff618b9..68a4559f9d26 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -98,6 +98,8 @@ static unsigned long lowmem_count(struct shrinker *s,
static atomic_t shift_adj = ATOMIC_INIT(0);
static short adj_max_shift = 353;
+module_param_named(adj_max_shift, adj_max_shift, short,
+ S_IRUGO | S_IWUSR);
/* User knob to enable/disable adaptive lmk feature */
static int enable_adaptive_lmk;
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 4ab186669f0c..ec5b9a23494d 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -56,11 +56,6 @@
#define N_CHANS 8
-enum waveform_state_bits {
- WAVEFORM_AI_RUNNING,
- WAVEFORM_AO_RUNNING
-};
-
/* Data unique to this driver */
struct waveform_private {
struct timer_list ai_timer; /* timer for AI commands */
@@ -68,7 +63,6 @@ struct waveform_private {
unsigned int wf_amplitude; /* waveform amplitude in microvolts */
unsigned int wf_period; /* waveform period in microseconds */
unsigned int wf_current; /* current time in waveform period */
- unsigned long state_bits;
unsigned int ai_scan_period; /* AI scan period in usec */
unsigned int ai_convert_period; /* AI conversion period in usec */
struct timer_list ao_timer; /* timer for AO commands */
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg)
unsigned int nsamples;
unsigned int time_increment;
- /* check command is still active */
- if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
- return;
-
now = ktime_to_us(ktime_get());
nsamples = comedi_nsamples_left(s, UINT_MAX);
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
*/
devpriv->ai_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
-
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
add_timer(&devpriv->ai_timer);
return 0;
}
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->ai_timer);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ai_timer);
+ } else {
+ del_timer_sync(&devpriv->ai_timer);
+ }
return 0;
}
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg)
u64 scans_since;
unsigned int scans_avail = 0;
- /* check command is still active */
- if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits))
- return;
-
/* determine number of scan periods since last time */
now = ktime_to_us(ktime_get());
scans_since = now - devpriv->ao_last_scan_time;
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
devpriv->ao_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
-
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
add_timer(&devpriv->ao_timer);
return 1;
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
s->async->inttrig = NULL;
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->ao_timer);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ao_timer);
+ } else {
+ del_timer_sync(&devpriv->ao_timer);
+ }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 57ab6680e3ae..e5fee6e0fb47 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -636,7 +636,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
const struct daq200_boardtype *board;
int i;
- if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
+ if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
return NULL;
for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 27fbf1a81097..35ab4a9ef95d 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2823,7 +2823,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
int i;
static const int timeout = 1000;
- if (trig_num != cmd->start_arg)
+ /*
+ * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+ * For backwards compatibility, also allow trig_num == 0 when
+ * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+ * in that case, the internal trigger is being used as a pre-trigger
+ * before the external trigger.
+ */
+ if (!(trig_num == cmd->start_arg ||
+ (trig_num == 0 && cmd->start_src != TRIG_INT)))
return -EINVAL;
/* Null trig at beginning prevent ao start trigger from executing more than
@@ -5346,7 +5354,7 @@ static int ni_E_init(struct comedi_device *dev,
s->maxdata = (devpriv->is_m_series) ? 0xffffffff
: 0x00ffffff;
s->insn_read = ni_tio_insn_read;
- s->insn_write = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
s->insn_config = ni_tio_insn_config;
#ifdef PCIDMA
if (dev->irq && devpriv->mite) {
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 9096d311e45d..c2d9b793759d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -631,8 +631,6 @@ struct ll_file_data {
struct lov_stripe_md;
-extern spinlock_t inode_lock;
-
extern struct dentry *llite_root;
extern struct kset *llite_kset;
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
index 13c3cd11ab92..05d30f433b19 100644
--- a/drivers/staging/rdma/ipath/ipath_file_ops.c
+++ b/drivers/staging/rdma/ipath/ipath_file_ops.c
@@ -45,6 +45,8 @@
#include <linux/uio.h>
#include <asm/pgtable.h>
+#include <rdma/ib.h>
+
#include "ipath_kernel.h"
#include "ipath_common.h"
#include "ipath_user_sdma.h"
@@ -2243,6 +2245,9 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 72204fbf2bb1..bd810c109277 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -492,7 +492,8 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
spin_lock_bh(&conn->cmd_lock);
- if (!list_empty(&cmd->i_conn_node))
+ if (!list_empty(&cmd->i_conn_node) &&
+ !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
@@ -4194,6 +4195,7 @@ transport_err:
static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
+ LIST_HEAD(tmp_list);
struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
struct iscsi_session *sess = conn->sess;
/*
@@ -4202,18 +4204,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
* has been reset -> returned sleeping pre-handler state.
*/
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+ list_splice_init(&conn->conn_cmd_list, &tmp_list);
+ list_for_each_entry(cmd, &tmp_list, i_conn_node) {
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (se_cmd->se_tfo != NULL) {
+ spin_lock(&se_cmd->t_state_lock);
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ spin_unlock(&se_cmd->t_state_lock);
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
- spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
-
iscsit_free_cmd(cmd, true);
- spin_lock_bh(&conn->cmd_lock);
}
- spin_unlock_bh(&conn->cmd_lock);
}
static void iscsit_stop_timers_for_cmds(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 96e78c823d13..316f66172335 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1357,8 +1357,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
login->zero_tsih = zero_tsih;
- conn->sess->se_sess->sup_prot_ops =
- conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+ if (conn->sess)
+ conn->sess->se_sess->sup_prot_ops =
+ conn->conn_transport->iscsit_get_sup_prot_ops(conn);
tpg = conn->tpg;
if (!tpg) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 3436a83568ea..dcd5ed26eb18 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -832,13 +832,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
* in ATA and we need to set TPE=1
*/
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q, int block_size)
+ struct request_queue *q)
{
+ int block_size = queue_logical_block_size(q);
+
if (!blk_queue_discard(q))
return false;
- attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
- block_size;
+ attrib->max_unmap_lba_count =
+ q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 75f0f08b2a34..79291869bce6 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
- fd_dev->fd_block_size))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
pr_debug("IFILE: BLOCK Discard support available,"
" disabled by default\n");
/*
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 2c53dcefff3e..4620c1dcdbc7 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
- dev->dev_attrib.hw_block_size))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dae0750c2032..253a91bff943 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -148,6 +148,7 @@ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
bool target_check_wce(struct se_device *dev);
bool target_check_fua(struct se_device *dev);
+void __target_execute_cmd(struct se_cmd *, bool);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_device *);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 98698d875742..c220bb8dfa9d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -594,7 +594,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, false);
kfree(buf);
return ret;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d151bc3d6971..7bc3778a1ac9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1270,23 +1270,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
trace_target_sequencer_start(cmd);
- /*
- * Check for an existing UNIT ATTENTION condition
- */
- ret = target_scsi3_ua_check(cmd);
- if (ret)
- return ret;
-
- ret = target_alua_state_check(cmd);
- if (ret)
- return ret;
-
- ret = target_check_reservation(cmd);
- if (ret) {
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- return ret;
- }
-
ret = dev->transport->parse_cdb(cmd);
if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
@@ -1749,20 +1732,45 @@ queue_full:
}
EXPORT_SYMBOL(transport_generic_request_failure);
-void __target_execute_cmd(struct se_cmd *cmd)
+void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
sense_reason_t ret;
- if (cmd->execute_cmd) {
- ret = cmd->execute_cmd(cmd);
- if (ret) {
- spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
- spin_unlock_irq(&cmd->t_state_lock);
+ if (!cmd->execute_cmd) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto err;
+ }
+ if (do_checks) {
+ /*
+ * Check for an existing UNIT ATTENTION condition after
+ * target_handle_task_attr() has done SAM task attr
+ * checking, and possibly have already defered execution
+ * out to target_restart_delayed_cmds() context.
+ */
+ ret = target_scsi3_ua_check(cmd);
+ if (ret)
+ goto err;
+
+ ret = target_alua_state_check(cmd);
+ if (ret)
+ goto err;
- transport_generic_request_failure(cmd, ret);
+ ret = target_check_reservation(cmd);
+ if (ret) {
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ goto err;
}
}
+
+ ret = cmd->execute_cmd(cmd);
+ if (!ret)
+ return;
+err:
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ transport_generic_request_failure(cmd, ret);
}
static int target_write_prot_action(struct se_cmd *cmd)
@@ -1807,6 +1815,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
+ cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
+
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
@@ -1887,7 +1897,7 @@ void target_execute_cmd(struct se_cmd *cmd)
return;
}
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);
@@ -1911,7 +1921,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
break;
@@ -1929,6 +1939,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return;
+ if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
+ goto restart;
+
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
atomic_dec_mb(&dev->simple_cmds);
dev->dev_cur_ordered_id++;
@@ -1945,7 +1958,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
-
+restart:
target_restart_delayed_cmds(dev);
}
@@ -2533,15 +2546,10 @@ static void target_release_cmd_kref(struct kref *kref)
bool fabric_stop;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (list_empty(&se_cmd->se_cmd_list)) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- se_cmd->se_tfo->release_cmd(se_cmd);
- return;
- }
spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
+ (se_cmd->transport_state & CMD_T_ABORTED);
spin_unlock(&se_cmd->t_state_lock);
if (se_cmd->cmd_wait_set || fabric_stop) {
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 7865228f664f..807d80145686 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -679,14 +679,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
/* this is called once with whichever end is closed last */
static void pty_unix98_shutdown(struct tty_struct *tty)
{
- struct inode *ptmx_inode;
+ struct pts_fs_info *fsi;
if (tty->driver->subtype == PTY_TYPE_MASTER)
- ptmx_inode = tty->driver_data;
+ fsi = tty->driver_data;
else
- ptmx_inode = tty->link->driver_data;
- devpts_kill_index(ptmx_inode, tty->index);
- devpts_del_ref(ptmx_inode);
+ fsi = tty->link->driver_data;
+ devpts_kill_index(fsi, tty->index);
+ devpts_put_ref(fsi);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -738,6 +738,7 @@ static const struct tty_operations pty_unix98_ops = {
static int ptmx_open(struct inode *inode, struct file *filp)
{
+ struct pts_fs_info *fsi;
struct tty_struct *tty;
struct inode *slave_inode;
int retval;
@@ -752,47 +753,41 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
return retval;
+ fsi = devpts_get_ref(inode, filp);
+ retval = -ENODEV;
+ if (!fsi)
+ goto out_free_file;
+
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
- index = devpts_new_index(inode);
- if (index < 0) {
- retval = index;
- mutex_unlock(&devpts_mutex);
- goto err_file;
- }
-
+ index = devpts_new_index(fsi);
mutex_unlock(&devpts_mutex);
- mutex_lock(&tty_mutex);
- tty = tty_init_dev(ptm_driver, index);
+ retval = index;
+ if (index < 0)
+ goto out_put_ref;
- if (IS_ERR(tty)) {
- retval = PTR_ERR(tty);
- goto out;
- }
+ mutex_lock(&tty_mutex);
+ tty = tty_init_dev(ptm_driver, index);
/* The tty returned here is locked so we can safely
drop the mutex */
mutex_unlock(&tty_mutex);
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- tty->driver_data = inode;
+ retval = PTR_ERR(tty);
+ if (IS_ERR(tty))
+ goto out;
/*
- * In the case where all references to ptmx inode are dropped and we
- * still have /dev/tty opened pointing to the master/slave pair (ptmx
- * is closed/released before /dev/tty), we must make sure that the inode
- * is still valid when we call the final pty_unix98_shutdown, thus we
- * hold an additional reference to the ptmx inode. For the same /dev/tty
- * last close case, we also need to make sure the super_block isn't
- * destroyed (devpts instance unmounted), before /dev/tty is closed and
- * on its release devpts_kill_index is called.
+ * From here on out, the tty is "live", and the index and
+ * fsi will be killed/put by the tty_release()
*/
- devpts_add_ref(inode);
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = fsi;
tty_add_file(tty, filp);
- slave_inode = devpts_pty_new(inode,
+ slave_inode = devpts_pty_new(fsi,
MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
tty->link);
if (IS_ERR(slave_inode)) {
@@ -811,12 +806,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
return 0;
err_release:
tty_unlock(tty);
+ // This will also put-ref the fsi
tty_release(inode, filp);
return retval;
out:
- mutex_unlock(&tty_mutex);
- devpts_kill_index(inode, index);
-err_file:
+ devpts_kill_index(fsi, index);
+out_put_ref:
+ devpts_put_ref(fsi);
+out_free_file:
tty_free_file(filp);
return retval;
}
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 7bbadd176c74..7b5462eb8388 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -485,19 +485,21 @@ static void atmel_start_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_pdc_tx(port)) {
- if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
- /* The transmitter is already running. Yes, we
- really need this.*/
- return;
+ if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
+ & ATMEL_PDC_TXTEN))
+ /* The transmitter is already running. Yes, we
+ really need this.*/
+ return;
+ if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX))
atmel_stop_rx(port);
+ if (atmel_use_pdc_tx(port))
/* re-enable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
- }
+
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
}
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 9fdb06e08d4b..8f68acd1d95d 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -35,7 +35,6 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -404,12 +403,8 @@ static void msm_stop_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
msm_port->imr &= ~UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_start_tx(struct uart_port *port)
@@ -421,12 +416,8 @@ static void msm_start_tx(struct uart_port *port)
if (dma->count)
return;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
msm_port->imr |= UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_reset_dm_count(struct uart_port *port, int count)
@@ -448,8 +439,6 @@ static void msm_complete_tx_dma(void *args)
unsigned int count;
u32 val;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
spin_lock_irqsave(&port->lock, flags);
/* Already stopped */
@@ -486,8 +475,6 @@ static void msm_complete_tx_dma(void *args)
msm_handle_tx(port);
done:
spin_unlock_irqrestore(&port->lock, flags);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
@@ -560,8 +547,6 @@ static void msm_complete_rx_dma(void *args)
unsigned long flags;
u32 val;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
spin_lock_irqsave(&port->lock, flags);
/* Already stopped */
@@ -613,8 +598,6 @@ done:
if (count)
tty_flip_buffer_push(tport);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_start_rx_dma(struct msm_port *msm_port)
@@ -689,28 +672,19 @@ static void msm_stop_rx(struct uart_port *port)
struct msm_port *msm_port = UART_TO_MSM(port);
struct msm_dma *dma = &msm_port->rx_dma;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
msm_write(port, msm_port->imr, UART_IMR);
if (dma->chan)
msm_stop_dma(port, dma);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_enable_ms(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
-
msm_port->imr |= UART_IMR_DELTA_CTS;
msm_write(port, msm_port->imr, UART_IMR);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
@@ -957,8 +931,6 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
unsigned int misr;
u32 val;
- if (pm_runtime_get_sync(port->dev) < 0)
- return IRQ_NONE;
spin_lock_irqsave(&port->lock, flags);
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
@@ -992,25 +964,13 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
spin_unlock_irqrestore(&port->lock, flags);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
return IRQ_HANDLED;
}
static unsigned int msm_tx_empty(struct uart_port *port)
{
- int ret;
-
- ret = pm_runtime_get_sync(port->dev);
- if (ret < 0)
- return ret;
-
- ret = msm_read(port, UART_SR) & UART_SR_TX_EMPTY ? TIOCSER_TEMT : 0;
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
-
- return ret;
+ return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
}
static unsigned int msm_get_mctrl(struct uart_port *port)
@@ -1039,8 +999,6 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mr;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
mr = msm_read(port, UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
@@ -1051,20 +1009,14 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
mr |= UART_MR1_RX_RDY_CTL;
msm_write(port, mr, UART_MR1);
}
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static void msm_break_ctl(struct uart_port *port, int break_ctl)
{
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
if (break_ctl)
msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
else
msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
struct msm_baud_map {
@@ -1224,15 +1176,16 @@ static int msm_startup(struct uart_port *port)
* avoid losing received character
*/
ret = clk_prepare_enable(msm_port->clk);
- if (ret)
- return ret;
- ret = clk_prepare(msm_port->pclk);
- if (ret)
+ if (ret) {
+ goto err_clk;
return ret;
+ }
- ret = pm_runtime_get_sync(port->dev);
- if (ret < 0)
- goto err;
+ ret = clk_prepare_enable(msm_port->pclk);
+ if (ret) {
+ goto err_pclk;
+ return ret;
+ }
msm_serial_set_mnd_regs(port);
@@ -1260,15 +1213,13 @@ static int msm_startup(struct uart_port *port)
msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
}
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
-
return 0;
-err:
- clk_unprepare(msm_port->pclk);
+err_pclk:
clk_disable_unprepare(msm_port->clk);
+err_clk:
free_irq(port->irq, port);
+
return ret;
}
@@ -1276,18 +1227,13 @@ static void msm_shutdown(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
-
msm_port->imr = 0;
msm_write(port, 0, UART_IMR); /* disable interrupts */
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
- clk_unprepare(msm_port->pclk);
+ clk_disable_unprepare(msm_port->pclk);
clk_disable_unprepare(msm_port->clk);
free_irq(port->irq, port);
@@ -1301,8 +1247,6 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long flags;
unsigned int baud, mr;
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
spin_lock_irqsave(&port->lock, flags);
if (dma->chan) /* Terminate if any */
@@ -1376,8 +1320,6 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
msm_start_rx_dma(msm_port);
spin_unlock_irqrestore(&port->lock, flags);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static const char *msm_type(struct uart_port *port)
@@ -1464,20 +1406,14 @@ static void msm_power(struct uart_port *port, unsigned int state,
*/
if (clk_prepare_enable(msm_port->clk))
return;
- if (clk_prepare(msm_port->pclk)) {
- clk_disable_unprepare(msm_port->clk);
- return;
- }
- if (pm_runtime_get_sync(port->dev) < 0) {
- clk_unprepare(msm_port->pclk);
+ if (clk_prepare_enable(msm_port->pclk)) {
clk_disable_unprepare(msm_port->clk);
return;
}
break;
case 3:
- pm_runtime_put(port->dev);
- clk_unprepare(msm_port->pclk);
clk_disable_unprepare(msm_port->clk);
+ clk_disable_unprepare(msm_port->pclk);
break;
default:
pr_err("msm_serial: Unknown PM state %d\n", state);
@@ -1719,11 +1655,7 @@ static void msm_console_write(struct console *co, const char *s,
port = msm_get_port_from_line(co->index);
msm_port = UART_TO_MSM(port);
- if (pm_runtime_get_sync(port->dev) < 0)
- return;
__msm_console_write(port, s, count, msm_port->is_uartdm);
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
}
static int __init msm_console_setup(struct console *co, char *options)
@@ -1885,12 +1817,6 @@ static int msm_serial_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, port);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
- pm_runtime_irq_safe(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
-
return uart_add_one_port(&msm_uart_driver, port);
}
@@ -1899,7 +1825,6 @@ static int msm_serial_remove(struct platform_device *pdev)
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&msm_uart_driver, port);
- pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -1910,34 +1835,6 @@ static const struct of_device_id msm_match_table[] = {
{}
};
-#ifdef CONFIG_PM
-static int msm_serial_runtime_suspend(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct msm_port *msm_port = UART_TO_MSM(port);
-
- if (msm_port->is_uartdm)
- clk_disable(msm_port->pclk);
-
- return 0;
-}
-
-static int msm_serial_runtime_resume(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct msm_port *msm_port = UART_TO_MSM(port);
- int ret;
-
- if (msm_port->is_uartdm) {
- ret = clk_enable(msm_port->pclk);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-#endif
-
#ifdef CONFIG_PM_SLEEP
static int msm_serial_suspend(struct device *dev)
{
@@ -1960,8 +1857,6 @@ static int msm_serial_resume(struct device *dev)
static const struct dev_pm_ops msm_serial_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_serial_suspend, msm_serial_resume)
- SET_RUNTIME_PM_OPS(msm_serial_runtime_suspend,
- msm_serial_runtime_resume, NULL)
};
static struct platform_driver msm_platform_driver = {
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index c8ab5370670d..ab8308ff7e69 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1676,7 +1676,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
return -ENODEV;
if (port->mapbase != 0)
- return 0;
+ return -EINVAL;
/* setup info for port */
port->dev = &platdev->dev;
@@ -1730,22 +1730,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ourport->dma = devm_kzalloc(port->dev,
sizeof(*ourport->dma),
GFP_KERNEL);
- if (!ourport->dma)
- return -ENOMEM;
+ if (!ourport->dma) {
+ ret = -ENOMEM;
+ goto err;
+ }
}
ourport->clk = clk_get(&platdev->dev, "uart");
if (IS_ERR(ourport->clk)) {
pr_err("%s: Controller clock not found\n",
dev_name(&platdev->dev));
- return PTR_ERR(ourport->clk);
+ ret = PTR_ERR(ourport->clk);
+ goto err;
}
ret = clk_prepare_enable(ourport->clk);
if (ret) {
pr_err("uart: clock failed to prepare+enable: %d\n", ret);
clk_put(ourport->clk);
- return ret;
+ goto err;
}
/* Keep all interrupts masked and cleared */
@@ -1761,7 +1764,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
+
return 0;
+
+err:
+ port->mapbase = 0;
+ return ret;
}
/* Device driver serial port probe */
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 391a1225b0ba..ca367b05e440 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1585,8 +1585,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
- /* Data+ pullup controlled by OTG state machine in OTG fsm mode */
- if (ci_otg_is_fsm_mode(ci))
+ /*
+ * Data+ pullup controlled by OTG state machine in OTG fsm mode;
+ * and don't touch Data+ in host mode for dual role config.
+ */
+ if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
return 0;
pm_runtime_get_sync(&ci->gadget.dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d37fdcc3143c..7f374369e539 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1336,7 +1336,6 @@ made_compressed_probe:
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
acm->is_int_ep = usb_endpoint_xfer_int(epread);
if (acm->is_int_ep)
acm->bInterval = epread->bInterval;
@@ -1376,14 +1375,14 @@ made_compressed_probe:
urb->transfer_dma = rb->dma;
if (acm->is_int_ep) {
usb_fill_int_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb,
acm->bInterval);
} else {
usb_fill_bulk_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ccfaba9ab4e4..b30ac5fcde68 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -95,7 +95,6 @@ struct acm {
struct urb *read_urbs[ACM_NR];
struct acm_rb read_buffers[ACM_NR];
int rx_buflimit;
- int rx_endpoint;
spinlock_t read_lock;
int write_used; /* number of non-empty write buffers */
int transmitting;
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 666155f1a185..ddc6bfb02164 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -51,6 +51,7 @@ static const char *const speed_names[] = {
[USB_SPEED_HIGH] = "high-speed",
[USB_SPEED_WIRELESS] = "wireless",
[USB_SPEED_SUPER] = "super-speed",
+ [USB_SPEED_SUPER_PLUS] = "super-speed-plus",
};
const char *usb_speed_string(enum usb_device_speed speed)
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5050760f5e17..80c8d90d8b75 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -142,6 +142,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
}
}
+static const unsigned short low_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 8,
+ [USB_ENDPOINT_XFER_ISOC] = 0,
+ [USB_ENDPOINT_XFER_BULK] = 0,
+ [USB_ENDPOINT_XFER_INT] = 8,
+};
+static const unsigned short full_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1023,
+ [USB_ENDPOINT_XFER_BULK] = 64,
+ [USB_ENDPOINT_XFER_INT] = 64,
+};
+static const unsigned short high_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 512,
+ [USB_ENDPOINT_XFER_INT] = 1024,
+};
+static const unsigned short super_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 512,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 1024,
+ [USB_ENDPOINT_XFER_INT] = 1024,
+};
+
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
@@ -150,6 +175,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
int n, i, j, retval;
+ unsigned int maxp;
+ const unsigned short *maxpacket_maxes;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
@@ -191,6 +218,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
if (usb_endpoint_xfer_int(d)) {
i = 1;
switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
/* Many device manufacturers are using full-speed
@@ -256,6 +284,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
+ /* Validate the wMaxPacketSize field */
+ maxp = usb_endpoint_maxp(&endpoint->desc);
+
+ /* Find the highest legal maxpacket size for this endpoint */
+ i = 0; /* additional transactions per microframe */
+ switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_LOW:
+ maxpacket_maxes = low_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_FULL:
+ maxpacket_maxes = full_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_HIGH:
+ /* Bits 12..11 are allowed only for HS periodic endpoints */
+ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+ i = maxp & (BIT(12) | BIT(11));
+ maxp &= ~i;
+ }
+ /* fallthrough */
+ default:
+ maxpacket_maxes = high_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ maxpacket_maxes = super_speed_maxpacket_maxes;
+ break;
+ }
+ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+
+ if (maxp > j) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+ cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+ maxp = j;
+ endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+ }
+
/*
* Some buggy high speed devices have bulk endpoints using
* maxpacket sizes other than 512. High speed HCDs may not
@@ -263,9 +327,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
*/
if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
&& usb_endpoint_xfer_bulk(d)) {
- unsigned maxp;
-
- maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
@@ -274,7 +335,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
}
/* Parse a possible SuperSpeed endpoint companion descriptor */
- if (to_usb_device(ddev)->speed == USB_SPEED_SUPER)
+ if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
inum, asnum, endpoint, buffer, size);
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 2a3bbdf7eb94..332ed277a06c 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
break;
case USB_ENDPOINT_XFER_INT:
type = "Int.";
- if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
+ if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER)
interval = 1 << (desc->bInterval - 1);
else
interval = desc->bInterval;
@@ -230,7 +230,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
return start;
}
interval *= (speed == USB_SPEED_HIGH ||
- speed == USB_SPEED_SUPER) ? 125 : 1000;
+ speed >= USB_SPEED_SUPER) ? 125 : 1000;
if (interval % 1000)
unit = 'u';
else {
@@ -322,7 +322,7 @@ static char *usb_dump_config_descriptor(char *start, char *end,
if (start > end)
return start;
- if (speed == USB_SPEED_SUPER)
+ if (speed >= USB_SPEED_SUPER)
mul = 8;
else
mul = 2;
@@ -534,6 +534,8 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
speed = "480"; break;
case USB_SPEED_SUPER:
speed = "5000"; break;
+ case USB_SPEED_SUPER_PLUS:
+ speed = "10000"; break;
default:
speed = "??";
}
@@ -553,7 +555,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
/* super/high speed reserves 80%, full/low reserves 90% */
if (usbdev->speed == USB_SPEED_HIGH ||
- usbdev->speed == USB_SPEED_SUPER)
+ usbdev->speed >= USB_SPEED_SUPER)
max = 800;
else
max = FRAME_TIME_MAX_USECS_ALLOC;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 3ffb01ff6549..f5c92d904ded 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1530,11 +1530,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = number_of_packets;
as->urb->stream_id = stream_id;
- if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
- ps->dev->speed == USB_SPEED_HIGH)
- as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
- else
- as->urb->interval = ep->desc.bInterval;
+
+ if (ep->desc.bInterval) {
+ if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
+ ps->dev->speed == USB_SPEED_HIGH ||
+ ps->dev->speed >= USB_SPEED_SUPER)
+ as->urb->interval = 1 <<
+ min(15, ep->desc.bInterval - 1);
+ else
+ as->urb->interval = ep->desc.bInterval;
+ }
+
as->urb->context = as;
as->urb->complete = async_completed;
for (totlen = u = 0; u < number_of_packets; u++) {
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b8b580e5ae6e..40378487e023 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -206,7 +206,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
* The xHCI driver has its own irq management
* make sure irq setup is not touched for xhci in generic hcd code
*/
- if ((driver->flags & HCD_MASK) != HCD_USB3) {
+ if ((driver->flags & HCD_MASK) < HCD_USB3) {
if (!dev->irq) {
dev_err(&dev->dev,
"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ac0eb0939ecf..a24a8ea9df7c 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1078,7 +1078,7 @@ static int register_root_hub(struct usb_hcd *hcd)
retval = usb_get_bos_descriptor(usb_dev);
if (!retval) {
usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
- } else if (usb_dev->speed == USB_SPEED_SUPER) {
+ } else if (usb_dev->speed >= USB_SPEED_SUPER) {
mutex_unlock(&usb_bus_list_lock);
dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
dev_name(&usb_dev->dev), retval);
@@ -2112,7 +2112,7 @@ int usb_alloc_streams(struct usb_interface *interface,
hcd = bus_to_hcd(dev->bus);
if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
return -EINVAL;
- if (dev->speed != USB_SPEED_SUPER)
+ if (dev->speed < USB_SPEED_SUPER)
return -EINVAL;
if (dev->state < USB_STATE_CONFIGURED)
return -ENODEV;
@@ -2160,7 +2160,7 @@ int usb_free_streams(struct usb_interface *interface,
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
- if (dev->speed != USB_SPEED_SUPER)
+ if (dev->speed < USB_SPEED_SUPER)
return -EINVAL;
/* Double-free is not allowed */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5839111ab4e0..29242ffe8dca 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -303,7 +303,7 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
unsigned int hub_u1_del;
unsigned int hub_u2_del;
- if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
+ if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
return;
hub = usb_hub_to_struct_hub(udev->parent);
@@ -1047,14 +1047,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Continue a partial initialization */
if (type == HUB_INIT2 || type == HUB_INIT3) {
- device_lock(hub->intfdev);
+ device_lock(&hdev->dev);
/* Was the hub disconnected while we were waiting? */
- if (hub->disconnected) {
- device_unlock(hub->intfdev);
- kref_put(&hub->kref, hub_release);
- return;
- }
+ if (hub->disconnected)
+ goto disconnected;
if (type == HUB_INIT2)
goto init2;
goto init3;
@@ -1257,7 +1254,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
return; /* Continues at init3: below */
} else {
msleep(delay);
@@ -1276,12 +1273,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Scan all ports that need attention */
kick_hub_wq(hub);
- /* Allow autosuspend if it was suppressed */
- if (type <= HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ /* Allow autosuspend if it was suppressed */
+ disconnected:
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
-
- if (type == HUB_INIT2 || type == HUB_INIT3)
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
+ }
kref_put(&hub->kref, hub_release);
}
@@ -1310,8 +1307,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
struct usb_device *hdev = hub->hdev;
int i;
- cancel_delayed_work_sync(&hub->init_work);
-
/* hub_wq and related activity won't re-trigger */
hub->quiescing = 1;
@@ -2656,7 +2651,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
*/
static bool use_new_scheme(struct usb_device *udev, int retry)
{
- if (udev->speed == USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
return false;
return USE_NEW_SCHEME(retry);
@@ -3998,7 +3993,7 @@ int usb_disable_lpm(struct usb_device *udev)
struct usb_hcd *hcd;
if (!udev || !udev->parent ||
- udev->speed != USB_SPEED_SUPER ||
+ udev->speed < USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_DEFAULT)
return 0;
@@ -4055,7 +4050,7 @@ void usb_enable_lpm(struct usb_device *udev)
struct usb_hcd *hcd;
if (!udev || !udev->parent ||
- udev->speed != USB_SPEED_SUPER ||
+ udev->speed < USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_DEFAULT)
return;
@@ -4321,7 +4316,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
retval = -ENODEV;
- if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
+ /* Don't allow speed changes at reset, except usb 3.0 to faster */
+ if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed &&
+ !(oldspeed == USB_SPEED_SUPER && udev->speed > oldspeed)) {
dev_dbg(&udev->dev, "device reset changed speed!\n");
goto fail;
}
@@ -4333,6 +4330,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
*/
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_WIRELESS: /* fixed at 512 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
@@ -4359,7 +4357,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
else
speed = usb_speed_string(udev->speed);
- if (udev->speed != USB_SPEED_SUPER)
+ if (udev->speed < USB_SPEED_SUPER)
dev_info(&udev->dev,
"%s %s USB device number %d using %s\n",
(udev->config) ? "reset" : "new", speed,
@@ -4489,11 +4487,12 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
devnum, retval);
goto fail;
}
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
devnum = udev->devnum;
dev_info(&udev->dev,
- "%s SuperSpeed USB device number %d using %s\n",
+ "%s SuperSpeed%s USB device number %d using %s\n",
(udev->config) ? "reset" : "new",
+ (udev->speed == USB_SPEED_SUPER_PLUS) ? "Plus" : "",
devnum, udev->bus->controller->driver->name);
}
@@ -4532,7 +4531,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* got from those devices show they aren't superspeed devices. Warm
* reset the port attached by the devices can fix them.
*/
- if ((udev->speed == USB_SPEED_SUPER) &&
+ if ((udev->speed >= USB_SPEED_SUPER) &&
(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
dev_err(&udev->dev, "got a wrong device descriptor, "
"warm reset device\n");
@@ -4543,7 +4542,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
}
if (udev->descriptor.bMaxPacketSize0 == 0xff ||
- udev->speed == USB_SPEED_SUPER)
+ udev->speed >= USB_SPEED_SUPER)
i = 512;
else
i = udev->descriptor.bMaxPacketSize0;
@@ -4753,7 +4752,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
udev->level = hdev->level + 1;
udev->wusb = hub_is_wusb(hub);
- /* Only USB 3.0 devices are connected to SuperSpeed hubs. */
+ /* Devices connected to SuperSpeed hubs are USB 3.0 or later */
if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 944a6dca0fcb..d2e50a27140c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -128,6 +128,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
+ { USB_DEVICE(0x04f3, 0x0381), .driver_info =
+ USB_QUIRK_NO_LPM },
+
{ USB_DEVICE(0x04f3, 0x21b8), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 3d274778caaf..c601e25b609f 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -401,7 +401,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
/* SuperSpeed isoc endpoints have up to 16 bursts of up to
* 3 packets each
*/
- if (dev->speed == USB_SPEED_SUPER) {
+ if (dev->speed >= USB_SPEED_SUPER) {
int burst = 1 + ep->ss_ep_comp.bMaxBurst;
int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
max *= burst;
@@ -499,6 +499,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
}
/* too big? */
switch (dev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER: /* units are 125us */
/* Handle up to 2^(16-1) microframes */
if (urb->interval > (1 << 15))
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index b43d542e3bd4..062677f8e91d 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -682,9 +682,6 @@ EXPORT_SYMBOL(usb_sec_event_ring_setup);
int usb_sec_event_ring_cleanup(struct usb_device *dev,
unsigned intr_num)
{
- if (dev->state == USB_STATE_NOTATTACHED)
- return 0;
-
return usb_hcd_sec_event_ring_cleanup(dev, intr_num);
}
EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index ccb35af525e2..fbff25ff23a9 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -45,7 +45,7 @@ static inline unsigned usb_get_max_power(struct usb_device *udev,
struct usb_host_config *c)
{
/* SuperSpeed power is in 8 mA units; others are in 2 mA units */
- unsigned mul = (udev->speed == USB_SPEED_SUPER ? 8 : 2);
+ unsigned mul = (udev->speed >= USB_SPEED_SUPER ? 8 : 2);
return c->desc.bMaxPower * mul;
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 07867ead2413..85410a2214da 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -180,9 +180,9 @@ static int dwc3_core_reset(struct dwc3 *dwc)
reg &= ~DWC3_GUSB3PIPECTL_DELAYP1TRANS;
dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
- dwc3_notify_event(dwc, DWC3_CONTROLLER_RESET_EVENT);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_RESET_EVENT, 0);
- dwc3_notify_event(dwc, DWC3_CONTROLLER_POST_RESET_EVENT);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_POST_RESET_EVENT, 0);
return 0;
}
@@ -908,19 +908,19 @@ void dwc3_post_host_reset_core_init(struct dwc3 *dwc)
dwc3_gadget_restart(dwc);
}
-static void (*notify_event) (struct dwc3 *, unsigned);
-void dwc3_set_notifier(void (*notify)(struct dwc3 *, unsigned))
+static void (*notify_event)(struct dwc3 *, unsigned, unsigned);
+void dwc3_set_notifier(void (*notify)(struct dwc3 *, unsigned, unsigned))
{
notify_event = notify;
}
EXPORT_SYMBOL(dwc3_set_notifier);
-int dwc3_notify_event(struct dwc3 *dwc, unsigned event)
+int dwc3_notify_event(struct dwc3 *dwc, unsigned event, unsigned value)
{
int ret = 0;
if (dwc->notify_event)
- dwc->notify_event(dwc, event);
+ dwc->notify_event(dwc, event, value);
else
ret = -ENODEV;
@@ -1317,7 +1317,7 @@ static int dwc3_suspend(struct device *dev)
unsigned long flags;
/* Check if platform glue driver handling PM, if not then handle here */
- if (!dwc3_notify_event(dwc, DWC3_CORE_PM_SUSPEND_EVENT))
+ if (!dwc3_notify_event(dwc, DWC3_CORE_PM_SUSPEND_EVENT, 0))
return 0;
spin_lock_irqsave(&dwc->lock, flags);
@@ -1353,7 +1353,7 @@ static int dwc3_resume(struct device *dev)
int ret;
/* Check if platform glue driver handling PM, if not then handle here */
- if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT))
+ if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT, 0))
return 0;
pinctrl_pm_select_default_state(dev);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1fb5ce9caf98..c2cdfd1a823b 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -731,6 +731,7 @@ struct dwc3_scratchpad_array {
#define DWC3_CONTROLLER_NOTIFY_OTG_EVENT 6
#define DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT 7
#define DWC3_CONTROLLER_RESTART_USB_SESSION 8
+#define DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER 9
#define MAX_INTR_STATS 10
/**
@@ -952,7 +953,7 @@ struct dwc3 {
const char *hsphy_interface;
- void (*notify_event) (struct dwc3 *, unsigned);
+ void (*notify_event)(struct dwc3 *, unsigned, unsigned);
struct work_struct wakeup_work;
unsigned delayed_status:1;
@@ -1252,7 +1253,7 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc);
void dwc3_usb3_phy_suspend(struct dwc3 *dwc, int suspend);
extern void dwc3_set_notifier(
- void (*notify) (struct dwc3 *dwc3, unsigned event));
-extern int dwc3_notify_event(struct dwc3 *dwc3, unsigned event);
+ void (*notify)(struct dwc3 *dwc3, unsigned event, unsigned value));
+extern int dwc3_notify_event(struct dwc3 *dwc3, unsigned event, unsigned value);
#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/dbm.c b/drivers/usb/dwc3/dbm.c
index 285cd5a47d82..0fbb1fb39f5c 100644
--- a/drivers/usb/dwc3/dbm.c
+++ b/drivers/usb/dwc3/dbm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -37,6 +37,7 @@ enum dbm_reg {
DBM_HW_TRB2_EP,
DBM_HW_TRB3_EP,
DBM_PIPE_CFG,
+ DBM_DISABLE_UPDXFER,
DBM_SOFT_RESET,
DBM_GEN_CFG,
DBM_GEVNTADR_LSB,
@@ -103,6 +104,7 @@ static const struct dbm_reg_data dbm_1_5_regtable[] = {
[DBM_HW_TRB2_EP] = { 0x0240, 0x4 },
[DBM_HW_TRB3_EP] = { 0x0250, 0x4 },
[DBM_PIPE_CFG] = { 0x0274, 0x0 },
+ [DBM_DISABLE_UPDXFER] = { 0x0298, 0x0 },
[DBM_SOFT_RESET] = { 0x020C, 0x0 },
[DBM_GEN_CFG] = { 0x0210, 0x0 },
[DBM_GEVNTADR_LSB] = { 0x0260, 0x0 },
@@ -291,6 +293,7 @@ int dbm_ep_config(struct dbm *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
{
int dbm_ep;
u32 ep_cfg;
+ u32 data;
if (!dbm) {
pr_err("%s: dbm pointer is NULL!\n", __func__);
@@ -312,9 +315,6 @@ int dbm_ep_config(struct dbm *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
return -ENODEV;
}
- /* First, reset the dbm endpoint */
- ep_soft_reset(dbm, dbm_ep, 0);
-
/* Set ioc bit for dbm_ep if needed */
msm_dbm_write_reg_field(dbm, DBM_DBG_CNFG,
DBM_ENABLE_IOC_MASK & 1 << dbm_ep, ioc ? 1 : 0);
@@ -337,6 +337,10 @@ int dbm_ep_config(struct dbm *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep, DBM_EN_EP, 1);
+ data = msm_dbm_read_reg(dbm, DBM_DISABLE_UPDXFER);
+ data &= ~(0x1 << dbm_ep);
+ msm_dbm_write_reg(dbm, DBM_DISABLE_UPDXFER, data);
+
return dbm_ep;
}
@@ -391,23 +395,10 @@ int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep)
data &= (~0x1);
msm_dbm_write_ep_reg(dbm, DBM_EP_CFG, dbm_ep, data);
- /* Reset the dbm endpoint */
- ep_soft_reset(dbm, dbm_ep, true);
/*
- * The necessary delay between asserting and deasserting the dbm ep
- * reset is based on the number of active endpoints. If there is more
- * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
- * delay will suffice.
- *
- * As this function can be called in atomic context, sleeping variants
- * for delay are not possible - albeit a 1ms delay.
+ * ep_soft_reset is not required during disconnect as pipe reset on
+ * next connect will take care of the same.
*/
- if (dbm_get_num_of_eps_configured(dbm) > 1)
- udelay(1000);
- else
- udelay(10);
- ep_soft_reset(dbm, dbm_ep, false);
-
return 0;
}
@@ -449,6 +440,35 @@ int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi, int size)
return 0;
}
+/**
+ * Disable update xfer before queueing stop xfer command to USB3 core.
+ *
+ * @usb_ep - USB physical EP number.
+ *
+ */
+int dwc3_dbm_disable_update_xfer(struct dbm *dbm, u8 usb_ep)
+{
+ u32 data;
+ u8 dbm_ep;
+
+ if (!dbm) {
+ pr_err("%s: dbm pointer is NULL!\n", __func__);
+ return -EPERM;
+ }
+
+ dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+
+ if (dbm_ep < 0) {
+ pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+ return -ENODEV;
+ }
+
+ data = msm_dbm_read_reg(dbm, DBM_DISABLE_UPDXFER);
+ data |= (0x1 << dbm_ep);
+ msm_dbm_write_reg(dbm, DBM_DISABLE_UPDXFER, data);
+
+ return 0;
+}
int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
u32 size, u8 dst_pipe_idx)
diff --git a/drivers/usb/dwc3/dbm.h b/drivers/usb/dwc3/dbm.h
index 260afc241015..bf20d7cbd454 100644
--- a/drivers/usb/dwc3/dbm.h
+++ b/drivers/usb/dwc3/dbm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -63,6 +63,7 @@ int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep);
int dbm_get_num_of_eps_configured(struct dbm *dbm);
int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi,
int size);
+int dwc3_dbm_disable_update_xfer(struct dbm *dbm, u8 usb_ep);
int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
u32 size, u8 dst_pipe_idx);
void dbm_set_speed(struct dbm *dbm, bool speed);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index bb6afb6a7d6d..cd55d908ea61 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -204,12 +204,16 @@ struct dwc3_msm {
unsigned int irq_to_affin;
struct notifier_block dwc3_cpu_notifier;
+ struct notifier_block usbdev_nb;
+ bool hc_died;
struct extcon_dev *extcon_vbus;
struct extcon_dev *extcon_id;
struct notifier_block vbus_nb;
struct notifier_block id_nb;
+ struct notifier_block host_nb;
+
int pwr_event_irq;
atomic_t in_p3;
unsigned int lpm_to_suspend_delay;
@@ -365,6 +369,16 @@ static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
return dwc3_msm_is_dev_superspeed(mdwc);
}
+int dwc3_msm_dbm_disable_updxfer(struct dwc3 *dwc, u8 usb_ep)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ dev_dbg(mdwc->dev, "%s\n", __func__);
+ dwc3_dbm_disable_update_xfer(mdwc->dbm, usb_ep);
+
+ return 0;
+}
+
#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
/**
* Configure the DBM with the BAM's data fifo.
@@ -622,18 +636,36 @@ static int dwc3_msm_ep_queue(struct usb_ep *ep,
struct dwc3_msm_req_complete *req_complete;
unsigned long flags;
int ret = 0, size;
- u8 bam_pipe;
- bool producer;
- bool disable_wb;
- bool internal_mem;
- bool ioc;
bool superspeed;
+ /*
+ * We must obtain the lock of the dwc3 core driver,
+ * including disabling interrupts, so we will be sure
+ * that we are the only ones that configure the HW device
+ * core and ensure that we queuing the request will finish
+ * as soon as possible so we will release back the lock.
+ */
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (!dep->endpoint.desc) {
+ dev_err(mdwc->dev,
+ "%s: trying to queue request %p to disabled ep %s\n",
+ __func__, request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+
+ if (!request) {
+ dev_err(mdwc->dev, "%s: request is NULL\n", __func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EINVAL;
+ }
+
if (!(request->udc_priv & MSM_SPS_MODE)) {
/* Not SPS mode, call original queue */
dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
__func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return (mdwc->original_ep_ops[dep->number])->queue(ep,
request,
gfp_flags);
@@ -642,9 +674,29 @@ static int dwc3_msm_ep_queue(struct usb_ep *ep,
/* HW restriction regarding TRB size (8KB) */
if (req->request.length < 0x2000) {
dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -EINVAL;
}
+ if (dep->number == 0 || dep->number == 1) {
+ dev_err(mdwc->dev,
+ "%s: trying to queue dbm request %p to control ep %s\n",
+ __func__, request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+
+ if (dep->busy_slot != dep->free_slot || !list_empty(&dep->request_list)
+ || !list_empty(&dep->req_queued)) {
+ dev_err(mdwc->dev,
+ "%s: trying to queue dbm request %p tp ep %s\n",
+ __func__, request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+ dep->busy_slot = 0;
+ dep->free_slot = 0;
+
/*
* Override req->complete function, but before doing that,
* store it's original pointer in the req_complete_list.
@@ -652,6 +704,7 @@ static int dwc3_msm_ep_queue(struct usb_ep *ep,
req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
if (!req_complete) {
dev_err(mdwc->dev, "%s: not enough memory\n", __func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -ENOMEM;
}
req_complete->req = request;
@@ -659,23 +712,6 @@ static int dwc3_msm_ep_queue(struct usb_ep *ep,
list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
request->complete = dwc3_msm_req_complete_func;
- /*
- * Configure the DBM endpoint
- */
- bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
- producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
- disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
- internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
- ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
-
- ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
- disable_wb, internal_mem, ioc);
- if (ret < 0) {
- dev_err(mdwc->dev,
- "error %d after calling dbm_ep_config\n", ret);
- return ret;
- }
-
dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
__func__, request, ep->name, request->length);
size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
@@ -684,43 +720,6 @@ static int dwc3_msm_ep_queue(struct usb_ep *ep,
dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
DWC3_GEVNTSIZ_SIZE(size));
- /*
- * We must obtain the lock of the dwc3 core driver,
- * including disabling interrupts, so we will be sure
- * that we are the only ones that configure the HW device
- * core and ensure that we queuing the request will finish
- * as soon as possible so we will release back the lock.
- */
- spin_lock_irqsave(&dwc->lock, flags);
- if (!dep->endpoint.desc) {
- dev_err(mdwc->dev,
- "%s: trying to queue request %p to disabled ep %s\n",
- __func__, request, ep->name);
- ret = -EPERM;
- goto err;
- }
-
- if (dep->number == 0 || dep->number == 1) {
- dev_err(mdwc->dev,
- "%s: trying to queue dbm request %p to control ep %s\n",
- __func__, request, ep->name);
- ret = -EPERM;
- goto err;
- }
-
-
- if (dep->busy_slot != dep->free_slot || !list_empty(&dep->request_list)
- || !list_empty(&dep->req_queued)) {
- dev_err(mdwc->dev,
- "%s: trying to queue dbm request %p tp ep %s\n",
- __func__, request, ep->name);
- ret = -EPERM;
- goto err;
- } else {
- dep->busy_slot = 0;
- dep->free_slot = 0;
- }
-
ret = __dwc3_msm_ep_queue(dep, req);
if (ret < 0) {
dev_err(mdwc->dev,
@@ -1346,12 +1345,19 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
*
* @return int - 0 on success, negetive on error.
*/
-int msm_ep_config(struct usb_ep *ep)
+int msm_ep_config(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags)
{
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
struct usb_ep_ops *new_ep_ops;
+ int ret = 0;
+ u8 bam_pipe;
+ bool producer;
+ bool disable_wb;
+ bool internal_mem;
+ bool ioc;
/* Save original ep ops for future restore*/
@@ -1364,7 +1370,7 @@ int msm_ep_config(struct usb_ep *ep)
mdwc->original_ep_ops[dep->number] = ep->ops;
/* Set new usb ops as we like */
- new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
+ new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), gfp_flags);
if (!new_ep_ops) {
dev_err(mdwc->dev,
"%s: unable to allocate mem for new usb ep ops\n",
@@ -1376,10 +1382,25 @@ int msm_ep_config(struct usb_ep *ep)
new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
ep->ops = new_ep_ops;
+ if (!mdwc->dbm || !request || (dep->endpoint.ep_type == EP_TYPE_GSI))
+ return 0;
+
/*
- * Do HERE more usb endpoint configurations
- * which are specific to MSM.
+ * Configure the DBM endpoint if required.
*/
+ bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
+ producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
+ disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
+ internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
+ ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
+
+ ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
+ disable_wb, internal_mem, ioc);
+ if (ret < 0) {
+ dev_err(mdwc->dev,
+ "error %d after calling dbm_ep_config\n", ret);
+ return ret;
+ }
return 0;
}
@@ -1475,6 +1496,33 @@ static void dwc3_restart_usb_work(struct work_struct *w)
flush_delayed_work(&mdwc->sm_work);
}
+static int msm_dwc3_usbdev_notify(struct notifier_block *self,
+ unsigned long action, void *priv)
+{
+ struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct usb_bus *bus = priv;
+
+ /* Interested only in recovery when HC dies */
+ if (action != USB_BUS_DIED)
+ return 0;
+
+ dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
+ /* Recovery already under process */
+ if (mdwc->hc_died)
+ return 0;
+
+ if (bus->controller != &dwc->xhci->dev) {
+ dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
+ return 0;
+ }
+
+ mdwc->hc_died = true;
+ schedule_delayed_work(&mdwc->sm_work, 0);
+ return 0;
+}
+
+
/*
* Check whether the DWC3 requires resetting the ep
* after going to Low Power Mode (lpm)
@@ -1616,7 +1664,8 @@ static void dwc3_msm_vbus_draw_work(struct work_struct *w)
dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
}
-static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned event)
+static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned event,
+ unsigned value)
{
struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
u32 reg;
@@ -1708,6 +1757,9 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned event)
dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
schedule_work(&mdwc->restart_usb_work);
break;
+ case DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER:
+ dwc3_msm_dbm_disable_updxfer(dwc, value);
+ break;
default:
dev_dbg(mdwc->dev, "unknown dwc3 event\n");
break;
@@ -1748,6 +1800,7 @@ static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
{
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
u32 val;
+ int ret;
/* Configure AHB2PHY for one wait state read/write */
if (mdwc->ahb2phy_base) {
@@ -1766,7 +1819,11 @@ static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
if (!mdwc->init) {
dbg_event(0xFF, "dwc3 init",
atomic_read(&mdwc->dev->power.usage_count));
- dwc3_core_pre_init(dwc);
+ ret = dwc3_core_pre_init(dwc);
+ if (ret) {
+ dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
+ return;
+ }
mdwc->init = true;
}
@@ -2009,15 +2066,6 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
enable_irq_wake(mdwc->ss_phy_irq);
enable_irq(mdwc->ss_phy_irq);
}
- /*
- * Enable power event irq during bus suspend in host mode for
- * mapping MPM pin for DP so that wakeup can happen in system
- * suspend.
- */
- if (mdwc->in_host_mode) {
- enable_irq(mdwc->pwr_event_irq);
- enable_irq_wake(mdwc->pwr_event_irq);
- }
mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
}
@@ -2123,6 +2171,9 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
atomic_set(&dwc->in_lpm, 0);
+ /* enable power evt irq for IN P3 detection */
+ enable_irq(mdwc->pwr_event_irq);
+
/* Disable HSPHY auto suspend */
dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
@@ -2137,18 +2188,11 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
disable_irq_wake(mdwc->ss_phy_irq);
disable_irq_nosync(mdwc->ss_phy_irq);
}
- if (mdwc->in_host_mode) {
- disable_irq_wake(mdwc->pwr_event_irq);
- disable_irq(mdwc->pwr_event_irq);
- }
mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
}
dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
- /* enable power evt irq for IN P3 detection */
- enable_irq(mdwc->pwr_event_irq);
-
/* Enable core irq */
if (dwc->irq)
enable_irq(dwc->irq);
@@ -2381,12 +2425,15 @@ static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
return ret;
}
- if (!of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
+ if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
(u32 *)&mdwc->core_clk_rate)) {
- mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
- mdwc->core_clk_rate);
+ dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
+ return -EINVAL;
}
+ mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
+ mdwc->core_clk_rate);
+
dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
mdwc->core_clk_rate);
ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
@@ -2529,8 +2576,12 @@ static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
struct extcon_dev *edev;
int ret = 0;
- if (!of_property_read_bool(node, "extcon"))
- return 0;
+ if (!of_property_read_bool(node, "extcon")) {
+ if (usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST)
+ return 0;
+ dev_err(mdwc->dev, "extcon property doesn't exist\n");
+ return -EINVAL;
+ }
edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
@@ -3023,6 +3074,53 @@ static int dwc3_msm_remove(struct platform_device *pdev)
return 0;
}
+static int dwc3_msm_host_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct usb_device *udev = ptr;
+ union power_supply_propval pval;
+ unsigned max_power;
+
+ if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
+ return NOTIFY_DONE;
+
+ if (!mdwc->usb_psy) {
+ mdwc->usb_psy = power_supply_get_by_name("usb");
+ if (!mdwc->usb_psy)
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * For direct-attach devices, new udev is direct child of root hub
+ * i.e. dwc -> xhci -> root_hub -> udev
+ * root_hub's udev->parent==NULL, so traverse struct device hierarchy
+ */
+ if (udev->parent && !udev->parent->parent &&
+ udev->dev.parent->parent == &dwc->xhci->dev) {
+ if (event == USB_DEVICE_ADD && udev->actconfig) {
+ if (udev->speed >= USB_SPEED_SUPER)
+ max_power = udev->actconfig->desc.bMaxPower * 8;
+ else
+ max_power = udev->actconfig->desc.bMaxPower * 2;
+ dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
+ dev_name(&udev->dev), max_power);
+
+ /* inform PMIC of max power so it can optimize boost */
+ pval.intval = max_power * 1000;
+ power_supply_set_property(mdwc->usb_psy,
+ POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
+ } else {
+ pval.intval = 0;
+ power_supply_set_property(mdwc->usb_psy,
+ POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
/**
@@ -3083,6 +3181,11 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+ mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
+ usb_register_notify(&mdwc->host_nb);
+
+ mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
+ usb_register_atomic_notify(&mdwc->usbdev_nb);
/*
* FIXME If micro A cable is disconnected during system suspend,
* xhci platform device will be removed before runtime pm is
@@ -3103,6 +3206,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
pm_runtime_put_sync(mdwc->dev);
dbg_event(0xFF, "pdeverr psync",
atomic_read(&mdwc->dev->power.usage_count));
+ usb_unregister_notify(&mdwc->host_nb);
return ret;
}
@@ -3125,6 +3229,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
} else {
dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
+ usb_unregister_atomic_notify(&mdwc->usbdev_nb);
if (!IS_ERR(mdwc->vbus_reg))
ret = regulator_disable(mdwc->vbus_reg);
if (ret) {
@@ -3139,6 +3244,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
platform_device_del(dwc->xhci);
+ usb_unregister_notify(&mdwc->host_nb);
/*
* Perform USB hardware RESET (both core reset and DBM reset)
@@ -3415,11 +3521,12 @@ static void dwc3_otg_sm_work(struct work_struct *w)
break;
case OTG_STATE_A_HOST:
- if (test_bit(ID, &mdwc->inputs)) {
- dev_dbg(mdwc->dev, "id\n");
+ if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
+ dev_dbg(mdwc->dev, "id || hc_died\n");
dwc3_otg_start_host(mdwc, 0);
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->vbus_retry_count = 0;
+ mdwc->hc_died = false;
work = 1;
} else {
dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 009d83048c8c..3d731d1b5c60 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -36,6 +36,7 @@
#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
+#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -214,6 +215,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2450cc52fa24..251ae0a7a5ee 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -213,7 +213,8 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
* use mult as 3 for GSI IN endpoint always irrespective
* USB speed.
*/
- if (dep->endpoint.ep_type == EP_TYPE_GSI)
+ if (dep->endpoint.ep_type == EP_TYPE_GSI ||
+ dep->endpoint.endless)
mult = 3;
if (!(dep->flags & DWC3_EP_ENABLED)) {
@@ -383,7 +384,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
if (!(cmd & DWC3_DEPCMD_ENDTRANSFER)) {
dwc->ep_cmd_timeout_cnt++;
dwc3_notify_event(dwc,
- DWC3_CONTROLLER_RESTART_USB_SESSION);
+ DWC3_CONTROLLER_RESTART_USB_SESSION, 0);
}
return -ETIMEDOUT;
}
@@ -1871,7 +1872,7 @@ static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned mA)
dwc->vbus_draw = mA;
dev_dbg(dwc->dev, "Notify controller from %s. mA = %d\n", __func__, mA);
- dwc3_notify_event(dwc, DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT, 0);
return 0;
}
@@ -1906,7 +1907,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
*/
dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
dwc->b_suspend = false;
- dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2142,7 +2143,7 @@ static int dwc3_gadget_restart_usb_session(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
- return dwc3_notify_event(dwc, DWC3_CONTROLLER_RESTART_USB_SESSION);
+ return dwc3_notify_event(dwc, DWC3_CONTROLLER_RESTART_USB_SESSION, 0);
}
static const struct usb_gadget_ops dwc3_gadget_ops = {
@@ -2658,6 +2659,10 @@ void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
if (!dep->resource_index)
return;
+ if (dep->endpoint.endless)
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER,
+ dep->number);
+
/*
* NOTICE: We are violating what the Databook says about the
* EndTransfer command. Ideally we would _always_ wait for the
@@ -2742,7 +2747,7 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
dwc->b_suspend = false;
- dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_INITU1ENA;
@@ -2798,7 +2803,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
dwc->b_suspend = false;
- dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
dwc3_usb3_phy_suspend(dwc, false);
usb_gadget_vbus_draw(&dwc->gadget, 100);
@@ -2959,7 +2964,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
return;
}
- dwc3_notify_event(dwc, DWC3_CONTROLLER_CONNDONE_EVENT);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_CONNDONE_EVENT, 0);
/*
* Configure PHY via GUSB3PIPECTLn if required.
@@ -2995,7 +3000,8 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, bool remote_wakeup)
*/
dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
dwc->b_suspend = false;
- dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+ dwc3_notify_event(dwc,
+ DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
/*
* set state to U0 as function level resume is trying to queue
@@ -3162,7 +3168,7 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
dwc->b_suspend = true;
- dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
}
dwc->link_state = next;
@@ -3333,7 +3339,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
evt->lpos = (evt->lpos + left) %
DWC3_EVENT_BUFFERS_SIZE;
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), left);
- if (dwc3_notify_event(dwc, DWC3_CONTROLLER_ERROR_EVENT))
+ if (dwc3_notify_event(dwc,
+ DWC3_CONTROLLER_ERROR_EVENT, 0))
dwc->err_evt_seen = 0;
break;
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a480b0a9a238..04985ccbbe6d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -675,7 +675,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
if (gadget_is_superspeed(cdev->gadget)) {
/*
@@ -1631,6 +1631,12 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
case USB_DT_DEVICE:
cdev->desc.bNumConfigurations =
count_configs(cdev, USB_DT_DEVICE);
+ if (cdev->desc.bNumConfigurations == 0) {
+ pr_err("%s:config is not active. send stall\n",
+ __func__);
+ break;
+ }
+
cdev->desc.bMaxPacketSize0 =
cdev->gadget->ep0->maxpacket;
cdev->desc.bcdUSB = cpu_to_le16(0x0200);
@@ -1655,7 +1661,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (!gadget_is_dualspeed(gadget) ||
gadget->speed >= USB_SPEED_SUPER)
break;
+ spin_lock(&cdev->lock);
device_qual(cdev);
+ spin_unlock(&cdev->lock);
value = min_t(int, w_length,
sizeof(struct usb_qualifier_descriptor));
break;
@@ -1665,13 +1673,17 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
/* FALLTHROUGH */
case USB_DT_CONFIG:
+ spin_lock(&cdev->lock);
value = config_desc(cdev, w_value);
+ spin_unlock(&cdev->lock);
if (value >= 0)
value = min(w_length, (u16) value);
break;
case USB_DT_STRING:
+ spin_lock(&cdev->lock);
value = get_string(cdev, req->buf,
w_index, w_value & 0xff);
+ spin_unlock(&cdev->lock);
if (value >= 0)
value = min(w_length, (u16) value);
break;
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index bc1c34f05fa3..013a9d6702db 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -575,15 +575,6 @@ static int create_bulk_endpoints(struct acc_dev *dev,
ep->driver_data = dev; /* claim the endpoint */
dev->ep_out = ep;
- ep = usb_ep_autoconfig(cdev->gadget, out_desc);
- if (!ep) {
- DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
- return -ENODEV;
- }
- DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
- ep->driver_data = dev; /* claim the endpoint */
- dev->ep_out = ep;
-
/* now allocate requests for our endpoints */
for (i = 0; i < TX_REQ_MAX; i++) {
req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 19d6a997ee6c..052b6dbc4471 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3124,6 +3124,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
const int super = func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
+ struct ffs_ep *eps_ptr;
/* Make it a single chunk, less management later on */
vla_group(d);
@@ -3175,12 +3176,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
ffs->raw_descs_length);
memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
- for (ret = ffs->eps_count; ret; --ret) {
- struct ffs_ep *ptr;
-
- ptr = vla_ptr(vlabuf, d, eps);
- ptr[ret].num = -1;
- }
+ eps_ptr = vla_ptr(vlabuf, d, eps);
+ for (i = 0; i < ffs->eps_count; i++)
+ eps_ptr[i].num = -1;
/* Save pointers
* d_eps == vlabuf, func->eps used to kfree vlabuf later
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index af20033b621f..8088593fad1a 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1467,7 +1467,15 @@ static void gsi_ctrl_notify_resp_complete(struct usb_ep *ep,
event->bNotificationType, req->status);
/* FALLTHROUGH */
case 0:
- /*
+ /* no need to handle multiple resp available for RNDIS */
+ if (gsi->prot_id == IPA_USB_RNDIS) {
+ atomic_set(&gsi->c_port.notify_count, 0);
+ log_event_dbg("notify_count = %d",
+ atomic_read(&gsi->c_port.notify_count));
+ break;
+ }
+
+ /*
* handle multiple pending resp available
* notifications by queuing same until we're done,
* rest of the notification require queuing new
@@ -2245,7 +2253,7 @@ skip_string_id_alloc:
if (!ep)
goto fail;
gsi->d_port.in_ep = ep;
- msm_ep_config(gsi->d_port.in_ep);
+ msm_ep_config(gsi->d_port.in_ep, NULL, GFP_KERNEL);
ep->driver_data = cdev; /* claim */
}
@@ -2255,7 +2263,7 @@ skip_string_id_alloc:
if (!ep)
goto fail;
gsi->d_port.out_ep = ep;
- msm_ep_config(gsi->d_port.out_ep);
+ msm_ep_config(gsi->d_port.out_ep, NULL, GFP_KERNEL);
ep->driver_data = cdev; /* claim */
}
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index eb306529981f..ede1c8dd51a6 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -472,16 +472,26 @@ static void rndis_qc_response_available(void *_rndis)
static void rndis_qc_response_complete(struct usb_ep *ep,
struct usb_request *req)
{
- struct f_rndis_qc *rndis = req->context;
+ struct f_rndis_qc *rndis;
int status = req->status;
struct usb_composite_dev *cdev;
+ struct usb_ep *notify_ep;
+
+ spin_lock(&rndis_lock);
+ rndis = _rndis_qc;
+ if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+ spin_unlock(&rndis_lock);
+ return;
+ }
if (!rndis->func.config || !rndis->func.config->cdev) {
pr_err("%s(): cdev or config is NULL.\n", __func__);
+ spin_unlock(&rndis_lock);
return;
}
cdev = rndis->func.config->cdev;
+
/* after TX:
* - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
* - RNDIS_RESPONSE_AVAILABLE (status/irq)
@@ -491,7 +501,7 @@ static void rndis_qc_response_complete(struct usb_ep *ep,
case -ESHUTDOWN:
/* connection gone */
atomic_set(&rndis->notify_count, 0);
- break;
+ goto out;
default:
pr_info("RNDIS %s response error %d, %d/%d\n",
ep->name, status,
@@ -499,30 +509,47 @@ static void rndis_qc_response_complete(struct usb_ep *ep,
/* FALLTHROUGH */
case 0:
if (ep != rndis->notify)
- break;
+ goto out;
/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
* notifications by resending until we're done
*/
if (atomic_dec_and_test(&rndis->notify_count))
- break;
- status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+ goto out;
+ notify_ep = rndis->notify;
+ spin_unlock(&rndis_lock);
+ status = usb_ep_queue(notify_ep, req, GFP_ATOMIC);
if (status) {
- atomic_dec(&rndis->notify_count);
+ spin_lock(&rndis_lock);
+ if (!_rndis_qc)
+ goto out;
+ atomic_dec(&_rndis_qc->notify_count);
DBG(cdev, "notify/1 --> %d\n", status);
+ spin_unlock(&rndis_lock);
}
- break;
}
+
+ return;
+
+out:
+ spin_unlock(&rndis_lock);
}
static void rndis_qc_command_complete(struct usb_ep *ep,
struct usb_request *req)
{
- struct f_rndis_qc *rndis = req->context;
+ struct f_rndis_qc *rndis;
int status;
rndis_init_msg_type *buf;
u32 ul_max_xfer_size, dl_max_xfer_size;
+ spin_lock(&rndis_lock);
+ rndis = _rndis_qc;
+ if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+ spin_unlock(&rndis_lock);
+ return;
+ }
+
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
if (status < 0)
@@ -551,6 +578,7 @@ static void rndis_qc_command_complete(struct usb_ep *ep,
rndis_get_dl_max_xfer_size(rndis->params);
ipa_data_set_dl_max_xfer_size(dl_max_xfer_size);
}
+ spin_unlock(&rndis_lock);
}
static int
@@ -749,13 +777,16 @@ static void rndis_qc_disable(struct usb_function *f)
{
struct f_rndis_qc *rndis = func_to_rndis_qc(f);
struct usb_composite_dev *cdev = f->config->cdev;
+ unsigned long flags;
if (!rndis->notify->driver_data)
return;
DBG(cdev, "rndis deactivated\n");
+ spin_lock_irqsave(&rndis_lock, flags);
rndis_uninit(rndis->params);
+ spin_unlock_irqrestore(&rndis_lock, flags);
ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS);
msm_ep_unconfig(rndis->bam_port.out);
@@ -1092,18 +1123,14 @@ rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
void rndis_ipa_reset_trigger(void)
{
struct f_rndis_qc *rndis;
- unsigned long flags;
- spin_lock_irqsave(&rndis_lock, flags);
rndis = _rndis_qc;
if (!rndis) {
pr_err("%s: No RNDIS instance", __func__);
- spin_unlock_irqrestore(&rndis_lock, flags);
return;
}
rndis->net_ready_trigger = false;
- spin_unlock_irqrestore(&rndis_lock, flags);
}
/*
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 8e04d9451b67..16bd7d890d3e 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -844,32 +844,37 @@ EXPORT_SYMBOL(usb_qdss_open);
void usb_qdss_close(struct usb_qdss_ch *ch)
{
struct f_qdss *qdss = ch->priv_usb;
- struct usb_gadget *gadget = qdss->gadget;
+ struct usb_gadget *gadget;
unsigned long flags;
int status;
pr_debug("usb_qdss_close\n");
spin_lock_irqsave(&qdss_lock, flags);
+ if (!qdss || !qdss->usb_connected) {
+ ch->app_conn = 0;
+ spin_unlock_irqrestore(&qdss_lock, flags);
+ return;
+ }
+
usb_ep_dequeue(qdss->port.data, qdss->endless_req);
usb_ep_free_request(qdss->port.data, qdss->endless_req);
qdss->endless_req = NULL;
+ gadget = qdss->gadget;
ch->app_conn = 0;
spin_unlock_irqrestore(&qdss_lock, flags);
- if (qdss->usb_connected) {
- status = uninit_data(qdss->port.data);
- if (status)
- pr_err("%s: uninit_data error\n", __func__);
+ status = uninit_data(qdss->port.data);
+ if (status)
+ pr_err("%s: uninit_data error\n", __func__);
- status = set_qdss_data_connection(
+ status = set_qdss_data_connection(
gadget,
qdss->port.data,
qdss->port.data->address,
0);
- if (status)
- pr_err("%s:qdss_disconnect error\n", __func__);
- }
+ if (status)
+ pr_err("%s:qdss_disconnect error\n", __func__);
usb_gadget_restart(gadget);
}
EXPORT_SYMBOL(usb_qdss_close);
diff --git a/drivers/usb/gadget/function/f_rmnet.c b/drivers/usb/gadget/function/f_rmnet.c
index 0fd7e213ef99..3458f42ee06e 100644
--- a/drivers/usb/gadget/function/f_rmnet.c
+++ b/drivers/usb/gadget/function/f_rmnet.c
@@ -26,20 +26,18 @@
#define RMNET_NOTIFY_INTERVAL 5
#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
-
#define ACM_CTRL_DTR (1 << 0)
-/* TODO: use separate structures for data and
- * control paths
- */
struct f_rmnet {
struct usb_function func;
+ enum qti_port_type qti_port_type;
+ enum ipa_func_type func_type;
struct grmnet port;
int ifc_id;
atomic_t online;
atomic_t ctrl_online;
struct usb_composite_dev *cdev;
-
+ struct gadget_ipa_port ipa_port;
spinlock_t lock;
/* usb eps*/
@@ -47,11 +45,9 @@ struct f_rmnet {
struct usb_request *notify_req;
/* control info */
- struct gadget_ipa_port ipa_port;
struct list_head cpkt_resp_q;
unsigned long notify_count;
- unsigned long cpkts_len;
-} *rmnet_port;
+};
static struct usb_interface_descriptor rmnet_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
@@ -213,6 +209,70 @@ static struct usb_gadget_strings *rmnet_strings[] = {
NULL,
};
+static struct usb_interface_descriptor dpl_data_intf_desc = {
+ .bLength = sizeof(dpl_data_intf_desc),
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+};
+
+static struct usb_endpoint_descriptor dpl_hs_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dpl_ss_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dpl_data_ep_comp_desc = {
+ .bLength = sizeof(dpl_data_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 1,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *dpl_hs_data_only_desc[] = {
+ (struct usb_descriptor_header *) &dpl_data_intf_desc,
+ (struct usb_descriptor_header *) &dpl_hs_data_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *dpl_ss_data_only_desc[] = {
+ (struct usb_descriptor_header *) &dpl_data_intf_desc,
+ (struct usb_descriptor_header *) &dpl_ss_data_desc,
+ (struct usb_descriptor_header *) &dpl_data_ep_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string dpl_string_defs[] = {
+ [0].s = "QDSS DATA",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings dpl_string_table = {
+ .language = 0x0409,
+ .strings = dpl_string_defs,
+};
+
+static struct usb_gadget_strings *dpl_strings[] = {
+ &dpl_string_table,
+ NULL,
+};
+
static void frmnet_ctrl_response_available(struct f_rmnet *dev);
/* ------- misc functions --------------------*/
@@ -227,6 +287,24 @@ static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
return container_of(r, struct f_rmnet, port);
}
+int name_to_prot(struct f_rmnet *dev, const char *name)
+{
+ if (!name)
+ goto error;
+
+ if (!strncasecmp("rmnet", name, MAX_INST_NAME_LEN)) {
+ dev->qti_port_type = QTI_PORT_RMNET;
+ dev->func_type = USB_IPA_FUNC_RMNET;
+ } else if (!strncasecmp("dpl", name, MAX_INST_NAME_LEN)) {
+ dev->qti_port_type = QTI_PORT_DPL;
+ dev->func_type = USB_IPA_FUNC_DPL;
+ }
+ return 0;
+
+error:
+ return -EINVAL;
+}
+
static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
@@ -279,51 +357,57 @@ static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
/* -------------------------------------------*/
-static int gport_rmnet_connect(struct f_rmnet *dev, unsigned intf)
+static int gport_rmnet_connect(struct f_rmnet *dev)
{
int ret;
int src_connection_idx = 0, dst_connection_idx = 0;
struct usb_gadget *gadget = dev->cdev->gadget;
enum usb_ctrl usb_bam_type;
+ int bam_pipe_num = (dev->qti_port_type == QTI_PORT_DPL) ? 1 : 0;
- ret = gqti_ctrl_connect(&dev->port, QTI_PORT_RMNET, dev->ifc_id);
+ ret = gqti_ctrl_connect(&dev->port, dev->qti_port_type, dev->ifc_id);
if (ret) {
pr_err("%s: gqti_ctrl_connect failed: err:%d\n",
__func__, ret);
return ret;
}
-
+ if (dev->qti_port_type == QTI_PORT_DPL)
+ dev->port.send_encap_cmd(QTI_PORT_DPL, NULL, 0);
dev->ipa_port.cdev = dev->cdev;
- ipa_data_port_select(USB_IPA_FUNC_RMNET);
+ ipa_data_port_select(dev->func_type);
usb_bam_type = usb_bam_get_bam_type(gadget->name);
- src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
- IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
- QTI_PORT_RMNET);
- dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
- IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
- QTI_PORT_RMNET);
+
+ if (dev->ipa_port.in) {
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ if (dev->ipa_port.out) {
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
if (dst_connection_idx < 0 || src_connection_idx < 0) {
pr_err("%s: usb_bam_get_connection_idx failed\n",
__func__);
- gqti_ctrl_disconnect(&dev->port, QTI_PORT_RMNET);
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
return -EINVAL;
}
- ret = ipa_data_connect(&dev->ipa_port, USB_IPA_FUNC_RMNET,
+ ret = ipa_data_connect(&dev->ipa_port, dev->func_type,
src_connection_idx, dst_connection_idx);
if (ret) {
pr_err("%s: ipa_data_connect failed: err:%d\n",
__func__, ret);
- gqti_ctrl_disconnect(&dev->port, QTI_PORT_RMNET);
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
return ret;
}
-
return 0;
}
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
- gqti_ctrl_disconnect(&dev->port, QTI_PORT_RMNET);
- ipa_data_disconnect(&dev->ipa_port, USB_IPA_FUNC_RMNET);
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+ ipa_data_disconnect(&dev->ipa_port, dev->func_type);
return 0;
}
@@ -333,24 +417,25 @@ static void frmnet_free(struct usb_function *f)
opts = container_of(f->fi, struct f_rmnet_opts, func_inst);
opts->refcnt--;
- kfree(rmnet_port);
- rmnet_port = NULL;
}
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_gadget *gadget = c->cdev->gadget;
- pr_debug("%s: start unbinding\n", __func__);
- if (gadget_is_superspeed(c->cdev->gadget))
+ pr_debug("%s: start unbinding\nclear_desc\n", __func__);
+ if (gadget_is_superspeed(gadget) && f->ss_descriptors)
usb_free_descriptors(f->ss_descriptors);
- if (gadget_is_dualspeed(c->cdev->gadget))
+
+ if (gadget_is_dualspeed(gadget) && f->hs_descriptors)
usb_free_descriptors(f->hs_descriptors);
- usb_free_descriptors(f->fs_descriptors);
- frmnet_free_req(dev->notify, dev->notify_req);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
- kfree(f->name);
+ if (dev->notify_req)
+ frmnet_free_req(dev->notify, dev->notify_req);
}
static void frmnet_purge_responses(struct f_rmnet *dev)
@@ -384,11 +469,11 @@ static void frmnet_suspend(struct usb_function *f)
pr_debug("%s: dev: %p remote_wakeup: %d\n",
__func__, dev, remote_wakeup_allowed);
- usb_ep_fifo_flush(dev->notify);
- frmnet_purge_responses(dev);
-
- ipa_data_suspend(&dev->ipa_port, USB_IPA_FUNC_RMNET,
- remote_wakeup_allowed);
+ if (dev->notify) {
+ usb_ep_fifo_flush(dev->notify);
+ frmnet_purge_responses(dev);
+ }
+ ipa_data_suspend(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
}
static void frmnet_resume(struct usb_function *f)
@@ -404,8 +489,7 @@ static void frmnet_resume(struct usb_function *f)
pr_debug("%s: dev: %p remote_wakeup: %d\n",
__func__, dev, remote_wakeup_allowed);
- ipa_data_resume(&dev->ipa_port, USB_IPA_FUNC_RMNET,
- remote_wakeup_allowed);
+ ipa_data_resume(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
}
static void frmnet_disable(struct usb_function *f)
@@ -413,15 +497,13 @@ static void frmnet_disable(struct usb_function *f)
struct f_rmnet *dev = func_to_rmnet(f);
pr_debug("%s: Disabling\n", __func__);
- usb_ep_disable(dev->notify);
- dev->notify->driver_data = NULL;
-
atomic_set(&dev->online, 0);
+ if (dev->notify) {
+ usb_ep_disable(dev->notify);
+ dev->notify->driver_data = NULL;
+ frmnet_purge_responses(dev);
+ }
- frmnet_purge_responses(dev);
-
- msm_ep_unconfig(dev->ipa_port.out);
- msm_ep_unconfig(dev->ipa_port.in);
gport_rmnet_disconnect(dev);
}
@@ -430,64 +512,78 @@ frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_rmnet *dev = func_to_rmnet(f);
struct usb_composite_dev *cdev = f->config->cdev;
- int ret;
- struct list_head *cpkt;
+ int ret = 0;
pr_debug("%s: dev: %p\n", __func__, dev);
dev->cdev = cdev;
- if (dev->notify->driver_data) {
- pr_debug("%s: reset port\n", __func__);
- usb_ep_disable(dev->notify);
- }
+ if (dev->notify) {
+ if (dev->notify->driver_data) {
+ pr_debug("%s: reset port\n", __func__);
+ usb_ep_disable(dev->notify);
+ }
- ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
- if (ret) {
- dev->notify->desc = NULL;
- ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
+ ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
+ if (ret) {
+ dev->notify->desc = NULL;
+ ERROR(cdev,
+ "config_ep_by_speed failed for ep %s, result %d\n",
dev->notify->name, ret);
- return ret;
- }
- ret = usb_ep_enable(dev->notify);
+ return ret;
+ }
- if (ret) {
- pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ ret = usb_ep_enable(dev->notify);
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
__func__, dev->notify->name, ret);
- dev->notify->desc = NULL;
- return ret;
- }
- dev->notify->driver_data = dev;
-
- if (!dev->ipa_port.in->desc || !dev->ipa_port.out->desc) {
- if (config_ep_by_speed(cdev->gadget, f, dev->ipa_port.in) ||
- config_ep_by_speed(cdev->gadget, f, dev->ipa_port.out)) {
- pr_err("%s(): config_ep_by_speed failed.\n", __func__);
- ret = -EINVAL;
- goto err_disable_ep;
+ dev->notify->desc = NULL;
+ return ret;
}
- dev->ipa_port.cdev = dev->cdev;
+
+ dev->notify->driver_data = dev;
+ }
+
+ if (dev->ipa_port.in && !dev->ipa_port.in->desc
+ && config_ep_by_speed(cdev->gadget, f, dev->ipa_port.in)) {
+ pr_err("%s(): config_ep_by_speed failed.\n",
+ __func__);
+ dev->ipa_port.in->desc = NULL;
+ ret = -EINVAL;
+ goto err_disable_ep;
+ }
+
+ if (dev->ipa_port.out && !dev->ipa_port.out->desc
+ && config_ep_by_speed(cdev->gadget, f, dev->ipa_port.out)) {
+ pr_err("%s(): config_ep_by_speed failed.\n",
+ __func__);
+ dev->ipa_port.out->desc = NULL;
+ ret = -EINVAL;
+ goto err_disable_ep;
}
- ret = gport_rmnet_connect(dev, intf);
+ ret = gport_rmnet_connect(dev);
if (ret) {
pr_err("%s(): gport_rmnet_connect fail with err:%d\n",
- __func__, ret);
+ __func__, ret);
goto err_disable_ep;
}
atomic_set(&dev->online, 1);
-
/*
- * In case notifications were aborted, but there are pending control
- * packets in the response queue, re-add the notifications.
- */
- list_for_each(cpkt, &dev->cpkt_resp_q)
- frmnet_ctrl_response_available(dev);
+ * In case notifications were aborted, but there are
+ * pending control packets in the response queue,
+ * re-add the notifications.
+ */
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ struct list_head *cpkt;
+
+ list_for_each(cpkt, &dev->cpkt_resp_q)
+ frmnet_ctrl_response_available(dev);
+ }
return ret;
err_disable_ep:
- dev->ipa_port.in->desc = NULL;
- dev->ipa_port.out->desc = NULL;
- usb_ep_disable(dev->notify);
+ if (dev->notify && dev->notify->driver_data)
+ usb_ep_disable(dev->notify);
return ret;
}
@@ -813,108 +909,119 @@ invalid:
return ret;
}
-static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+static int ipa_update_function_bind_params(struct f_rmnet *dev,
+ struct usb_composite_dev *cdev, struct ipa_function_bind_info *info)
{
- struct f_rmnet *dev = func_to_rmnet(f);
- struct usb_ep *ep;
- struct usb_composite_dev *cdev = c->cdev;
- int ret = -ENODEV;
-
- if (rmnet_string_defs[0].id == 0) {
- ret = usb_string_id(c->cdev);
- if (ret < 0) {
- pr_err("%s: failed to get string id, err:%d\n",
- __func__, ret);
- return ret;
- }
- rmnet_string_defs[0].id = ret;
+ struct usb_ep *ep;
+ struct usb_function *f = &dev->func;
+ int status;
+
+ /* maybe allocate device-global string IDs */
+ if (info->string_defs[0].id != 0)
+ goto skip_string_id_alloc;
+
+ if (info->data_str_idx >= 0 && info->data_desc) {
+ /* data interface label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->data_str_idx].id = status;
+ info->data_desc->iInterface = status;
}
- pr_debug("%s: start binding\n", __func__);
- dev->ifc_id = usb_interface_id(c, f);
- if (dev->ifc_id < 0) {
- pr_err("%s: unable to allocate ifc id, err:%d\n",
- __func__, dev->ifc_id);
- return dev->ifc_id;
- }
- rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+skip_string_id_alloc:
+ if (info->data_desc)
+ info->data_desc->bInterfaceNumber = dev->ifc_id;
- ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
- if (!ep) {
- pr_err("%s: usb epin autoconfig failed\n", __func__);
- return -ENODEV;
- }
- dev->ipa_port.in = ep;
- ep->driver_data = cdev;
-
- ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
- if (!ep) {
- pr_err("%s: usb epout autoconfig failed\n", __func__);
- ret = -ENODEV;
- goto ep_auto_out_fail;
+ if (info->fs_in_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+ if (!ep) {
+ pr_err("%s: usb epin autoconfig failed\n",
+ __func__);
+ return -ENODEV;
+ }
+ dev->ipa_port.in = ep;
+ ep->driver_data = cdev;
}
- dev->ipa_port.out = ep;
- ep->driver_data = cdev;
-
- ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
- if (!ep) {
- pr_err("%s: usb epnotify autoconfig failed\n", __func__);
- ret = -ENODEV;
- goto ep_auto_notify_fail;
+
+ if (info->fs_out_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+ if (!ep) {
+ pr_err("%s: usb epout autoconfig failed\n",
+ __func__);
+ status = -ENODEV;
+ goto ep_auto_out_fail;
+ }
+ dev->ipa_port.out = ep;
+ ep->driver_data = cdev;
}
- dev->notify = ep;
- ep->driver_data = cdev;
- dev->notify_req = frmnet_alloc_req(ep,
+ if (info->fs_notify_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_notify_desc);
+ if (!ep) {
+ pr_err("%s: usb epnotify autoconfig failed\n",
+ __func__);
+ status = -ENODEV;
+ goto ep_auto_notify_fail;
+ }
+ dev->notify = ep;
+ ep->driver_data = cdev;
+ dev->notify_req = frmnet_alloc_req(ep,
sizeof(struct usb_cdc_notification),
GFP_KERNEL);
- if (IS_ERR(dev->notify_req)) {
- pr_err("%s: unable to allocate memory for notify req\n",
+ if (IS_ERR(dev->notify_req)) {
+ pr_err("%s: unable to allocate memory for notify req\n",
__func__);
- ret = -ENOMEM;
- goto ep_notify_alloc_fail;
- }
-
- dev->notify_req->complete = frmnet_notify_complete;
- dev->notify_req->context = dev;
+ status = -ENOMEM;
+ goto ep_notify_alloc_fail;
+ }
- ret = -ENOMEM;
- f->fs_descriptors = usb_copy_descriptors(rmnet_fs_function);
+ dev->notify_req->complete = frmnet_notify_complete;
+ dev->notify_req->context = dev;
+ }
+ status = -ENOMEM;
+ f->fs_descriptors = usb_copy_descriptors(info->fs_desc_hdr);
if (!f->fs_descriptors) {
- pr_err("%s: no descriptors,usb_copy descriptors(fs)failed\n",
+ pr_err("%s: no descriptors, usb_copy descriptors(fs)failed\n",
__func__);
goto fail;
}
+
if (gadget_is_dualspeed(cdev->gadget)) {
- rmnet_hs_in_desc.bEndpointAddress =
- rmnet_fs_in_desc.bEndpointAddress;
- rmnet_hs_out_desc.bEndpointAddress =
- rmnet_fs_out_desc.bEndpointAddress;
- rmnet_hs_notify_desc.bEndpointAddress =
- rmnet_fs_notify_desc.bEndpointAddress;
+ if (info->fs_in_desc && info->hs_in_desc)
+ info->hs_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
+ if (info->fs_out_desc && info->hs_out_desc)
+ info->hs_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc && info->hs_notify_desc)
+ info->hs_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
/* copy descriptors, and track endpoint copies */
- f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
-
+ f->hs_descriptors = usb_copy_descriptors(info->hs_desc_hdr);
if (!f->hs_descriptors) {
- pr_err("%s: no hs_descriptors,usb_copy descriptors(hs)failed\n",
- __func__);
+ pr_err("%s: no hs_descriptors, usb_copy descriptors(hs)failed\n",
+ __func__);
goto fail;
}
}
if (gadget_is_superspeed(cdev->gadget)) {
- rmnet_ss_in_desc.bEndpointAddress =
- rmnet_fs_in_desc.bEndpointAddress;
- rmnet_ss_out_desc.bEndpointAddress =
- rmnet_fs_out_desc.bEndpointAddress;
- rmnet_ss_notify_desc.bEndpointAddress =
- rmnet_fs_notify_desc.bEndpointAddress;
+ if (info->fs_in_desc && info->ss_in_desc)
+ info->ss_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
- /* copy descriptors, and track endpoint copies */
- f->ss_descriptors = usb_copy_descriptors(rmnet_ss_function);
+ if (info->fs_out_desc && info->ss_out_desc)
+ info->ss_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc && info->ss_notify_desc)
+ info->ss_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(info->ss_desc_hdr);
if (!f->ss_descriptors) {
pr_err("%s: no ss_descriptors,usb_copy descriptors(ss)failed\n",
__func__);
@@ -922,57 +1029,95 @@ static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
}
}
- pr_debug("%s: RmNet %s Speed, IN:%s OUT:%s\n",
- __func__, gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
- dev->ipa_port.in->name, dev->ipa_port.out->name);
-
return 0;
fail:
- if (f->ss_descriptors)
+ if (gadget_is_superspeed(cdev->gadget) && f->ss_descriptors)
usb_free_descriptors(f->ss_descriptors);
- if (f->hs_descriptors)
+ if (gadget_is_dualspeed(cdev->gadget) && f->hs_descriptors)
usb_free_descriptors(f->hs_descriptors);
if (f->fs_descriptors)
usb_free_descriptors(f->fs_descriptors);
if (dev->notify_req)
frmnet_free_req(dev->notify, dev->notify_req);
ep_notify_alloc_fail:
- dev->notify->driver_data = NULL;
- dev->notify = NULL;
+ dev->notify->driver_data = NULL;
+ dev->notify = NULL;
ep_auto_notify_fail:
- dev->ipa_port.out->driver_data = NULL;
- dev->ipa_port.out = NULL;
+ dev->ipa_port.out->driver_data = NULL;
+ dev->ipa_port.out = NULL;
ep_auto_out_fail:
- dev->ipa_port.in->driver_data = NULL;
- dev->ipa_port.in = NULL;
+ dev->ipa_port.in->driver_data = NULL;
+ dev->ipa_port.in = NULL;
+
+ return status;
+}
+
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret = -ENODEV;
+ struct ipa_function_bind_info info = {0};
+
+ pr_debug("%s: start binding\n", __func__);
+ dev->ifc_id = usb_interface_id(c, f);
+ if (dev->ifc_id < 0) {
+ pr_err("%s: unable to allocate ifc id, err:%d\n",
+ __func__, dev->ifc_id);
+ return dev->ifc_id;
+ }
+
+ info.data_str_idx = 0;
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ info.string_defs = rmnet_string_defs;
+ info.data_desc = &rmnet_interface_desc;
+ info.fs_in_desc = &rmnet_fs_in_desc;
+ info.fs_out_desc = &rmnet_fs_out_desc;
+ info.fs_notify_desc = &rmnet_fs_notify_desc;
+ info.hs_in_desc = &rmnet_hs_in_desc;
+ info.hs_out_desc = &rmnet_hs_out_desc;
+ info.hs_notify_desc = &rmnet_hs_notify_desc;
+ info.ss_in_desc = &rmnet_ss_in_desc;
+ info.ss_out_desc = &rmnet_ss_out_desc;
+ info.ss_notify_desc = &rmnet_ss_notify_desc;
+ info.fs_desc_hdr = rmnet_fs_function;
+ info.hs_desc_hdr = rmnet_hs_function;
+ info.ss_desc_hdr = rmnet_ss_function;
+ } else {
+ info.string_defs = dpl_string_defs;
+ info.data_desc = &dpl_data_intf_desc;
+ info.fs_in_desc = &dpl_hs_data_desc;
+ info.hs_in_desc = &dpl_hs_data_desc;
+ info.ss_in_desc = &dpl_ss_data_desc;
+ info.fs_desc_hdr = dpl_hs_data_only_desc;
+ info.hs_desc_hdr = dpl_hs_data_only_desc;
+ info.ss_desc_hdr = dpl_ss_data_only_desc;
+ }
+
+ ret = ipa_update_function_bind_params(dev, cdev, &info);
return ret;
}
static struct usb_function *frmnet_bind_config(struct usb_function_instance *fi)
{
- struct f_rmnet_opts *opts;
- int status;
+ struct f_rmnet_opts *opts;
struct f_rmnet *dev;
struct usb_function *f;
- unsigned long flags;
- /* allocate and initialize one new instance */
- status = -ENOMEM;
opts = container_of(fi, struct f_rmnet_opts, func_inst);
opts->refcnt++;
dev = opts->dev;
- spin_lock_irqsave(&dev->lock, flags);
f = &dev->func;
- f->name = kasprintf(GFP_ATOMIC, "rmnet%d", 0);
- spin_unlock_irqrestore(&dev->lock, flags);
- if (!f->name) {
- pr_err("%s: cannot allocate memory for name\n", __func__);
- return ERR_PTR(-ENOMEM);
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ f->name = "rmnet";
+ f->strings = rmnet_strings;
+ } else {
+ f->name = "dpl";
+ f->strings = dpl_strings;
}
- f->strings = rmnet_strings;
f->bind = frmnet_bind;
f->unbind = frmnet_unbind;
f->disable = frmnet_disable;
@@ -1004,21 +1149,53 @@ static void rmnet_free_inst(struct usb_function_instance *f)
{
struct f_rmnet_opts *opts = container_of(f, struct f_rmnet_opts,
func_inst);
- ipa_data_free(USB_IPA_FUNC_RMNET);
+ ipa_data_free(opts->dev->func_type);
+ kfree(opts->dev);
kfree(opts);
}
static int rmnet_set_inst_name(struct usb_function_instance *fi,
const char *name)
{
- int name_len;
- int ret;
+ int name_len, ret = 0;
+ struct f_rmnet *dev;
+ struct f_rmnet_opts *opts = container_of(fi,
+ struct f_rmnet_opts, func_inst);
name_len = strlen(name) + 1;
if (name_len > MAX_INST_NAME_LEN)
return -ENAMETOOLONG;
- ret = ipa_data_setup(USB_IPA_FUNC_RMNET);
+ dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ /* Update qti->qti_port_type */
+ ret = name_to_prot(dev, name);
+ if (ret < 0) {
+ pr_err("%s: failed to find prot for %s instance\n",
+ __func__, name);
+ goto fail;
+ }
+
+ if (dev->qti_port_type >= QTI_NUM_PORTS ||
+ dev->func_type >= USB_IPA_NUM_FUNCS) {
+ pr_err("%s: invalid prot\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&dev->cpkt_resp_q);
+ ret = ipa_data_setup(dev->func_type);
+ if (ret)
+ goto fail;
+
+ opts->dev = dev;
+ return 0;
+
+fail:
+ kfree(dev);
return ret;
}
@@ -1062,14 +1239,6 @@ static struct usb_function_instance *rmnet_alloc_inst(void)
static struct usb_function *rmnet_alloc(struct usb_function_instance *fi)
{
- struct f_rmnet_opts *opts = container_of(fi,
- struct f_rmnet_opts, func_inst);
- rmnet_port = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
- if (!rmnet_port)
- return ERR_PTR(-ENOMEM);
- opts->dev = rmnet_port;
- spin_lock_init(&rmnet_port->lock);
- INIT_LIST_HEAD(&rmnet_port->cpkt_resp_q);
return frmnet_bind_config(fi);
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 044ca79d3cb5..12628dd36e55 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1291,6 +1291,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
struct cntrl_cur_lay3 c;
+ memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
c.dCUR = p_srate;
diff --git a/drivers/usb/gadget/function/u_ctrl_qti.c b/drivers/usb/gadget/function/u_ctrl_qti.c
index c0650b0abf8c..287bfeb21ea3 100644
--- a/drivers/usb/gadget/function/u_ctrl_qti.c
+++ b/drivers/usb/gadget/function/u_ctrl_qti.c
@@ -204,7 +204,6 @@ int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned intf)
{
struct qti_ctrl_port *port;
struct grmnet *g_rmnet = NULL;
- struct gqdss *g_dpl = NULL;
unsigned long flags;
pr_debug("%s: port type:%d gadget:%p\n", __func__, qport, gr);
@@ -224,17 +223,13 @@ int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned intf)
port->ep_type = DATA_EP_TYPE_HSUSB;
port->intf = intf;
- if (gr && port->port_type == QTI_PORT_RMNET) {
+ if (gr) {
port->port_usb = gr;
g_rmnet = (struct grmnet *)gr;
g_rmnet->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
g_rmnet->notify_modem = gqti_ctrl_notify_modem;
- } else if (gr && port->port_type == QTI_PORT_DPL) {
- port->port_usb = gr;
- g_dpl = (struct gqdss *)gr;
- g_dpl->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
- g_dpl->notify_modem = gqti_ctrl_notify_modem;
- atomic_set(&port->line_state, 1);
+ if (port->port_type == QTI_PORT_DPL)
+ atomic_set(&port->line_state, 1);
} else {
spin_unlock_irqrestore(&port->lock, flags);
pr_err("%s(): Port is used without port type.\n", __func__);
@@ -263,7 +258,6 @@ void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport)
unsigned long flags;
struct rmnet_ctrl_pkt *cpkt;
struct grmnet *g_rmnet = NULL;
- struct gqdss *g_dpl = NULL;
pr_debug("%s: gadget:%p\n", __func__, gr);
@@ -287,14 +281,10 @@ void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport)
port->ipa_cons_idx = -1;
port->port_usb = NULL;
- if (gr && port->port_type == QTI_PORT_RMNET) {
+ if (gr) {
g_rmnet = (struct grmnet *)gr;
g_rmnet->send_encap_cmd = NULL;
g_rmnet->notify_modem = NULL;
- } else if (gr && port->port_type == QTI_PORT_DPL) {
- g_dpl = (struct gqdss *)gr;
- g_dpl->send_encap_cmd = NULL;
- g_dpl->notify_modem = NULL;
} else {
pr_err("%s(): unrecognized gadget type(%d).\n",
__func__, port->port_type);
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
index 2da0c59fdfc2..8f6099f900d6 100644
--- a/drivers/usb/gadget/function/u_data_ipa.c
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -457,7 +457,7 @@ static void ipa_data_connect_work(struct work_struct *w)
configure_fifo(port->usb_bam_type,
port->src_connection_idx,
port->port_usb->out);
- ret = msm_ep_config(gport->out);
+ ret = msm_ep_config(gport->out, port->rx_req, GFP_ATOMIC);
if (ret) {
pr_err("msm_ep_config() failed for OUT EP\n");
usb_bam_free_fifos(port->usb_bam_type,
@@ -475,7 +475,7 @@ static void ipa_data_connect_work(struct work_struct *w)
port->tx_req->udc_priv = sps_params;
configure_fifo(port->usb_bam_type,
port->dst_connection_idx, gport->in);
- ret = msm_ep_config(gport->in);
+ ret = msm_ep_config(gport->in, port->tx_req, GFP_ATOMIC);
if (ret) {
pr_err("msm_ep_config() failed for IN EP\n");
goto unconfig_msm_ep_out;
@@ -837,13 +837,16 @@ void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
* the BAM disconnect API. This lets us restore this info when
* the USB bus is resumed.
*/
- gp->in_ep_desc_backup = gp->in->desc;
- gp->out_ep_desc_backup = gp->out->desc;
-
- pr_debug("in_ep_desc_backup = %p, out_ep_desc_backup = %p",
- gp->in_ep_desc_backup,
- gp->out_ep_desc_backup);
-
+ if (gp->in) {
+ gp->in_ep_desc_backup = gp->in->desc;
+ pr_debug("in_ep_desc_backup = %p\n",
+ gp->in_ep_desc_backup);
+ }
+ if (gp->out) {
+ gp->out_ep_desc_backup = gp->out->desc;
+ pr_debug("out_ep_desc_backup = %p\n",
+ gp->out_ep_desc_backup);
+ }
ipa_data_disconnect(gp, func);
return;
}
@@ -919,8 +922,8 @@ void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
struct ipa_data_ch_info *port;
unsigned long flags;
struct usb_gadget *gadget = NULL;
- u8 src_connection_idx;
- u8 dst_connection_idx;
+ u8 src_connection_idx = 0;
+ u8 dst_connection_idx = 0;
enum usb_ctrl usb_bam_type;
pr_debug("dev:%p port number:%d\n", gp, func);
@@ -944,20 +947,25 @@ void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
gadget = gp->cdev->gadget;
/* resume with remote wakeup disabled */
if (!remote_wakeup_enabled) {
- /* Restore endpoint descriptors info. */
- gp->in->desc = gp->in_ep_desc_backup;
- gp->out->desc = gp->out_ep_desc_backup;
-
- pr_debug("in_ep_desc_backup = %p, out_ep_desc_backup = %p",
- gp->in_ep_desc_backup,
- gp->out_ep_desc_backup);
+ int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0;
usb_bam_type = usb_bam_get_bam_type(gadget->name);
- src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
- IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
- 0);
- dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
- IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
- 0);
+ /* Restore endpoint descriptors info. */
+ if (gp->in) {
+ gp->in->desc = gp->in_ep_desc_backup;
+ pr_debug("in_ep_desc_backup = %p\n",
+ gp->in_ep_desc_backup);
+ dst_connection_idx = usb_bam_get_connection_idx(
+ usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ if (gp->out) {
+ gp->out->desc = gp->out_ep_desc_backup;
+ pr_debug("out_ep_desc_backup = %p\n",
+ gp->out_ep_desc_backup);
+ src_connection_idx = usb_bam_get_connection_idx(
+ usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
ipa_data_connect(gp, func,
src_connection_idx, dst_connection_idx);
return;
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
index 14411575af22..17dccbc4cf16 100644
--- a/drivers/usb/gadget/function/u_data_ipa.h
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -47,6 +47,25 @@ struct gadget_ipa_port {
};
+struct ipa_function_bind_info {
+ struct usb_string *string_defs;
+ int data_str_idx;
+ struct usb_interface_descriptor *data_desc;
+ struct usb_endpoint_descriptor *fs_in_desc;
+ struct usb_endpoint_descriptor *fs_out_desc;
+ struct usb_endpoint_descriptor *fs_notify_desc;
+ struct usb_endpoint_descriptor *hs_in_desc;
+ struct usb_endpoint_descriptor *hs_out_desc;
+ struct usb_endpoint_descriptor *hs_notify_desc;
+ struct usb_endpoint_descriptor *ss_in_desc;
+ struct usb_endpoint_descriptor *ss_out_desc;
+ struct usb_endpoint_descriptor *ss_notify_desc;
+
+ struct usb_descriptor_header **fs_desc_hdr;
+ struct usb_descriptor_header **hs_desc_hdr;
+ struct usb_descriptor_header **ss_desc_hdr;
+};
+
/* for configfs support */
#define MAX_INST_NAME_LEN 40
diff --git a/drivers/usb/gadget/function/u_qdss.c b/drivers/usb/gadget/function/u_qdss.c
index 8f5fcc16eb15..e26f0977b9b7 100644
--- a/drivers/usb/gadget/function/u_qdss.c
+++ b/drivers/usb/gadget/function/u_qdss.c
@@ -94,11 +94,12 @@ int set_qdss_data_connection(struct usb_gadget *gadget,
static int init_data(struct usb_ep *ep)
{
+ struct f_qdss *qdss = ep->driver_data;
int res = 0;
pr_debug("init_data\n");
- res = msm_ep_config(ep);
+ res = msm_ep_config(ep, qdss->endless_req, GFP_ATOMIC);
if (res)
pr_err("msm_ep_config failed\n");
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 55386619a0f1..e57f48f9528f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -541,7 +541,7 @@ static ssize_t ep_aio(struct kiocb *iocb,
*/
spin_lock_irq(&epdata->dev->lock);
value = -ENODEV;
- if (unlikely(epdata->ep))
+ if (unlikely(epdata->ep == NULL))
goto fail;
req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 5fb6f8b4f0b4..c73689b72f95 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc,
struct qe_ep *ep;
if (wValue != 0 || wLength != 0
- || pipe > USB_MAX_ENDPOINTS)
+ || pipe >= USB_MAX_ENDPOINTS)
break;
ep = &udc->eps[pipe];
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 48c92bf78bd0..f7661d9750fd 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
int port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
- ehci_writel(ehci, PORT_RWC_BITS,
- &ehci->regs->port_status[port]);
spin_unlock_irq(&ehci->lock);
ehci_port_power(ehci, port, false);
spin_lock_irq(&ehci->lock);
+ ehci_writel(ehci, PORT_RWC_BITS,
+ &ehci->regs->port_status[port]);
}
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index d029bbe9eb36..641fed609911 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
int branch;
- ed->state = ED_OPER;
ed->ed_prev = NULL;
ed->ed_next = NULL;
ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
/* the HC may not see the schedule updates yet, but if it does
* then they'll be properly ordered.
*/
+
+ ed->state = ED_OPER;
return 0;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a7b055bc279a..5ff3f39af38d 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -377,6 +377,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
ret = 0;
virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return -ENODEV;
+
cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
if (!cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 29dc6ab252b1..373e9d039457 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1072,7 +1072,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
struct usb_device *top_dev;
struct usb_hcd *hcd;
- if (udev->speed == USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
hcd = xhci->shared_hcd;
else
hcd = xhci->main_hcd;
@@ -1107,6 +1107,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
max_packets = MAX_PACKET(512);
@@ -1294,6 +1295,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
}
/* Fall through - SS and HS isoc/int have same decoding */
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
@@ -1334,7 +1336,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
- if (udev->speed != USB_SPEED_SUPER ||
+ if (udev->speed < USB_SPEED_SUPER ||
!usb_endpoint_xfer_isoc(&ep->desc))
return 0;
return ep->ss_ep_comp.bmAttributes;
@@ -1384,7 +1386,7 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev,
usb_endpoint_xfer_bulk(&ep->desc))
return 0;
- if (udev->speed == USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
@@ -1455,6 +1457,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
max_burst = 0;
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
/* dig out max burst from ep companion desc */
max_burst = ep->ss_ep_comp.bMaxBurst;
@@ -1786,11 +1789,84 @@ void xhci_free_command(struct xhci_hcd *xhci,
kfree(command);
}
+void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
+{
+ union xhci_trb *erdp_trb, *current_trb;
+ struct xhci_segment *seg;
+ u64 erdp_reg;
+ u32 iman_reg;
+ dma_addr_t deq;
+ unsigned long segment_offset;
+
+ /* disable irq, ack pending interrupt and ack all pending events */
+
+ iman_reg =
+ readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+ iman_reg &= ~IMAN_IE;
+ writel_relaxed(iman_reg,
+ &xhci->sec_ir_set[intr_num]->irq_pending);
+ iman_reg =
+ readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+ if (iman_reg & IMAN_IP)
+ writel_relaxed(iman_reg,
+ &xhci->sec_ir_set[intr_num]->irq_pending);
+
+ /* last acked event trb is in erdp reg */
+ erdp_reg =
+ xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
+ deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
+ if (!deq) {
+ pr_debug("%s: event ring handling not required\n", __func__);
+ return;
+ }
+
+ seg = xhci->sec_event_ring[intr_num]->first_seg;
+ segment_offset = deq - seg->dma;
+
+ /* find out virtual address of the last acked event trb */
+ erdp_trb = current_trb = &seg->trbs[0] +
+ (segment_offset/sizeof(*current_trb));
+
+ /* read cycle state of the last acked trb to find out CCS */
+ xhci->sec_event_ring[intr_num]->cycle_state =
+ (current_trb->event_cmd.flags & TRB_CYCLE);
+
+ while (1) {
+ /* last trb of the event ring: toggle cycle state */
+ if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
+ xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
+ current_trb = &seg->trbs[0];
+ } else {
+ current_trb++;
+ }
+
+ /* cycle state transition */
+ if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
+ xhci->sec_event_ring[intr_num]->cycle_state)
+ break;
+ }
+
+ if (erdp_trb != current_trb) {
+ deq =
+ xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
+ current_trb);
+ if (deq == 0)
+ xhci_warn(xhci,
+ "WARN ivalid SW event ring dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ erdp_reg &= ERST_PTR_MASK;
+ erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C); event ring is empty. */
+ erdp_reg |= ERST_EHB;
+ xhci_write_64(xhci, erdp_reg,
+ &xhci->sec_ir_set[intr_num]->erst_dequeue);
+}
+
int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
{
int size;
- u32 iman_reg;
- u64 erdp_reg;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct device *dev = xhci_to_hcd(xhci)->self.controller;
@@ -1803,28 +1879,7 @@ int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
size =
sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
if (xhci->sec_erst[intr_num].entries) {
- /*
- * disable irq, ack pending interrupt and clear EHB for xHC to
- * generate interrupt again when new event ring is setup
- */
- iman_reg =
- readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
- iman_reg &= ~IMAN_IE;
- writel_relaxed(iman_reg,
- &xhci->sec_ir_set[intr_num]->irq_pending);
- iman_reg =
- readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
- if (iman_reg & IMAN_IP)
- writel_relaxed(iman_reg,
- &xhci->sec_ir_set[intr_num]->irq_pending);
- /* make sure IP gets cleared before clearing EHB */
- mb();
-
- erdp_reg = xhci_read_64(xhci,
- &xhci->sec_ir_set[intr_num]->erst_dequeue);
- xhci_write_64(xhci, erdp_reg | ERST_EHB,
- &xhci->sec_ir_set[intr_num]->erst_dequeue);
-
+ xhci_handle_sec_intr_events(xhci, intr_num);
dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
xhci->sec_erst[intr_num].erst_dma_addr);
xhci->sec_erst[intr_num].entries = NULL;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index de644e56aa3b..963867c2c1d5 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -311,11 +311,12 @@ static void xhci_pci_remove(struct pci_dev *dev)
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
}
- usb_hcd_pci_remove(dev);
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(dev, PCI_D3hot);
+
+ usb_hcd_pci_remove(dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index fcb9b4b822aa..1221a80e0bdc 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -336,7 +336,7 @@ static int xhci_plat_runtime_suspend(struct device *dev)
dev_dbg(dev, "xhci-plat runtime suspend\n");
- return 0;
+ return xhci_suspend(xhci, true);
}
static int xhci_plat_runtime_resume(struct device *dev)
@@ -350,7 +350,7 @@ static int xhci_plat_runtime_resume(struct device *dev)
dev_dbg(dev, "xhci-plat runtime resume\n");
- ret = 0;
+ ret = xhci_resume(xhci, false);
pm_runtime_mark_last_busy(dev);
return ret;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 34cd23724bed..1f37b89e7267 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1331,12 +1331,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- if (cmd->command_trb != xhci->cmd_ring->dequeue) {
- xhci_err(xhci,
- "Command completion event does not match command\n");
- return;
- }
-
del_timer(&xhci->cmd_timer);
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1348,6 +1342,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_handle_stopped_cmd_ring(xhci, cmd);
return;
}
+
+ if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+ xhci_err(xhci,
+ "Command completion event does not match command\n");
+ return;
+ }
+
/*
* Host aborted the command ring, check if the current command was
* supposed to be aborted, otherwise continue normally.
@@ -3575,7 +3576,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
{
unsigned int max_burst;
- if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
+ if (xhci->hci_version < 0x100 || udev->speed < USB_SPEED_SUPER)
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
@@ -3601,6 +3602,7 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
return 0;
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
/* bMaxBurst is zero based: 0 means 1 packet per burst */
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index a37b219a8dc5..8cfc4ca4c050 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2085,6 +2085,7 @@ static unsigned int xhci_get_block_size(struct usb_device *udev)
case USB_SPEED_HIGH:
return HS_BLOCK;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
return SS_BLOCK;
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
@@ -2210,7 +2211,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
unsigned int packets_remaining = 0;
unsigned int i;
- if (virt_dev->udev->speed == USB_SPEED_SUPER)
+ if (virt_dev->udev->speed >= USB_SPEED_SUPER)
return xhci_check_ss_bw(xhci, virt_dev);
if (virt_dev->udev->speed == USB_SPEED_HIGH) {
@@ -2411,7 +2412,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
if (xhci_is_async_ep(ep_bw->type))
return;
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
if (xhci_is_sync_in_ep(ep_bw->type))
xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
xhci_get_ss_bw_consumed(ep_bw);
@@ -2449,6 +2450,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
break;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
/* Should never happen because only LS/FS/HS endpoints will get
@@ -2508,6 +2510,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
break;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
/* Should never happen because only LS/FS/HS endpoints will get
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 1a812eafe670..1624b09d9748 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -558,7 +558,6 @@ static void sg_timeout(unsigned long _req)
{
struct usb_sg_request *req = (struct usb_sg_request *) _req;
- req->status = -ETIMEDOUT;
usb_sg_cancel(req);
}
@@ -589,8 +588,10 @@ static int perform_sglist(
mod_timer(&sg_timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
- del_timer_sync(&sg_timer);
- retval = req->status;
+ if (!del_timer_sync(&sg_timer))
+ retval = -ETIMEDOUT;
+ else
+ retval = req->status;
/* FIXME check resulting data pattern */
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 86908d2ce9d5..0f9447c986a4 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -1048,6 +1048,9 @@ static int qusb_phy_probe(struct platform_device *pdev)
if (ret)
usb_remove_phy(&qphy->phy);
+	/* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
return ret;
}
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 839d701ec9b5..07c96419fd60 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -1088,6 +1088,10 @@ static int qusb_phy_probe(struct platform_device *pdev)
if (ret)
usb_remove_phy(&qphy->phy);
+ /* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
return ret;
}
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index fc61e3172d0b..64916f5566b5 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -307,13 +307,13 @@ static int msm_ssphy_qmp_init(struct usb_phy *uphy)
phy->clk_enabled = true;
}
- writel_relaxed(0x01,
- phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
-
/* select usb3 phy mode */
if (phy->tcsr_usb3_dp_phymode)
writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+ writel_relaxed(0x01,
+ phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
/* Make sure that above write completed to get PHY into POWER DOWN */
mb();
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index f1893e08e51a..36e5b5c530bd 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -808,20 +808,27 @@ static void xfer_work(struct work_struct *work)
{
struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
struct usbhs_pipe *pipe = pkt->pipe;
- struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
+ struct usbhs_fifo *fifo;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct dma_async_tx_descriptor *desc;
- struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
+ struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir;
+ unsigned long flags;
+ usbhs_lock(priv, flags);
+ fifo = usbhs_pipe_to_fifo(pipe);
+ if (!fifo)
+ goto xfer_work_end;
+
+ chan = usbhsf_dma_chan_get(fifo, pkt);
dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
pkt->trans, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
- return;
+ goto xfer_work_end;
desc->callback = usbhsf_dma_complete;
desc->callback_param = pipe;
@@ -829,7 +836,7 @@ static void xfer_work(struct work_struct *work)
pkt->cookie = dmaengine_submit(desc);
if (pkt->cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
- return;
+ goto xfer_work_end;
}
dev_dbg(dev, " %s %d (%d/ %d)\n",
@@ -840,6 +847,9 @@ static void xfer_work(struct work_struct *work)
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
usbhs_pipe_enable(pipe);
+
+xfer_work_end:
+ usbhs_unlock(priv, flags);
}
/*
@@ -859,7 +869,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_push;
/* check data length if this driver don't use USB-DMAC */
@@ -964,7 +974,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_pop;
fifo = usbhsf_get_dma_fifo(priv, pkt);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index fa14198daf77..efc4fae123a4 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -586,6 +586,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct usbhs_pipe *pipe;
int ret = -EIO;
+ unsigned long flags;
+
+ usbhs_lock(priv, flags);
/*
* if it already have pipe,
@@ -594,7 +597,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
if (uep->pipe) {
usbhs_pipe_clear(uep->pipe);
usbhs_pipe_sequence_data0(uep->pipe);
- return 0;
+ ret = 0;
+ goto usbhsg_ep_enable_end;
}
pipe = usbhs_pipe_malloc(priv,
@@ -614,14 +618,20 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
* use dmaengine if possible.
* It will use pio handler if impossible.
*/
- if (usb_endpoint_dir_in(desc))
+ if (usb_endpoint_dir_in(desc)) {
pipe->handler = &usbhs_fifo_dma_push_handler;
- else
+ } else {
pipe->handler = &usbhs_fifo_dma_pop_handler;
+ usbhs_xxxsts_clear(priv, BRDYSTS,
+ usbhs_pipe_number(pipe));
+ }
ret = 0;
}
+usbhsg_ep_enable_end:
+ usbhs_unlock(priv, flags);
+
return ret;
}
@@ -1065,7 +1075,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
- gpriv->transceiver ? "" : "no ");
+ !IS_ERR(gpriv->transceiver) ? "" : "no ");
/*
* CAUTION
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b61f12160d37..8c48c9d83d48 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c5d6c1e73e8e..f87a938cf005 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -406,6 +406,12 @@
#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
/*
+ * Ivium Technologies product IDs
+ */
+#define FTDI_PALMSENS_PID 0xf440
+#define FTDI_IVIUM_XSTAT_PID 0xf441
+
+/*
* Linx Technologies product ids
*/
#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
@@ -673,6 +679,12 @@
#define INTREPID_NEOVI_PID 0x0701
/*
+ * WICED USB UART
+ */
+#define WICED_VID 0x0A5C
+#define WICED_USB20706V2_PID 0x6422
+
+/*
* Definitions for ID TECH (www.idt-net.com) devices
*/
#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 06c7dbc1c802..63db004af21f 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!urb->transfer_buffer)
goto exit;
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 8ac9b55f05af..7f3ddd7ba2ce 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
}
if (urb->transfer_buffer == NULL) {
- urb->transfer_buffer =
- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ GFP_ATOMIC);
if (!urb->transfer_buffer)
goto exit;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d96d423d00e6..9894e341c6ac 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -273,6 +273,13 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
+#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
+#define TELIT_PRODUCT_LE920A4_1207 0x1207
+#define TELIT_PRODUCT_LE920A4_1208 0x1208
+#define TELIT_PRODUCT_LE920A4_1211 0x1211
+#define TELIT_PRODUCT_LE920A4_1212 0x1212
+#define TELIT_PRODUCT_LE920A4_1213 0x1213
+#define TELIT_PRODUCT_LE920A4_1214 0x1214
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -518,6 +525,12 @@ static void option_instat_callback(struct urb *urb);
#define VIATELECOM_VENDOR_ID 0x15eb
#define VIATELECOM_PRODUCT_CDS7 0x0001
+/* WeTelecom products */
+#define WETELECOM_VENDOR_ID 0x22de
+#define WETELECOM_PRODUCT_WMD200 0x6801
+#define WETELECOM_PRODUCT_6802 0x6802
+#define WETELECOM_PRODUCT_WMD300 0x6803
+
struct option_blacklist_info {
/* bitmask of interface numbers blacklisted for send_setup */
const unsigned long sendsetup;
@@ -627,6 +640,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1),
+};
+
static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
.sendsetup = BIT(2),
.reserved = BIT(0) | BIT(1) | BIT(3),
@@ -1198,8 +1216,20 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -1963,9 +1993,13 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 46f1f13b41f1..a0ca291bc07f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1432,7 +1432,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
rc = usb_register(udriver);
if (rc)
- return rc;
+ goto failed_usb_register;
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
@@ -1450,6 +1450,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
while (sd-- > serial_drivers)
usb_serial_deregister(*sd);
usb_deregister(udriver);
+failed_usb_register:
+ kfree(udriver);
return rc;
}
EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 3b3ba15558b7..20e9a86d2dcf 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -563,67 +563,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
}
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
- uint32_t flags, void *data)
+ unsigned int count, uint32_t flags,
+ void *data)
{
- int32_t fd = *(int32_t *)data;
-
- if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
- return -EINVAL;
-
/* DATA_NONE/DATA_BOOL enables loopback testing */
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- if (*ctx)
- eventfd_signal(*ctx, 1);
- return 0;
+ if (*ctx) {
+ if (count) {
+ eventfd_signal(*ctx, 1);
+ } else {
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ }
+ return 0;
+ }
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
- uint8_t trigger = *(uint8_t *)data;
+ uint8_t trigger;
+
+ if (!count)
+ return -EINVAL;
+
+ trigger = *(uint8_t *)data;
if (trigger && *ctx)
eventfd_signal(*ctx, 1);
- return 0;
- }
- /* Handle SET_DATA_EVENTFD */
- if (fd == -1) {
- if (*ctx)
- eventfd_ctx_put(*ctx);
- *ctx = NULL;
return 0;
- } else if (fd >= 0) {
- struct eventfd_ctx *efdctx;
- efdctx = eventfd_ctx_fdget(fd);
- if (IS_ERR(efdctx))
- return PTR_ERR(efdctx);
- if (*ctx)
- eventfd_ctx_put(*ctx);
- *ctx = efdctx;
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd;
+
+ if (!count)
+ return -EINVAL;
+
+ fd = *(int32_t *)data;
+ if (fd == -1) {
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ } else if (fd >= 0) {
+ struct eventfd_ctx *efdctx;
+
+ efdctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+
+ *ctx = efdctx;
+ }
return 0;
- } else
- return -EINVAL;
+ }
+
+ return -EINVAL;
}
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
- if (index != VFIO_PCI_ERR_IRQ_INDEX)
+ if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
- /*
- * We should sanitize start & count, but that wasn't caught
- * originally, so this IRQ index must forever ignore them :-(
- */
-
- return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+ return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
+ count, flags, data);
}
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
- if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+ if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
- return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+ return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
+ count, flags, data);
}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 29cfc57d496e..e4110d6de0b5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd {
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
- struct iovec *tvc_resp_iov;
+ struct iovec tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
@@ -557,7 +557,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);
- iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+ iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
@@ -1054,7 +1054,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = &vq->iov[out];
+ cmd->tvc_resp_iov = vq->iov[out];
cmd->tvc_in_iovs = in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
index d3eb3db48eb7..9f1a24431de9 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.c
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -846,6 +846,7 @@ static int __from_user_pcc_coeff_v17(
return -EFAULT;
}
+ memset(&pcc_cfg_payload, 0, sizeof(pcc_cfg_payload));
pcc_cfg_payload.r.b = pcc_cfg_payload32.r.b;
pcc_cfg_payload.r.g = pcc_cfg_payload32.r.g;
pcc_cfg_payload.r.c = pcc_cfg_payload32.r.c;
@@ -1127,6 +1128,8 @@ static int __from_user_igc_lut_data_v17(
pr_err("failed to copy payload from user for igc\n");
return -EFAULT;
}
+
+ memset(&igc_cfg_payload, 0, sizeof(igc_cfg_payload));
igc_cfg_payload.c0_c1_data = compat_ptr(igc_cfg_payload_32.c0_c1_data);
igc_cfg_payload.c2_data = compat_ptr(igc_cfg_payload_32.c2_data);
igc_cfg_payload.len = igc_cfg_payload_32.len;
@@ -1261,6 +1264,7 @@ static int __from_user_pgc_lut_data_v1_7(
pr_err("failed to copy from user the pgc32 payload\n");
return -EFAULT;
}
+ memset(&pgc_cfg_payload, 0, sizeof(pgc_cfg_payload));
pgc_cfg_payload.c0_data = compat_ptr(pgc_cfg_payload_32.c0_data);
pgc_cfg_payload.c1_data = compat_ptr(pgc_cfg_payload_32.c1_data);
pgc_cfg_payload.c2_data = compat_ptr(pgc_cfg_payload_32.c2_data);
@@ -1470,6 +1474,7 @@ static int __from_user_hist_lut_data_v1_7(
return -EFAULT;
}
+ memset(&hist_lut_cfg_payload, 0, sizeof(hist_lut_cfg_payload));
hist_lut_cfg_payload.len = hist_lut_cfg_payload32.len;
hist_lut_cfg_payload.data = compat_ptr(hist_lut_cfg_payload32.data);
@@ -2024,6 +2029,7 @@ static int __from_user_pa_data_v1_7(
return -EFAULT;
}
+ memset(&pa_cfg_payload, 0, sizeof(pa_cfg_payload));
pa_cfg_payload.mode = pa_cfg_payload32.mode;
pa_cfg_payload.global_hue_adj = pa_cfg_payload32.global_hue_adj;
pa_cfg_payload.global_sat_adj = pa_cfg_payload32.global_sat_adj;
@@ -2280,6 +2286,8 @@ static int __from_user_gamut_cfg_data_v17(
pr_err("failed to copy the gamut payload from userspace\n");
return -EFAULT;
}
+
+ memset(&gamut_cfg_payload, 0, sizeof(gamut_cfg_payload));
gamut_cfg_payload.mode = gamut_cfg_payload32.mode;
for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
gamut_cfg_payload.tbl_size[i] =
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index f925fd5296d4..79e74df12988 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -1226,6 +1226,58 @@ void mdss_dsi_panel_dsc_pps_send(struct mdss_dsi_ctrl_pdata *ctrl,
mdss_dsi_panel_cmds_send(ctrl, &pcmds, CMD_REQ_COMMIT);
}
+static int mdss_dsi_parse_hdr_settings(struct device_node *np,
+		struct mdss_panel_info *pinfo)
+{
+	int rc = 0;
+	struct mdss_panel_hdr_properties *hdr_prop;
+
+	if (!np) {
+		pr_err("%s: device node pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!pinfo) {
+		pr_err("%s: panel info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr_prop = &pinfo->hdr_properties;
+	hdr_prop->hdr_enabled = of_property_read_bool(np,
+		"qcom,mdss-dsi-panel-hdr-enabled");
+
+	if (hdr_prop->hdr_enabled) {
+		rc = of_property_read_u32_array(np,
+				"qcom,mdss-dsi-panel-hdr-color-primaries",
+				hdr_prop->display_primaries,
+				DISPLAY_PRIMARIES_COUNT);
+		if (rc) {
+			pr_info("%s:%d, Unable to read color primaries, rc:%d\n",
+					__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+
+		rc = of_property_read_u32(np,
+			"qcom,mdss-dsi-panel-peak-brightness",
+			&(hdr_prop->peak_brightness));
+		if (rc) {
+			pr_info("%s:%d, Unable to read hdr brightness, rc:%d\n",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+
+		rc = of_property_read_u32(np,
+			"qcom,mdss-dsi-panel-blackness-level",
+			&(hdr_prop->blackness_level));
+		if (rc) {
+			pr_info("%s:%d, Unable to read blackness level, rc:%d\n",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+	}
+	return 0;
+}
+
static int mdss_dsi_parse_dsc_version(struct device_node *np,
struct mdss_panel_timing *timing)
{
@@ -2606,6 +2658,9 @@ static int mdss_panel_parse_dt(struct device_node *np,
rc = mdss_panel_parse_display_timings(np, &ctrl_pdata->panel_data);
if (rc)
return rc;
+ rc = mdss_dsi_parse_hdr_settings(np, pinfo);
+ if (rc)
+ return rc;
pinfo->mipi.rx_eot_ignore = of_property_read_bool(np,
"qcom,mdss-dsi-rx-eot-ignore");
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 98ca6c3da20b..e37f0a602c0f 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -566,7 +566,13 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
"min_fps=%d\nmax_fps=%d\npanel_name=%s\n"
"primary_panel=%d\nis_pluggable=%d\ndisplay_id=%s\n"
"is_cec_supported=%d\nis_pingpong_split=%d\n"
- "dfps_porch_mode=%d\npu_roi_cnt=%d\ndual_dsi=%d",
+ "dfps_porch_mode=%d\npu_roi_cnt=%d\ndual_dsi=%d\n"
+ "is_hdr_enabled=%d\n"
+ "peak_brightness=%d\nblackness_level=%d\n"
+ "white_chromaticity_x=%d\nwhite_chromaticity_y=%d\n"
+ "red_chromaticity_x=%d\nred_chromaticity_y=%d\n"
+ "green_chromaticity_x=%d\ngreen_chromaticity_y=%d\n"
+ "blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n",
pinfo->partial_update_enabled,
pinfo->roi_alignment.xstart_pix_align,
pinfo->roi_alignment.width_pix_align,
@@ -580,7 +586,17 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
pinfo->is_pluggable, pinfo->display_id,
pinfo->is_cec_supported, is_pingpong_split(mfd),
dfps_porch_mode, pinfo->partial_update_enabled,
- is_panel_split(mfd));
+ is_panel_split(mfd), pinfo->hdr_properties.hdr_enabled,
+ pinfo->hdr_properties.peak_brightness,
+ pinfo->hdr_properties.blackness_level,
+ pinfo->hdr_properties.display_primaries[0],
+ pinfo->hdr_properties.display_primaries[1],
+ pinfo->hdr_properties.display_primaries[2],
+ pinfo->hdr_properties.display_primaries[3],
+ pinfo->hdr_properties.display_primaries[4],
+ pinfo->hdr_properties.display_primaries[5],
+ pinfo->hdr_properties.display_primaries[6],
+ pinfo->hdr_properties.display_primaries[7]);
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index e9ae30bb6914..92d2ffdd41cd 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -64,8 +64,10 @@
#define HDMI_TX_3_MAX_PCLK_RATE 297000
#define HDMI_TX_4_MAX_PCLK_RATE 600000
-#define hdmi_tx_get_fd(x) (x ? hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
-#define hdmi_tx_set_fd(x, y) {if (x) hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
+#define hdmi_tx_get_fd(x) ((x && (ffs(x) > 0)) ? \
+ hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
+#define hdmi_tx_set_fd(x, y) {if (x && (ffs(x) > 0)) \
+ hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
#define MAX_EDID_READ_RETRY 5
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 4d42e42035c3..6311352cb0cf 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -933,17 +933,18 @@ int mdss_mdp_smp_handoff(struct mdss_data_type *mdata)
data = readl_relaxed(mdata->mdp_base +
MDSS_MDP_REG_SMP_ALLOC_W0 + off);
client_id = (data >> s) & 0xFF;
- if (test_bit(i, mdata->mmb_alloc_map)) {
- /*
- * Certain pipes may have a dedicated set of
- * SMP MMBs statically allocated to them. In
- * such cases, we do not need to do anything
- * here.
- */
- pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
- , i, pipe ? pipe->num : -1, client_id);
- continue;
- }
+ if (i < ARRAY_SIZE(mdata->mmb_alloc_map))
+ if (test_bit(i, mdata->mmb_alloc_map)) {
+ /*
+ * Certain pipes may have a dedicated set of
+ * SMP MMBs statically allocated to them. In
+ * such cases, we do not need to do anything
+ * here.
+ */
+ pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
+ , i, pipe ? pipe->num : -1, client_id);
+ continue;
+ }
if (client_id) {
if (client_id != prev_id) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
index 1e4adc984802..71cab148e1c3 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
@@ -833,6 +833,8 @@ static int pp_gamut_set_config(char __iomem *base_addr,
struct mdp_gamut_cfg_data *gamut_cfg_data = NULL;
struct mdp_gamut_data_v1_7 *gamut_data = NULL;
char __iomem *base_addr_scale = base_addr;
+ uint64_t gamut_val;
+
if (!base_addr || !cfg_data || !pp_sts) {
pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
base_addr, cfg_data, pp_sts);
@@ -900,12 +902,18 @@ static int pp_gamut_set_config(char __iomem *base_addr,
val = index_start;
val |= GAMUT_TABLE_SELECT(i);
writel_relaxed(val, (base_addr + GAMUT_TABLE_INDEX));
- for (j = 0; j < gamut_data->tbl_size[i]; j++) {
- writel_relaxed(gamut_data->c1_c2_data[i][j],
- base_addr + GAMUT_TABLE_LOWER_GB);
- writel_relaxed(gamut_data->c0_data[i][j],
- base_addr + GAMUT_TABLE_UPPER_R);
+
+ writel_relaxed(gamut_data->c1_c2_data[i][0],
+ base_addr + GAMUT_TABLE_LOWER_GB);
+ for (j = 0; j < gamut_data->tbl_size[i] - 1 ; j++) {
+ gamut_val = gamut_data->c1_c2_data[i][j + 1];
+ gamut_val = (gamut_val << 32) |
+ gamut_data->c0_data[i][j];
+ writeq_relaxed(gamut_val,
+ base_addr + GAMUT_TABLE_UPPER_R);
}
+ writel_relaxed(gamut_data->c0_data[i][j],
+ base_addr + GAMUT_TABLE_UPPER_R);
if ((i >= MDP_GAMUT_SCALE_OFF_TABLE_NUM) ||
(!gamut_data->map_en))
continue;
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index 16bb48e22bee..4698d441f365 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -56,6 +56,9 @@ struct panel_id {
#define DSC_PPS_LEN 128
#define INTF_EVENT_STR(x) #x
+/* HDR properties count */
+#define DISPLAY_PRIMARIES_COUNT 8 /* WRGB x and y values */
+
static inline const char *mdss_panel2str(u32 panel)
{
static const char const *names[] = {
@@ -734,6 +737,19 @@ struct mdss_dsi_dual_pu_roi {
bool enabled;
};
+struct mdss_panel_hdr_properties {
+ bool hdr_enabled;
+
+ /* WRGB X and y values arrayed in format */
+ /* [WX, WY, RX, RY, GX, GY, BX, BY] */
+ u32 display_primaries[DISPLAY_PRIMARIES_COUNT];
+
+ /* peak brightness supported by panel */
+ u32 peak_brightness;
+ /* Blackness level supported by panel */
+ u32 blackness_level;
+};
+
struct mdss_panel_info {
u32 xres;
u32 yres;
@@ -878,6 +894,9 @@ struct mdss_panel_info {
/* stores initial adaptive variable refresh vtotal value */
u32 saved_avr_vtotal;
+
+	/* HDR properties of display panel */
+	struct mdss_panel_hdr_properties hdr_properties;
};
struct mdss_panel_timing {
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8ab6238c9299..56f7e2521202 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -196,6 +196,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
num = min(num, ARRAY_SIZE(vb->pfns));
mutex_lock(&vb->balloon_lock);
+ /* We can't release more pages than taken */
+ num = min(num, (size_t)vb->num_pages);
for (vb->num_pfns = 0; vb->num_pfns < num;
vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
page = balloon_page_dequeue(vb_dev_info);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ee663c458b20..dc2b94142f53 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -202,6 +202,8 @@ static inline int virtqueue_add(struct virtqueue *_vq,
* host should service the ring ASAP. */
if (out_sgs)
vq->notify(&vq->vq);
+ if (indirect)
+ kfree(desc);
END_USE(vq);
return -ENOSPC;
}
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 0e2f43bccf1f..0c427d6a12d1 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
goto out;
}
- hdq_data->hdq_irqstatus = 0;
-
if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 9c234209d8b5..47a4177b16d2 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
err = conf_space_read(dev, cfg_entry, field_start,
&tmp_val);
if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
tmp_val = 0;
err = xen_pcibk_config_read(dev, field_start,
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46518c8..531e76474983 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
+ } else {
+ list_for_each_entry(trans, &u->transactions, list)
+ if (trans->handle.id == u->u.msg.tx_id)
+ break;
+ if (&trans->list == &u->transactions)
+ return -ESRCH;
}
reply = xenbus_dev_request_and_reply(&u->u.msg);
if (IS_ERR(reply)) {
- kfree(trans);
+ if (msg_type == XS_TRANSACTION_START)
+ kfree(trans);
rc = PTR_ERR(reply);
goto out;
}
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
list_add(&trans->list, &u->transactions);
}
} else if (u->u.msg.type == XS_TRANSACTION_END) {
- list_for_each_entry(trans, &u->transactions, list)
- if (trans->handle.id == u->u.msg.tx_id)
- break;
- BUG_ON(&trans->list == &u->transactions);
list_del(&trans->list);
-
kfree(trans);
}
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index ba804f3d8278..ce65591b4168 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -250,9 +250,6 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
mutex_unlock(&xs_state.request_mutex);
- if (IS_ERR(ret))
- return ret;
-
if ((msg->type == XS_TRANSACTION_END) ||
((req_msg.type == XS_TRANSACTION_START) &&
(msg->type == XS_ERROR)))