summaryrefslogtreecommitdiff
path: root/drivers/scsi/ufs/ufs-qcom.h
diff options
context:
space:
mode:
authorGilad Broner <gbroner@codeaurora.org>2015-06-29 11:58:29 +0300
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-22 11:00:12 -0700
commit38da06a7709696c2c1179ae0f7ffc8d5b782ae92 (patch)
tree33a14b01b0ad5ac8ca6e4df4625d95fde422e960 /drivers/scsi/ufs/ufs-qcom.h
parent58b6108338e3512d07c2590d1af1f64ab24cc056 (diff)
scsi: ufs: update pm qos implementation
Current UFS PM QoS design and implementation do not seem to give the desired increase in performance. This change revisits the PM QoS implementation trying to improve performance by making the following changes: * de-couple voting from clock scaling decision so voting occurs from the first request and unvoting on the completion of the last request regardless to clock scaling state. Otherwise, suspending the PM QoS voting during the time it takes to decide on clock up-scaling, seems to degrade random access performance. * vote on a per-cluster basis by inspecting the request object's cpu field. This follows the soft-irq allocation scheme in the block layer, so the cpu to which the block layer schedules the soft-irq will not be put into deep LPM. We should note that PM QoS voting using cpu mask for specific cpus is a feature of the qcom specific PM QoS implementation. Change-Id: I427d202aeb45cd284a3bb128e26e519212614801 Signed-off-by: Gilad Broner <gbroner@codeaurora.org> Signed-off-by: Krishna Konda <kkonda@codeaurora.org> [venkatg@codeaurora.org: resolved merge conflicts] Signed-off-by: Venkat Gopalakrishnan <venkatg@codeaurora.org>
Diffstat (limited to 'drivers/scsi/ufs/ufs-qcom.h')
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h53
1 files changed, 53 insertions, 0 deletions
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index d3f440ddf0ff..a8cc22ebb0ee 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -15,6 +15,7 @@
#define UFS_QCOM_H_
#include <linux/phy/phy.h>
+#include <linux/pm_qos.h>
#include "ufshcd.h"
#define MAX_UFS_QCOM_HOSTS 1
@@ -247,6 +248,54 @@ struct ufs_qcom_testbus {
u8 select_minor;
};
+/* PM QoS voting state - per-cluster voting state machine */
+enum ufs_qcom_pm_qos_state {
+	PM_QOS_UNVOTED, /* no PM QoS vote currently held */
+	PM_QOS_VOTED, /* PM QoS vote currently held */
+	PM_QOS_REQ_VOTE, /* vote requested; presumably serviced by vote_work */
+	PM_QOS_REQ_UNVOTE, /* unvote requested; presumably serviced by unvote_work */
+};
+
+/**
+ * struct ufs_qcom_pm_qos_cpu_group - data related to cluster PM QoS voting
+ * logic
+ * @req: request object registered with the PM QoS framework
+ * @vote_work: work object running the voting procedure
+ * @unvote_work: work object running the un-voting procedure
+ * @host: back pointer to the owning ufs_qcom_host structure
+ * @state: current state of the voting state machine
+ * @latency_us: requested latency value used for cluster voting, in
+ * microseconds
+ * @mask: mask of the cpus belonging to this cluster
+ * @active_reqs: number of active requests on this cluster
+ */
+struct ufs_qcom_pm_qos_cpu_group {
+	struct pm_qos_request req;
+	struct work_struct vote_work;
+	struct work_struct unvote_work;
+	struct ufs_qcom_host *host;
+	enum ufs_qcom_pm_qos_state state;
+	s32 latency_us;
+	cpumask_t mask;
+	int active_reqs;
+};
+
+/**
+ * struct ufs_qcom_pm_qos - data related to PM QoS voting logic
+ * @groups: PM QoS cpu group state array, one entry per cluster
+ * @workq: single threaded workqueue to run PM QoS voting/unvoting
+ * @num_groups: number of entries in the @groups array
+ * @default_cpu: cpu used for voting when a request does not specify a cpu
+ * @is_enabled: flag specifying whether voting logic is enabled
+ */
+struct ufs_qcom_pm_qos {
+	struct ufs_qcom_pm_qos_cpu_group *groups;
+	struct workqueue_struct *workq;
+	int num_groups;
+	int default_cpu;
+	bool is_enabled;
+};
+
struct ufs_qcom_host {
/*
* Set this capability if host controller supports the QUniPro mode
@@ -271,6 +320,10 @@ struct ufs_qcom_host {
struct clk *tx_l0_sync_clk;
struct clk *rx_l1_sync_clk;
struct clk *tx_l1_sync_clk;
+
+ /* PM Quality-of-Service (QoS) data */
+ struct ufs_qcom_pm_qos pm_qos;
+
bool is_lane_clks_enabled;
bool sec_cfg_updated;
struct ufs_qcom_ice_data ice;