-rw-r--r--   drivers/mmc/card/block.c  |  28
-rw-r--r--   drivers/mmc/core/core.c   |  34
-rw-r--r--   drivers/mmc/core/mmc.c    |  31
-rw-r--r--   include/linux/mmc/card.h  |   3
-rw-r--r--   include/linux/mmc/core.h  |   1
-rw-r--r--   include/linux/mmc/mmc.h   |   3
6 files changed, 98 insertions, 2 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index eaee850b7d37..d1f95b57deab 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1792,7 +1792,30 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
int ret = 0;
- ret = mmc_flush_cache(card);
+ if (!req)
+ return 0;
+
+ if (req->cmd_flags & REQ_BARRIER) {
+ /*
+ * If eMMC cache flush policy is set to 1, then the device
+ * shall flush the requests in First-In-First-Out (FIFO) order.
+ * In this case, as per spec, the host must not send any cache
+ * barrier requests as they are redundant and add unnecessary
+ * overhead to both device and host.
+ */
+ if (card->ext_csd.cache_flush_policy & 1)
+ goto end_req;
+
+ /*
+ * If barriers are not supported or not enabled on the device,
+ * fall back to a cache flush.
+ */
+ ret = mmc_cache_barrier(card);
+ if (ret)
+ ret = mmc_flush_cache(card);
+ } else if (req->cmd_flags & REQ_FLUSH) {
+ ret = mmc_flush_cache(card);
+ }
if (ret == -ENODEV) {
pr_err("%s: %s: restart mmc card",
req->rq_disk->disk_name, __func__);
@@ -1809,6 +1832,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
ret = -EIO;
}
+end_req:
blk_end_request_all(req, ret);
return ret ? 0 : 1;
@@ -3402,7 +3426,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (cmd_flags & REQ_FLUSH) {
+ } else if (cmd_flags & (REQ_FLUSH | REQ_BARRIER)) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
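
For reference, the barrier-versus-flush decision that the block.c hunk above adds to mmc_blk_issue_flush() can be restated as the standalone sketch below. This is illustrative only and not part of the patch; example_issue_cache_barrier() is a hypothetical name, while mmc_cache_barrier() and mmc_flush_cache() are the helpers the patch actually uses.

static int example_issue_cache_barrier(struct mmc_card *card)
{
	int ret;

	/*
	 * CACHE_FLUSH_POLICY bit 0 set: the device already flushes its
	 * cache in FIFO order, so an explicit barrier is redundant.
	 */
	if (card->ext_csd.cache_flush_policy & 1)
		return 0;

	/*
	 * Prefer the lightweight barrier; fall back to a full cache
	 * flush if barriers are unsupported or disabled on the device.
	 */
	ret = mmc_cache_barrier(card);
	if (ret)
		ret = mmc_flush_cache(card);

	return ret;
}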
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 54f9459e4303..e108a234dd07 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3781,6 +3781,40 @@ int mmc_power_restore_host(struct mmc_host *host)
EXPORT_SYMBOL(mmc_power_restore_host);
/*
+ * Add a barrier request to the requests in the cache
+ */
+int mmc_cache_barrier(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+
+ if (!card->ext_csd.cache_ctrl ||
+ (card->quirks & MMC_QUIRK_CACHE_DISABLE))
+ goto out;
+
+ if (!mmc_card_mmc(card))
+ goto out;
+
+ if (!card->ext_csd.barrier_en)
+ return -ENOTSUPP;
+
+ /*
+ * If a device receives maximum supported barrier
+ * requests, a barrier command is treated as a
+ * flush command. Hence, it is better to use the
+ * flush timeout instead of a generic CMD6 timeout.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 0x2, 0);
+ if (err)
+ pr_err("%s: cache barrier error %d\n",
+ mmc_hostname(host), err);
+out:
+ return err;
+}
+EXPORT_SYMBOL(mmc_cache_barrier);
+
+/*
* Flush the cache to the non-volatile storage.
*/
int mmc_flush_cache(struct mmc_card *card)
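
Note that mmc_cache_barrier() and mmc_flush_cache() drive the same write-only EXT_CSD byte (FLUSH_CACHE, index 32) through CMD6: the patch writes 0x2 to queue a barrier, whereas a flush writes 0x1. A minimal sketch of that distinction, using a hypothetical wrapper and the same zero switch timeout the patch passes in the barrier case:

static int example_trigger_flush_cache(struct mmc_card *card, bool barrier)
{
	/* 0x2 queues a cache barrier, 0x1 requests a full cache flush */
	u8 value = barrier ? 0x2 : 0x1;

	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			  EXT_CSD_FLUSH_CACHE, value, 0);
}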
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 9ab7b7c74796..ed6af9d1d883 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -625,9 +625,19 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
mmc_hostname(card->host),
card->ext_csd.cmdq_depth);
}
+ card->ext_csd.barrier_support =
+ ext_csd[EXT_CSD_BARRIER_SUPPORT];
+ card->ext_csd.cache_flush_policy =
+ ext_csd[EXT_CSD_CACHE_FLUSH_POLICY];
+ pr_info("%s: cache barrier support %d flush policy %d\n",
+ mmc_hostname(card->host),
+ card->ext_csd.barrier_support,
+ card->ext_csd.cache_flush_policy);
} else {
card->ext_csd.cmdq_support = 0;
card->ext_csd.cmdq_depth = 0;
+ card->ext_csd.barrier_support = 0;
+ card->ext_csd.cache_flush_policy = 0;
}
/* eMMC v5 or later */
@@ -1975,6 +1985,27 @@ reinit:
} else {
card->ext_csd.cache_ctrl = 1;
}
+ /* enable cache barrier if supported by the device */
+ if (card->ext_csd.cache_ctrl &&
+ card->ext_csd.barrier_support) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BARRIER_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for BARRIER_CTRL fails %d\n",
+ mmc_hostname(host), __func__,
+ err);
+ goto free_card;
+ }
+ if (err) {
+ pr_warn("%s: Barrier is supported but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.barrier_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.barrier_en = 1;
+ }
+ }
} else {
/*
* mmc standard doesn't say what is the card default
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 609a792e9e5c..0f4e22db31ab 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -119,10 +119,13 @@ struct mmc_ext_csd {
u8 raw_pwr_cl_ddr_52_195; /* 238 */
u8 raw_pwr_cl_ddr_52_360; /* 239 */
u8 raw_pwr_cl_ddr_200_360; /* 253 */
+ u8 cache_flush_policy; /* 240 */
u8 raw_bkops_status; /* 246 */
u8 raw_sectors[4]; /* 212 - 4 bytes */
u8 cmdq_depth; /* 307 */
u8 cmdq_support; /* 308 */
+ u8 barrier_support; /* 486 */
+ u8 barrier_en; /* set once BARRIER_CTRL has been enabled */
unsigned int feature_support;
#define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index fcad0aa31b2e..22c610609a25 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -185,6 +185,7 @@ extern void mmc_put_card(struct mmc_card *card);
extern void mmc_set_ios(struct mmc_host *host);
extern int mmc_flush_cache(struct mmc_card *);
+extern int mmc_cache_barrier(struct mmc_card *);
extern int mmc_detect_card_removed(struct mmc_host *host);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 07b04c6e5c64..5612781ef522 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -221,6 +221,7 @@ struct _mmc_csd {
*/
#define EXT_CSD_CMDQ 15 /* R/W */
+#define EXT_CSD_BARRIER_CTRL 31 /* R/W */
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
@@ -273,6 +274,7 @@ struct _mmc_csd {
#define EXT_CSD_PWR_CL_200_360 237 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */
+#define EXT_CSD_CACHE_FLUSH_POLICY 240 /* RO */
#define EXT_CSD_BKOPS_STATUS 246 /* RO */
#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
@@ -281,6 +283,7 @@ struct _mmc_csd {
#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
#define EXT_CSD_CMDQ_DEPTH 307 /* RO */
#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */
+#define EXT_CSD_BARRIER_SUPPORT 486 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
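
As a usage note for the two new EXT_CSD indices defined above: the mmc.c hunk reads them straight out of the raw EXT_CSD buffer during mmc_decode_ext_csd(). A minimal sketch of that decode step, with a hypothetical helper name:

static void example_decode_barrier_fields(struct mmc_card *card,
					  const u8 *ext_csd)
{
	/* EXT_CSD[486]: non-zero when the device supports barrier requests */
	card->ext_csd.barrier_support = ext_csd[EXT_CSD_BARRIER_SUPPORT];

	/* EXT_CSD[240]: bit 0 set means cached requests are flushed in FIFO order */
	card->ext_csd.cache_flush_policy = ext_csd[EXT_CSD_CACHE_FLUSH_POLICY];
}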