author	Marek Vasut <marek.vasut+renesas@mailbox.org>	2023-08-16 17:05:55 +0200
committer	Tom Rini <trini@konsulko.com>	2023-08-24 13:47:44 -0400
commit	c5b3e5cd3d573bd3615c09f1aaa19291608e2093 (patch)
tree	d33747464a80f321ea3feac3de85ddfb65d1c2a9
parent	12675cb100a860ec7f8fe6df36bb0f7602797e29 (diff)
ufs: Implement cache management
Add a function to flush and invalidate cache over the request and response queue entries, and perform a flush and optional invalidate over block layer data that is passed into the UFS layer. This makes it possible to use UFS with caches enabled.

Signed-off-by: Marek Vasut <marek.vasut+renesas@mailbox.org>
Reviewed-by: Bhupesh Sharma <bhupesh.sharma@linaro.org>
Tested-by: Bhupesh Sharma <bhupesh.sharma@linaro.org>
 drivers/ufs/ufs.c | 42 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 41 insertions(+), 1 deletion(-)
diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c
index 041caee..7c48d57 100644
--- a/drivers/ufs/ufs.c
+++ b/drivers/ufs/ufs.c
@@ -693,6 +693,21 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
}
/**
+ * ufshcd_cache_flush_and_invalidate - Flush and invalidate cache
+ *
+ * Flush and invalidate cache over the aligned address..address+size range.
+ * The invalidation is performed to avoid stale data in the cache.
+ */
+static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size)
+{
+ uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+ unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);
+
+ flush_dcache_range(aaddr, aaddr + asize);
+ invalidate_dcache_range(aaddr, aaddr + asize);
+}
+
+/**
* ufshcd_prepare_req_desc_hdr() - Fills the requests header
* descriptor according to request
*/
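The helper added above widens the requested span to whole cachelines before touching it, since flush_dcache_range() and invalidate_dcache_range() operate at cacheline granularity. A minimal standalone sketch of that arithmetic, assuming a hypothetical ARCH_DMA_MINALIGN of 64 (the real value comes from the architecture headers):

#include <stdint.h>
#include <stdio.h>

#define ARCH_DMA_MINALIGN 64	/* assumed here; arch-specific in U-Boot */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uintptr_t addr = 0x1234;	/* unaligned start */
	unsigned long size = 100;	/* unaligned length */

	/* Align the start down, round the length up to whole cachelines. */
	uintptr_t aaddr = addr & ~(uintptr_t)(ARCH_DMA_MINALIGN - 1);
	unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);

	/* Prints: maintain [0x1200, 0x1280) */
	printf("maintain [0x%lx, 0x%lx)\n", (unsigned long)aaddr,
	       (unsigned long)(aaddr + asize));
	return 0;
}

Note that rounding the size up only covers the tail completely when the buffer start is itself cacheline-aligned; the descriptor memory this helper is applied to is expected to be allocated that way.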
@@ -735,6 +750,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
req_desc->header.dword_3 = 0;
req_desc->prd_table_length = 0;
+
+ ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
}
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
@@ -763,10 +780,15 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);
/* Copy the Descriptor */
- if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
memcpy(ucd_req_ptr + 1, query->descriptor, len);
+ ufshcd_cache_flush_and_invalidate(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
+ } else {
+ ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
+ }
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+ ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
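Two details in this hunk are worth spelling out: for a WRITE_DESC query the descriptor payload is copied to ucd_req_ptr + 1, directly behind the request UPIU, which is why that maintenance window spans 2 * sizeof(*ucd_req_ptr); and the response slot is flushed-and-invalidated immediately after being zeroed, so the zeroes reach memory and the CPU's eventual read of the response fetches device-written data rather than stale cachelines.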
static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
@@ -783,6 +805,9 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
ucd_req_ptr->header.dword_2 = 0;
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
+ ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
/**
@@ -1409,6 +1434,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+ ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
+ ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
@@ -1423,6 +1450,7 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
struct utp_transfer_req_desc *req_desc = hba->utrdl;
struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
+ uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1);
ulong datalen = pccb->datalen;
int table_length;
u8 *buf;
@@ -1430,9 +1458,19 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
if (!datalen) {
req_desc->prd_table_length = 0;
+ ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
return;
}
+ if (pccb->dma_dir == DMA_TO_DEVICE) { /* Write to device */
+ flush_dcache_range(aaddr, aaddr +
+ ALIGN(datalen, ARCH_DMA_MINALIGN));
+ }
+
+ /* In any case, invalidate cache to avoid stale data in it. */
+ invalidate_dcache_range(aaddr, aaddr +
+ ALIGN(datalen, ARCH_DMA_MINALIGN));
+
table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
buf = pccb->pdata;
i = table_length;
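The data-buffer handling above is the one place where transfer direction matters: a buffer headed to the device must be flushed so the controller reads the CPU's writes from DRAM, while the unconditional invalidate ensures that once the device has DMA'd data in, later CPU reads miss the cache and see it. A condensed sketch of the same pattern; flush_dcache_range() and invalidate_dcache_range() are the real U-Boot APIs, the wrapper itself is hypothetical and the header locations are indicative:

#include <cpu_func.h>		/* flush_dcache_range(), invalidate_dcache_range() */
#include <linux/kernel.h>	/* ALIGN() */
#include <asm/cache.h>		/* ARCH_DMA_MINALIGN */

/* Hypothetical wrapper condensing the logic in prepare_prdt_table(). */
static void prepare_dma_buffer(void *buf, unsigned long len, bool to_device)
{
	uintptr_t start = (uintptr_t)buf & ~(uintptr_t)(ARCH_DMA_MINALIGN - 1);
	uintptr_t end = start + ALIGN(len, ARCH_DMA_MINALIGN);

	if (to_device)			/* DMA_TO_DEVICE: push CPU writes out */
		flush_dcache_range(start, end);

	/* Always drop the lines so post-DMA reads fetch fresh data. */
	invalidate_dcache_range(start, end);
}

The invalidate is issued before the transfer starts, so it also relies on the buffer's cachelines not being pulled back in before completion, which the driver's polled, single-threaded environment is expected to uphold.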
@@ -1446,6 +1484,8 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);
req_desc->prd_table_length = table_length;
+ ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length);
+ ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
}
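The ordering in this final hunk is what makes the sequence safe: the PRDT entries are flushed before the transfer request descriptor that references them, so by the time req_desc (with its now non-zero prd_table_length) reaches memory and the doorbell is rung, every structure it points at is already coherent in DRAM.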
static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)