From e0312feddbe995a98e197ac5b7b7ec03fb64bfb0 Mon Sep 17 00:00:00 2001 From: "harshal.patil" Date: Tue, 22 Oct 2024 17:00:15 +0530 Subject: [PATCH] fix(mbedtls/aes): Fix external memory corruption caused by unaligned length cache sync Fixes the memory corruption issue that arises due to external memory cache sync of unaligned length bytes when the L2 cache line size is greater than the L1 cache line size --- components/mbedtls/port/aes/dma/esp_aes_dma_core.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/components/mbedtls/port/aes/dma/esp_aes_dma_core.c b/components/mbedtls/port/aes/dma/esp_aes_dma_core.c index 5846c9f081..c722148baf 100644 --- a/components/mbedtls/port/aes/dma/esp_aes_dma_core.c +++ b/components/mbedtls/port/aes/dma/esp_aes_dma_core.c @@ -325,21 +325,15 @@ static inline void *aes_dma_calloc(size_t num, size_t size, uint32_t caps, size_ return heap_caps_aligned_calloc(DMA_DESC_MEM_ALIGN_SIZE, num, size, caps | MALLOC_CAP_DMA | MALLOC_CAP_8BIT); } -static inline esp_err_t dma_desc_link(crypto_dma_desc_t *dmadesc, size_t crypto_dma_desc_num) +static inline esp_err_t dma_desc_link(crypto_dma_desc_t *dmadesc, size_t crypto_dma_desc_num, size_t buffer_cache_line_size) { esp_err_t ret = ESP_OK; for (int i = 0; i < crypto_dma_desc_num; i++) { dmadesc[i].dw0.suc_eof = ((i == crypto_dma_desc_num - 1) ? 1 : 0); dmadesc[i].next = ((i == crypto_dma_desc_num - 1) ? 
NULL : &dmadesc[i+1]); #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE - /* Write back both input buffers and output buffers to clear any cache dirty bit if set - If we want to remove `ESP_CACHE_MSYNC_FLAG_UNALIGNED` aligned flag then we need to pass - cache msync size = ALIGN_UP(dma_desc.size, cache_line_size), where cache_line_size is the - the cache line size coressponding to the buffer that is being synced, instead of dma_desc.size - Keeping the `ESP_CACHE_MSYNC_FLAG_UNALIGNED` flag just because it should not look like - we are syncing extra bytes due to ALIGN_UP'ed size but just the number of bytes that - are needed in the operation. */ - ret = esp_cache_msync(dmadesc[i].buffer, dmadesc[i].dw0.length, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED); + /* Write back both input buffers and output buffers to clear any cache dirty bit if set */ + ret = esp_cache_msync(dmadesc[i].buffer, ALIGN_UP(dmadesc[i].dw0.length, buffer_cache_line_size), ESP_CACHE_MSYNC_FLAG_DIR_C2M); if (ret != ESP_OK) { return ret; } @@ -471,7 +465,7 @@ static esp_err_t generate_descriptor_list(const uint8_t *buffer, const size_t le populated_dma_descs += 1; } - if (dma_desc_link(dma_descriptors, dma_descs_needed) != ESP_OK) { + if (dma_desc_link(dma_descriptors, dma_descs_needed, cache_line_size) != ESP_OK) { ESP_LOGE(TAG, "DMA descriptors cache sync C2M failed"); return ESP_FAIL; }