From 8c8affc81255152a7da697a68de34efb39ae2b23 Mon Sep 17 00:00:00 2001 From: Armando Date: Mon, 14 Aug 2023 13:58:35 +0800 Subject: [PATCH] feat(mmu): support mmu and flash mmap driver on p4 --- components/esp_mm/esp_mmu_map.c | 104 ++++++--- .../esp_mm/port/esp32p4/ext_mem_layout.c | 8 +- .../test_apps/mmap_hw/main/test_mmap_hw.c | 12 +- .../test_apps/mmap_hw/sdkconfig.defaults | 1 + components/hal/esp32/include/hal/mmu_ll.h | 4 +- components/hal/esp32c2/include/hal/mmu_ll.h | 11 +- components/hal/esp32c3/include/hal/mmu_ll.h | 10 +- components/hal/esp32c6/include/hal/mmu_ll.h | 12 +- components/hal/esp32h2/include/hal/mmu_ll.h | 12 +- components/hal/esp32p4/include/hal/cache_ll.h | 4 +- components/hal/esp32p4/include/hal/mmu_ll.h | 221 ++++++++++++------ components/hal/esp32s2/include/hal/mmu_ll.h | 4 +- components/hal/esp32s3/include/hal/mmu_ll.h | 10 +- components/hal/include/hal/mmu_types.h | 9 - components/hal/mmu_hal.c | 31 +-- .../soc/esp32p4/include/soc/ext_mem_defs.h | 91 +++----- components/spi_flash/flash_mmap.c | 9 +- 17 files changed, 327 insertions(+), 226 deletions(-) diff --git a/components/esp_mm/esp_mmu_map.c b/components/esp_mm/esp_mmu_map.c index 6f7e201244..9fbc9d50b5 100644 --- a/components/esp_mm/esp_mmu_map.c +++ b/components/esp_mm/esp_mmu_map.c @@ -317,9 +317,9 @@ esp_err_t esp_mmu_map_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, uint32_t vaddr = 0; if (caps & MMU_MEM_CAP_EXEC) { - vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION); + vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION, target); } else { - vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA); + vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA, target); } *out_ptr = (void *)vaddr; @@ -336,7 +336,6 @@ IRAM_ATTR esp_err_t esp_mmu_paddr_find_caps(const esp_paddr_t paddr, mmu_mem_cap return ESP_ERR_INVALID_ARG; } - for (int i = 0; i < s_mmu_ctx.num_regions; i++) { region = &s_mmu_ctx.mem_regions[i]; @@ -378,6 +377,38 @@ static void IRAM_ATTR 
NOINLINE_ATTR s_do_cache_invalidate(uint32_t vaddr_start, #endif // CONFIG_IDF_TARGET_ESP32 } + +#if MMU_LL_MMU_PER_TARGET +FORCE_INLINE_ATTR uint32_t s_mapping_operation(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size) +{ + uint32_t actual_mapped_len = 0; + uint32_t mmu_id = 0; + if (target == MMU_TARGET_FLASH0) { + mmu_id = MMU_LL_FLASH_MMU_ID; + } else { + mmu_id = MMU_LL_PSRAM_MMU_ID; + } + mmu_hal_map_region(mmu_id, target, vaddr_start, paddr_start, size, &actual_mapped_len); + + return actual_mapped_len; +} +#else +FORCE_INLINE_ATTR uint32_t s_mapping_operation(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size) +{ + uint32_t actual_mapped_len = 0; + + mmu_hal_map_region(0, target, vaddr_start, paddr_start, size, &actual_mapped_len); +#if (SOC_MMU_PERIPH_NUM == 2) +#if !CONFIG_FREERTOS_UNICORE + mmu_hal_map_region(1, target, vaddr_start, paddr_start, size, &actual_mapped_len); +#endif // #if !CONFIG_FREERTOS_UNICORE +#endif // #if (SOC_MMU_PERIPH_NUM == 2) + + return actual_mapped_len; +} +#endif + + static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size) { /** @@ -387,16 +418,7 @@ static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t v */ spi_flash_disable_interrupts_caches_and_other_cpu(); - uint32_t actual_mapped_len = 0; - mmu_hal_map_region(0, target, vaddr_start, paddr_start, size, &actual_mapped_len); -#if (SOC_MMU_PERIPH_NUM == 2) -#if !CONFIG_FREERTOS_UNICORE -#ifndef CONFIG_IDF_TARGET_ESP32P4 // for spi flash mmap, we always use flash mmu - //TODO: IDF-7509 - mmu_hal_map_region(1, target, vaddr_start, paddr_start, size, &actual_mapped_len); -#endif -#endif // #if !CONFIG_FREERTOS_UNICORE -#endif // #if (SOC_MMU_PERIPH_NUM == 2) + uint32_t actual_mapped_len = s_mapping_operation(target, vaddr_start, paddr_start, size); cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, vaddr_start, 
size); cache_ll_l1_enable_bus(0, bus_mask); @@ -532,22 +554,16 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target, new_block->laddr_end = new_block->laddr_start + aligned_size; new_block->size = aligned_size; new_block->caps = caps; -#if CONFIG_IDF_TARGET_ESP32P4 - //TODO: IDF-7509 - new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_FLASH); - new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_FLASH); -#else - if (caps & MMU_MEM_CAP_EXEC) { - new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_INSTRUCTION); - new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_INSTRUCTION); - } else { - new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_DATA); - new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_DATA); - } -#endif new_block->paddr_start = paddr_start; new_block->paddr_end = paddr_start + aligned_size; new_block->target = target; + if (caps & MMU_MEM_CAP_EXEC) { + new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_INSTRUCTION, target); + new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_INSTRUCTION, target); + } else { + new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_DATA, target); + new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_DATA, target); + } //do mapping s_do_mapping(target, new_block->vaddr_start, paddr_start, aligned_size); @@ -567,6 +583,32 @@ err: } +#if MMU_LL_MMU_PER_TARGET +FORCE_INLINE_ATTR void s_unmapping_operation(uint32_t vaddr_start, uint32_t size) +{ + uint32_t mmu_id = 0; + mmu_target_t target = mmu_ll_vaddr_to_target(vaddr_start); + + if (target == MMU_TARGET_FLASH0) { + mmu_id = MMU_LL_FLASH_MMU_ID; + } else { + mmu_id = MMU_LL_PSRAM_MMU_ID; + } + mmu_hal_unmap_region(mmu_id, vaddr_start, size); +} +#else +FORCE_INLINE_ATTR void 
s_unmapping_operation(uint32_t vaddr_start, uint32_t size) +{ + mmu_hal_unmap_region(0, vaddr_start, size); +#if (SOC_MMU_PERIPH_NUM == 2) +#if !CONFIG_FREERTOS_UNICORE + mmu_hal_unmap_region(1, vaddr_start, size); +#endif // #if !CONFIG_FREERTOS_UNICORE +#endif // #if (SOC_MMU_PERIPH_NUM == 2) +} +#endif + + static void IRAM_ATTR NOINLINE_ATTR s_do_unmapping(uint32_t vaddr_start, uint32_t size) { /** @@ -576,15 +618,7 @@ static void IRAM_ATTR NOINLINE_ATTR s_do_unmapping(uint32_t vaddr_start, uint32_ */ spi_flash_disable_interrupts_caches_and_other_cpu(); - mmu_hal_unmap_region(0, vaddr_start, size); -#if (SOC_MMU_PERIPH_NUM == 2) -#if !CONFIG_FREERTOS_UNICORE -#ifndef CONFIG_IDF_TARGET_ESP32P4 // for flash mmap, we always use flash mmu - //TODO: IDF-7509 - mmu_hal_unmap_region(1, vaddr_start, size); -#endif -#endif // #if !CONFIG_FREERTOS_UNICORE -#endif // #if (SOC_MMU_PERIPH_NUM == 2) + s_unmapping_operation(vaddr_start, size); //enable Cache, after this function, internal RAM access is no longer mandatory spi_flash_enable_interrupts_caches_and_other_cpu(); diff --git a/components/esp_mm/port/esp32p4/ext_mem_layout.c b/components/esp_mm/port/esp32p4/ext_mem_layout.c index dab3eeced7..da21c367c4 100644 --- a/components/esp_mm/port/esp32p4/ext_mem_layout.c +++ b/components/esp_mm/port/esp32p4/ext_mem_layout.c @@ -18,17 +18,17 @@ const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = { [0] = { .start = SOC_MMU_FLASH_LINEAR_ADDRESS_LOW, .end = SOC_MMU_FLASH_LINEAR_ADDRESS_HIGH, - .size = SOC_MMU_FLASH_LINEAR_ADDRESS_SIZE, + .size = BUS_SIZE(SOC_MMU_FLASH_LINEAR), .bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0, .targets = MMU_TARGET_FLASH0, - .caps = MMU_MEM_CAP_FLASH | MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT, + .caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT, }, [1] = { .start = SOC_MMU_PSRAM_LINEAR_ADDRESS_LOW, .end = SOC_MMU_PSRAM_LINEAR_ADDRESS_HIGH, - 
.size = SOC_MMU_PSRAM_LINEAR_ADDRESS_SIZE, + .size = BUS_SIZE(SOC_MMU_PSRAM_LINEAR), .bus_id = CACHE_BUS_IBUS1 | CACHE_BUS_DBUS1, .targets = MMU_TARGET_PSRAM0, - .caps = MMU_MEM_CAP_PSRAM | MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT, + .caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT, }, }; diff --git a/components/esp_mm/test_apps/mmap_hw/main/test_mmap_hw.c b/components/esp_mm/test_apps/mmap_hw/main/test_mmap_hw.c index 6af3f9c9a7..e34a0c56d7 100644 --- a/components/esp_mm/test_apps/mmap_hw/main/test_mmap_hw.c +++ b/components/esp_mm/test_apps/mmap_hw/main/test_mmap_hw.c @@ -55,7 +55,6 @@ typedef struct test_block_info_ { } test_block_info_t; static LIST_HEAD(test_block_list_head_, test_block_info_) test_block_head; -static DRAM_ATTR uint8_t sector_buf[TEST_BLOCK_SIZE]; static void s_fill_random_data(uint8_t *buffer, size_t size, int random_seed) @@ -66,7 +65,7 @@ static void s_fill_random_data(uint8_t *buffer, size_t size, int random_seed) } } -static bool s_test_mmap_data_by_random(uint8_t *mblock_ptr, size_t size, int random_seed) +static bool s_test_mmap_data_by_random(uint8_t *mblock_ptr, size_t size, int random_seed, uint8_t *flash_ref_buf) { srand(random_seed); uint8_t *test_ptr = mblock_ptr; @@ -77,7 +76,7 @@ static bool s_test_mmap_data_by_random(uint8_t *mblock_ptr, size_t size, int ran printf("i: %d\n", i); printf("test_data: %d\n", test_data); printf("test_ptr[%d]: %d\n", i, test_ptr[i]); - printf("sector_buf[%d]: %d\n", i, sector_buf[i]); + printf("flash_ref_buf[%d]: %d\n", i, flash_ref_buf[i]); ESP_EARLY_LOGE(TAG, "FAIL!!!!!!"); return false; } @@ -87,6 +86,9 @@ static bool s_test_mmap_data_by_random(uint8_t *mblock_ptr, size_t size, int ran TEST_CASE("test all readable vaddr can map to flash", "[mmu]") { + uint8_t *sector_buf = heap_caps_calloc(1, TEST_BLOCK_SIZE, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + TEST_ASSERT(sector_buf); + //Get the partition used for SPI1 
erase operation const esp_partition_t *part = s_get_partition(); ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size); @@ -113,7 +115,7 @@ TEST_CASE("test all readable vaddr can map to flash", "[mmu]") ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr); if (ret == ESP_OK) { ESP_LOGI(TAG, "ptr is %p", ptr); - bool success = s_test_mmap_data_by_random((uint8_t *)ptr, sizeof(sector_buf), test_seed); + bool success = s_test_mmap_data_by_random((uint8_t *)ptr, sizeof(sector_buf), test_seed, sector_buf); TEST_ASSERT(success); } else if (ret == ESP_ERR_NOT_FOUND) { free(block_info); @@ -138,6 +140,8 @@ TEST_CASE("test all readable vaddr can map to flash", "[mmu]") block_to_free = LIST_NEXT(block_to_free, entries); free(temp); } + + free(sector_buf); } diff --git a/components/esp_mm/test_apps/mmap_hw/sdkconfig.defaults b/components/esp_mm/test_apps/mmap_hw/sdkconfig.defaults index 919fc46938..fa5b41640c 100644 --- a/components/esp_mm/test_apps/mmap_hw/sdkconfig.defaults +++ b/components/esp_mm/test_apps/mmap_hw/sdkconfig.defaults @@ -5,3 +5,4 @@ CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv" CONFIG_PARTITION_TABLE_FILENAME="partitions.csv" CONFIG_COMPILER_DUMP_RTL_FILES=y +CONFIG_HAL_ASSERTION_SILENT=y diff --git a/components/hal/esp32/include/hal/mmu_ll.h b/components/hal/esp32/include/hal/mmu_ll.h index a747b1e95d..9d3390126b 100644 --- a/components/hal/esp32/include/hal/mmu_ll.h +++ b/components/hal/esp32/include/hal/mmu_ll.h @@ -38,11 +38,13 @@ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) * * @param laddr linear address * @param vaddr_type virtual address type, could be instruction type or data type. 
See `mmu_vaddr_t` + * @param target virtual address aimed physical memory target, not used * * @return virtual address */ -static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type) +static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target) { + (void)target; uint32_t vaddr_base = 0; if (vaddr_type == MMU_VADDR_DATA) { vaddr_base = SOC_MMU_DBUS_VADDR_BASE; diff --git a/components/hal/esp32c2/include/hal/mmu_ll.h b/components/hal/esp32c2/include/hal/mmu_ll.h index eaf4a1bb78..f6ae30f188 100644 --- a/components/hal/esp32c2/include/hal/mmu_ll.h +++ b/components/hal/esp32c2/include/hal/mmu_ll.h @@ -36,11 +36,13 @@ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) * * @param laddr linear address * @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t` + * @param target virtual address aimed physical memory target, not used * * @return virtual address */ -static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type) +static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target) { + (void)target; uint32_t vaddr_base = 0; if (vaddr_type == MMU_VADDR_DATA) { vaddr_base = SOC_MMU_DBUS_VADDR_BASE; @@ -371,7 +373,12 @@ static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t e HAL_ASSERT(shift_code); } uint32_t laddr = entry_id << shift_code; - return mmu_ll_laddr_to_vaddr(laddr, type); + + /** + * For `mmu_ll_laddr_to_vaddr`, target is for compatibility on this chip. 
+ * Here we just pass MMU_TARGET_FLASH0 to get vaddr + */ + return mmu_ll_laddr_to_vaddr(laddr, type, MMU_TARGET_FLASH0); } #ifdef __cplusplus diff --git a/components/hal/esp32c3/include/hal/mmu_ll.h b/components/hal/esp32c3/include/hal/mmu_ll.h index 4e4440ee03..3131e4f9ca 100644 --- a/components/hal/esp32c3/include/hal/mmu_ll.h +++ b/components/hal/esp32c3/include/hal/mmu_ll.h @@ -36,11 +36,13 @@ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) * * @param laddr linear address * @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t` + * @param target virtual address aimed physical memory target, not used * * @return virtual address */ -static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type) +static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target) { + (void)target; uint32_t vaddr_base = 0; if (vaddr_type == MMU_VADDR_DATA) { vaddr_base = SOC_MMU_DBUS_VADDR_BASE; @@ -309,7 +311,11 @@ static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t e (void)mmu_id; uint32_t laddr = entry_id << 16; - return mmu_ll_laddr_to_vaddr(laddr, type); + /** + * For `mmu_ll_laddr_to_vaddr`, target is for compatibility on this chip. + * Here we just pass MMU_TARGET_FLASH0 to get vaddr + */ + return mmu_ll_laddr_to_vaddr(laddr, type, MMU_TARGET_FLASH0); } #ifdef __cplusplus diff --git a/components/hal/esp32c6/include/hal/mmu_ll.h b/components/hal/esp32c6/include/hal/mmu_ll.h index 2e224ec6a3..19e28a496b 100644 --- a/components/hal/esp32c6/include/hal/mmu_ll.h +++ b/components/hal/esp32c6/include/hal/mmu_ll.h @@ -36,11 +36,14 @@ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) * * @param laddr linear address * @param vaddr_type virtual address type, could be instruction type or data type. 
See `mmu_vaddr_t` + * @param target virtual address aimed physical memory target, not used * * @return virtual address */ -static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type) +static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target) { + (void)target; + (void)vaddr_type; //On ESP32C6, I/D share the same vaddr range return SOC_MMU_IBUS_VADDR_BASE | laddr; } @@ -391,7 +394,12 @@ static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t e HAL_ASSERT(shift_code); } uint32_t laddr = entry_id << shift_code; - return mmu_ll_laddr_to_vaddr(laddr, type); + + /** + * For `mmu_ll_laddr_to_vaddr`, target is for compatibility on this chip. + * Here we just pass MMU_TARGET_FLASH0 to get vaddr + */ + return mmu_ll_laddr_to_vaddr(laddr, type, MMU_TARGET_FLASH0); } #ifdef __cplusplus diff --git a/components/hal/esp32h2/include/hal/mmu_ll.h b/components/hal/esp32h2/include/hal/mmu_ll.h index 169ae88103..b2d5d34b92 100644 --- a/components/hal/esp32h2/include/hal/mmu_ll.h +++ b/components/hal/esp32h2/include/hal/mmu_ll.h @@ -37,11 +37,14 @@ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) * * @param laddr linear address * @param vaddr_type virtual address type, could be instruction type or data type. 
See `mmu_vaddr_t` + * @param target virtual address aimed physical memory target, not used * * @return virtual address */ -static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type) +static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target) { + (void)target; + (void)vaddr_type; //On ESP32H2, I/D share the same vaddr range return SOC_MMU_IBUS_VADDR_BASE | laddr; } @@ -417,7 +420,12 @@ static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t e HAL_ASSERT(shift_code); } uint32_t laddr = entry_id << shift_code; - return mmu_ll_laddr_to_vaddr(laddr, type); + + /** + * For `mmu_ll_laddr_to_vaddr`, target is for compatibility on this chip. + * Here we just pass MMU_TARGET_FLASH0 to get vaddr + */ + return mmu_ll_laddr_to_vaddr(laddr, type, MMU_TARGET_FLASH0); } diff --git a/components/hal/esp32p4/include/hal/cache_ll.h b/components/hal/esp32p4/include/hal/cache_ll.h index 7f3db05b02..b0c05b8faf 100644 --- a/components/hal/esp32p4/include/hal/cache_ll.h +++ b/components/hal/esp32p4/include/hal/cache_ll.h @@ -63,10 +63,10 @@ static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t v cache_bus_mask_t mask = 0; uint32_t vaddr_end = vaddr_start + len - 1; - if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < SINGLE_BANK_CACHE_ADDRESS_HIGH) { + if (vaddr_start >= DRAM_FLASH_ADDRESS_LOW && vaddr_end < DRAM_FLASH_ADDRESS_HIGH) { mask |= CACHE_BUS_IBUS0; mask |= CACHE_BUS_DBUS0; - } else if (vaddr_start >= DUAL_BANK_CACHE_ADDRESS_LOW && vaddr_end < DUAL_BANK_CACHE_ADDRESS_HIGH) { + } else if (vaddr_start >= DRAM_PSRAM_ADDRESS_LOW && vaddr_end < DRAM_PSRAM_ADDRESS_HIGH) { mask |= CACHE_BUS_IBUS1; mask |= CACHE_BUS_DBUS1; } else { diff --git a/components/hal/esp32p4/include/hal/mmu_ll.h b/components/hal/esp32p4/include/hal/mmu_ll.h index 5d5ce609b9..6b35881676 100644 --- a/components/hal/esp32p4/include/hal/mmu_ll.h +++ 
b/components/hal/esp32p4/include/hal/mmu_ll.h @@ -20,6 +20,12 @@ extern "C" { #endif +///< MMU is per target +#define MMU_LL_MMU_PER_TARGET 1 + +#define MMU_LL_FLASH_MMU_ID 0 +#define MMU_LL_PSRAM_MMU_ID 1 + /** * Convert MMU virtual address to linear address * @@ -29,7 +35,7 @@ extern "C" { */ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) { - return vaddr & SOC_MMU_LINEAR_FLASH_ADDR_MASK; + return vaddr & SOC_MMU_LINEAR_ADDR_MASK; } /** @@ -37,20 +43,44 @@ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) * * @param laddr linear address * @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t` + * @param target virtual address aimed physical memory target * * @return virtual address */ -static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type) +static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target) { - uint32_t raw_laddr = (laddr & ~SOC_MMU_MEM_PHYSICAL_LINEAR_CAP); + (void)vaddr_type; uint32_t vaddr_base = 0; - if (vaddr_type == MMU_VADDR_FLASH) { + if (target == MMU_TARGET_FLASH0) { vaddr_base = SOC_MMU_FLASH_VADDR_BASE; } else { vaddr_base = SOC_MMU_PSRAM_VADDR_BASE; } - return vaddr_base | raw_laddr; + return vaddr_base | laddr; +} + +/** + * Convert MMU virtual address to its target + * + * @param vaddr virtual address + * + * @return target paddr memory target + */ +__attribute__((always_inline)) +static inline mmu_target_t mmu_ll_vaddr_to_target(uint32_t vaddr) +{ + mmu_target_t target = MMU_TARGET_FLASH0; + + if (ADDRESS_IN_DRAM_FLASH(vaddr)) { + target = MMU_TARGET_FLASH0; + } else if (ADDRESS_IN_DRAM_PSRAM(vaddr)) { + target = MMU_TARGET_PSRAM0; + } else { + HAL_ASSERT(false); + } + + return target; } __attribute__((always_inline)) static inline bool mmu_ll_cache_encryption_enabled(void) @@ -83,9 +113,7 @@ static inline mmu_page_size_t mmu_ll_get_page_size(uint32_t mmu_id) __attribute__((always_inline)) static 
inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size) { - (void)mmu_id; - (void)size; - return; + HAL_ASSERT(size == MMU_PAGE_64KB); } /** @@ -105,7 +133,7 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t (void)mmu_id; (void)type; uint32_t vaddr_end = vaddr_start + len - 1; - return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end)); + return (ADDRESS_IN_DRAM_FLASH(vaddr_start) && ADDRESS_IN_DRAM_FLASH(vaddr_end)) || (ADDRESS_IN_DRAM_PSRAM(vaddr_start) && ADDRESS_IN_DRAM_PSRAM(vaddr_end)); } /** @@ -120,10 +148,18 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t */ static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len) { - (void)mmu_id; - return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) && - (len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) && - ((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)); + int max_paddr_page_num = 0; + if (mmu_id == MMU_LL_FLASH_MMU_ID) { + max_paddr_page_num = MMU_FLASH_MAX_PADDR_PAGE_NUM; + } else if (mmu_id == MMU_LL_PSRAM_MMU_ID) { + max_paddr_page_num = MMU_PSRAM_MAX_PADDR_PAGE_NUM; + } else { + HAL_ASSERT(false); + } + + return (paddr_start < (mmu_ll_get_page_size(mmu_id) * max_paddr_page_num)) && + (len < (mmu_ll_get_page_size(mmu_id) * max_paddr_page_num)) && + ((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * max_paddr_page_num)); } /** @@ -138,7 +174,6 @@ static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t pad __attribute__((always_inline)) static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr) { - (void)mmu_id; mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id); uint32_t shift_code = 0; switch (page_size) { @@ -173,7 +208,6 @@ static inline uint32_t 
mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr) __attribute__((always_inline)) static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target) { - (void)mmu_id; (void)target; mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id); uint32_t shift_code = 0; @@ -206,30 +240,32 @@ static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_ */ __attribute__((always_inline)) static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target) { - (void)mmu_id; - (void)target; - uint32_t index_reg, content_reg, sensitive, invalid_mask; - if (mmu_id == 0) { // flash mmu + uint32_t index_reg = 0; + uint32_t content_reg = 0; + uint32_t sensitive = 0; + + if (mmu_id == MMU_LL_FLASH_MMU_ID) { index_reg = SPI_MEM_C_MMU_ITEM_INDEX_REG; content_reg = SPI_MEM_C_MMU_ITEM_CONTENT_REG; - sensitive = MMU_SENSITIVE; - invalid_mask = MMU_INVALID_MASK; - } else { // psram mmu + sensitive = MMU_FLASH_SENSITIVE; + mmu_val |= MMU_FLASH_VALID; + mmu_val |= MMU_ACCESS_FLASH; + } else if (mmu_id == MMU_LL_PSRAM_MMU_ID) { index_reg = SPI_MEM_S_MMU_ITEM_INDEX_REG; content_reg = SPI_MEM_S_MMU_ITEM_CONTENT_REG; - sensitive = DMMU_SENSITIVE; - invalid_mask = DMMU_INVALID_MASK; - mmu_val |= MMU_PSRAM_ACCESS_SPIRAM; - + sensitive = MMU_PSRAM_SENSITIVE; + mmu_val |= MMU_PSRAM_VALID; + mmu_val |= MMU_ACCESS_PSRAM; + } else { + HAL_ASSERT(false); } - uint32_t mmu_raw_value; + if (mmu_ll_cache_encryption_enabled()) { mmu_val |= sensitive; } - /* Note: for ESP32-P4, invert invalid bit for compatible with upper-layer software */ - mmu_raw_value = mmu_val ^ invalid_mask; + REG_WRITE(index_reg, entry_id); - REG_WRITE(content_reg, mmu_raw_value); + REG_WRITE(content_reg, mmu_val); } /** @@ -241,29 +277,24 @@ __attribute__((always_inline)) static inline void mmu_ll_write_entry(uint32_t mm */ __attribute__((always_inline)) static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id) { - 
(void)mmu_id; - uint32_t mmu_raw_value; - uint32_t ret; - uint32_t index_reg, content_reg, sensitive, invalid_mask; - if (mmu_id == 0) { // flash mmu + uint32_t index_reg = 0; + uint32_t content_reg = 0; + uint32_t mmu_val = 0; + + if (mmu_id == MMU_LL_FLASH_MMU_ID) { index_reg = SPI_MEM_C_MMU_ITEM_INDEX_REG; content_reg = SPI_MEM_C_MMU_ITEM_CONTENT_REG; - sensitive = MMU_SENSITIVE; - invalid_mask = MMU_INVALID_MASK; - } else { // psram mmu + } else if (mmu_id == MMU_LL_PSRAM_MMU_ID) { index_reg = SPI_MEM_S_MMU_ITEM_INDEX_REG; content_reg = SPI_MEM_S_MMU_ITEM_CONTENT_REG; - sensitive = DMMU_SENSITIVE; - invalid_mask = DMMU_INVALID_MASK; + } else { + HAL_ASSERT(false); } + REG_WRITE(index_reg, entry_id); - mmu_raw_value = REG_READ(content_reg); - if (mmu_ll_cache_encryption_enabled()) { - mmu_raw_value &= ~sensitive; - } - /* Note: for ESP32-P4, invert invalid bit for compatible with upper-layer software */ - ret = mmu_raw_value ^ invalid_mask; - return ret; + mmu_val = REG_READ(content_reg); + + return mmu_val; } /** @@ -274,16 +305,24 @@ __attribute__((always_inline)) static inline uint32_t mmu_ll_read_entry(uint32_t */ __attribute__((always_inline)) static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id) { - uint32_t index_reg, content_reg; - if (mmu_id == 0) { // flash mmu + uint32_t index_reg = 0; + uint32_t content_reg = 0; + uint32_t invalid_mask = 0; + + if (mmu_id == MMU_LL_FLASH_MMU_ID) { index_reg = SPI_MEM_C_MMU_ITEM_INDEX_REG; content_reg = SPI_MEM_C_MMU_ITEM_CONTENT_REG; - } else { // psram mmu + invalid_mask = MMU_FLASH_INVALID; + } else if (mmu_id == MMU_LL_PSRAM_MMU_ID) { index_reg = SPI_MEM_S_MMU_ITEM_INDEX_REG; content_reg = SPI_MEM_S_MMU_ITEM_CONTENT_REG; + invalid_mask = MMU_PSRAM_INVALID; + } else { + HAL_ASSERT(false); } + REG_WRITE(index_reg, entry_id); - REG_WRITE(content_reg, MMU_INVALID); + REG_WRITE(content_reg, invalid_mask); } /** @@ -309,21 +348,32 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id) */ 
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id) { - uint32_t mmu_raw_value; - uint32_t index_reg, content_reg, invalid_mask; - if (mmu_id == 0) { // flash mmu + uint32_t mmu_raw_value = 0; + uint32_t index_reg = 0; + uint32_t content_reg = 0; + uint32_t valid_mask = 0; + + if (mmu_id == MMU_LL_FLASH_MMU_ID) { index_reg = SPI_MEM_C_MMU_ITEM_INDEX_REG; content_reg = SPI_MEM_C_MMU_ITEM_CONTENT_REG; - invalid_mask = MMU_INVALID_MASK; - } else { // psram mmu + valid_mask = MMU_FLASH_VALID; + } else if (mmu_id == MMU_LL_PSRAM_MMU_ID) { index_reg = SPI_MEM_S_MMU_ITEM_INDEX_REG; content_reg = SPI_MEM_S_MMU_ITEM_CONTENT_REG; - invalid_mask = DMMU_INVALID_MASK; + valid_mask = MMU_PSRAM_VALID; + } else { + HAL_ASSERT(false); } + REG_WRITE(index_reg, entry_id); mmu_raw_value = REG_READ(content_reg); - /* Note: for ESP32-P4, the invalid-bit of MMU: 0 for invalid, 1 for valid */ - return (mmu_raw_value & invalid_mask) ? true : false; + + bool is_valid = false; + if (mmu_raw_value & valid_mask) { + is_valid = true; + } + + return is_valid; } /** @@ -336,10 +386,18 @@ static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id) */ static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id) { - if (mmu_id == 0) - return MMU_TARGET_FLASH0; - else - return MMU_TARGET_PSRAM0; + (void)entry_id; + mmu_target_t target = MMU_TARGET_FLASH0; + + if (mmu_id == MMU_LL_FLASH_MMU_ID) { + target = MMU_TARGET_FLASH0; + } else if (mmu_id == MMU_LL_PSRAM_MMU_ID) { + target = MMU_TARGET_PSRAM0; + } else { + HAL_ASSERT(false); + } + + return target; } /** @@ -352,7 +410,6 @@ static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t ent */ static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id) { - (void)mmu_id; HAL_ASSERT(entry_id < MMU_ENTRY_NUM); mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id); @@ -373,13 +430,14 @@ static inline uint32_t 
mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t e default: HAL_ASSERT(shift_code); } - if (mmu_id == 0) { + if (mmu_id == MMU_LL_FLASH_MMU_ID) { REG_WRITE(SPI_MEM_C_MMU_ITEM_INDEX_REG, entry_id); - return (REG_READ(SPI_MEM_C_MMU_ITEM_CONTENT_REG) & MMU_VALID_VAL_MASK) << shift_code; - } else { + return (REG_READ(SPI_MEM_C_MMU_ITEM_CONTENT_REG) & MMU_FLASH_VALID_VAL_MASK) << shift_code; + } else if (mmu_id == MMU_LL_PSRAM_MMU_ID) { REG_WRITE(SPI_MEM_S_MMU_ITEM_INDEX_REG, entry_id); - return (REG_READ(SPI_MEM_S_MMU_ITEM_CONTENT_REG) & MMU_VALID_VAL_MASK) << shift_code; - + return (REG_READ(SPI_MEM_S_MMU_ITEM_CONTENT_REG) & MMU_PSRAM_VALID_VAL_MASK) << shift_code; + } else { + HAL_ASSERT(false); } } @@ -396,13 +454,27 @@ static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t e */ static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target) { - //TODO, should check PSRAM as well? - (void)mmu_id; + uint32_t index_reg = 0; + uint32_t content_reg = 0; + uint32_t valid_val_mask = 0; + + if (mmu_id == MMU_LL_FLASH_MMU_ID) { + index_reg = SPI_MEM_C_MMU_ITEM_INDEX_REG; + content_reg = SPI_MEM_C_MMU_ITEM_CONTENT_REG; + valid_val_mask = MMU_FLASH_VALID_VAL_MASK; + } else if (mmu_id == MMU_LL_PSRAM_MMU_ID) { + index_reg = SPI_MEM_S_MMU_ITEM_INDEX_REG; + content_reg = SPI_MEM_S_MMU_ITEM_CONTENT_REG; + valid_val_mask = MMU_PSRAM_VALID_VAL_MASK; + } else { + HAL_ASSERT(false); + } + for (int i = 0; i < MMU_ENTRY_NUM; i++) { if (mmu_ll_check_entry_valid(mmu_id, i)) { if (mmu_ll_get_entry_target(mmu_id, i) == target) { - REG_WRITE(SPI_MEM_C_MMU_ITEM_INDEX_REG, i); - if ((REG_READ(SPI_MEM_C_MMU_ITEM_CONTENT_REG) & MMU_VALID_VAL_MASK) == mmu_val) { + REG_WRITE(index_reg, i); + if ((REG_READ(content_reg) & valid_val_mask) == mmu_val) { return i; } } @@ -421,7 +493,6 @@ static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint3 */ static inline uint32_t 
mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type) { - (void)mmu_id; mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id); uint32_t shift_code = 0; @@ -442,7 +513,7 @@ static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t e HAL_ASSERT(shift_code); } uint32_t laddr = entry_id << shift_code; - return mmu_ll_laddr_to_vaddr(laddr, type); + return mmu_ll_laddr_to_vaddr(laddr, type, (mmu_id == MMU_LL_FLASH_MMU_ID) ? MMU_TARGET_FLASH0 : MMU_TARGET_PSRAM0); } #ifdef __cplusplus diff --git a/components/hal/esp32s2/include/hal/mmu_ll.h b/components/hal/esp32s2/include/hal/mmu_ll.h index ddf3f2cf79..2a830661f4 100644 --- a/components/hal/esp32s2/include/hal/mmu_ll.h +++ b/components/hal/esp32s2/include/hal/mmu_ll.h @@ -37,11 +37,13 @@ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) * * @param laddr linear address * @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t` + * @param target virtual address aimed physical memory target, not used * * @return virtual address */ -static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type) +static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target) { + (void)target; uint32_t vaddr_base = 0; if (vaddr_type == MMU_VADDR_DATA) { vaddr_base = SOC_MMU_DBUS_VADDR_BASE; diff --git a/components/hal/esp32s3/include/hal/mmu_ll.h b/components/hal/esp32s3/include/hal/mmu_ll.h index 04186dd3d8..f732d5811f 100644 --- a/components/hal/esp32s3/include/hal/mmu_ll.h +++ b/components/hal/esp32s3/include/hal/mmu_ll.h @@ -36,11 +36,13 @@ static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr) * * @param laddr linear address * @param vaddr_type virtual address type, could be instruction type or data type. 
See `mmu_vaddr_t` + * @param target virtual address aimed physical memory target, not used * * @return virtual address */ -static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type) +static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target) { + (void)target; uint32_t vaddr_base = 0; if (vaddr_type == MMU_VADDR_DATA) { vaddr_base = SOC_MMU_DBUS_VADDR_BASE; @@ -310,7 +312,11 @@ static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t e (void)mmu_id; uint32_t laddr = entry_id << 16; - return mmu_ll_laddr_to_vaddr(laddr, type); + /** + * For `mmu_ll_laddr_to_vaddr`, target is for compatibility on this chip. + * Here we just pass MMU_TARGET_FLASH0 to get vaddr + */ + return mmu_ll_laddr_to_vaddr(laddr, type, MMU_TARGET_FLASH0); } #ifdef __cplusplus diff --git a/components/hal/include/hal/mmu_types.h b/components/hal/include/hal/mmu_types.h index 85e085f55c..ee42bd5632 100644 --- a/components/hal/include/hal/mmu_types.h +++ b/components/hal/include/hal/mmu_types.h @@ -7,7 +7,6 @@ #pragma once #include "esp_bit_defs.h" -#include "sdkconfig.h" //To remove, TODO: IDF-7509 #ifdef __cplusplus extern "C" { @@ -19,10 +18,6 @@ typedef enum { MMU_MEM_CAP_WRITE = BIT(2), MMU_MEM_CAP_32BIT = BIT(3), MMU_MEM_CAP_8BIT = BIT(4), -#if CONFIG_IDF_TARGET_ESP32P4 //TODO: IDF-7509 - MMU_MEM_CAP_FLASH = BIT(5), - MMU_MEM_CAP_PSRAM = BIT(5), -#endif } mmu_mem_caps_t; /** @@ -41,10 +36,6 @@ typedef enum { typedef enum { MMU_VADDR_DATA = BIT(0), MMU_VADDR_INSTRUCTION = BIT(1), -#if CONFIG_IDF_TARGET_ESP32P4 //TODO: IDF-7509 - MMU_VADDR_FLASH = BIT(2), - MMU_VADDR_PSRAM = BIT(3), -#endif } mmu_vaddr_t; /** diff --git a/components/hal/mmu_hal.c b/components/hal/mmu_hal.c index 74955678f0..622de3b4f9 100644 --- a/components/hal/mmu_hal.c +++ b/components/hal/mmu_hal.c @@ -16,12 +16,11 @@ void mmu_hal_init(void) { -//TODO: IDF-7509 -#if CONFIG_ESP_ROM_RAM_APP_NEEDS_MMU_INIT || CONFIG_IDF_TARGET_ESP32P4 
+#if CONFIG_ESP_ROM_RAM_APP_NEEDS_MMU_INIT ROM_Boot_Cache_Init(); #endif -//TODO: IDF-7509 +//TODO: IDF-7516 #if CONFIG_IDF_TARGET_ESP32P4 Cache_Invalidate_All(CACHE_MAP_L2_CACHE); #endif @@ -32,10 +31,15 @@ void mmu_hal_init(void) void mmu_hal_unmap_all(void) { +#if MMU_LL_MMU_PER_TARGET + mmu_ll_unmap_all(MMU_LL_FLASH_MMU_ID); + mmu_ll_unmap_all(MMU_LL_PSRAM_MMU_ID); +#else mmu_ll_unmap_all(0); #if !CONFIG_FREERTOS_UNICORE mmu_ll_unmap_all(1); #endif +#endif } uint32_t mmu_hal_pages_to_bytes(uint32_t mmu_id, uint32_t page_num) @@ -90,11 +94,6 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr, uint32_t entry_id = 0; uint32_t mmu_val; //This is the physical address in the format that MMU supported -//TODO: IDF-7509 -#if CONFIG_IDF_TARGET_ESP32P4 - uint32_t vaddr_orig = vaddr; -#endif - *out_len = mmu_hal_pages_to_bytes(mmu_id, page_num); mmu_val = mmu_ll_format_paddr(mmu_id, paddr, mem_type); @@ -105,22 +104,11 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr, mmu_val++; page_num--; } - -//TODO: IDF-7509 -#if CONFIG_IDF_TARGET_ESP32P4 - Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE | CACHE_MAP_L2_CACHE, vaddr_orig, len); -#endif } void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len) { uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1); - -//TODO: IDF-7509 -#if CONFIG_IDF_TARGET_ESP32P4 - uint32_t vaddr_orig = vaddr; -#endif - HAL_ASSERT(vaddr % page_size_in_bytes == 0); HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, len, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION)); @@ -132,11 +120,6 @@ void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len) vaddr += page_size_in_bytes; page_num--; } - -//TODO: IDF-7509 -#if CONFIG_IDF_TARGET_ESP32P4 - Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE | CACHE_MAP_L2_CACHE, vaddr_orig, len); -#endif } bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr, mmu_target_t *out_target) diff --git 
a/components/soc/esp32p4/include/soc/ext_mem_defs.h b/components/soc/esp32p4/include/soc/ext_mem_defs.h index a4c45e81da..f124121e94 100644 --- a/components/soc/esp32p4/include/soc/ext_mem_defs.h +++ b/components/soc/esp32p4/include/soc/ext_mem_defs.h @@ -29,21 +29,16 @@ extern "C" { #define DRAM_FLASH_ADDRESS_LOW DRAM0_CACHE_ADDRESS_LOW #define DRAM_FLASH_ADDRESS_HIGH 0x44000000 -#define SINGLE_BANK_CACHE_ADDRESS_LOW 0x40000000 -#define SINGLE_BANK_CACHE_ADDRESS_HIGH 0x44000000 -#define DUAL_BANK_CACHE_ADDRESS_LOW 0x48000000 -#define DUAL_BANK_CACHE_ADDRESS_HIGH 0x4C000000 +#define DRAM_PSRAM_ADDRESS_LOW 0x48000000 +#define DRAM_PSRAM_ADDRESS_HIGH 0x4C000000 #define BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW) #define ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH) -#define ADDRESS_IN_IRAM0(vaddr) ADDRESS_IN_BUS(IRAM0, vaddr) #define ADDRESS_IN_IRAM0_CACHE(vaddr) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr) -#define ADDRESS_IN_DRAM0(vaddr) ADDRESS_IN_BUS(DRAM0, vaddr) #define ADDRESS_IN_DRAM0_CACHE(vaddr) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr) - -#define BUS_IRAM0_CACHE_SIZE BUS_SIZE(IRAM0_CACHE) -#define BUS_DRAM0_CACHE_SIZE BUS_SIZE(DRAM0_CACHE) +#define ADDRESS_IN_DRAM_FLASH(vaddr) ADDRESS_IN_BUS(DRAM_FLASH, vaddr) +#define ADDRESS_IN_DRAM_PSRAM(vaddr) ADDRESS_IN_BUS(DRAM_PSRAM, vaddr) //TODO, remove these cache function dependencies #define CACHE_IROM_MMU_START 0 @@ -62,91 +57,79 @@ extern "C" { #define MMU_BUS_START(i) 0 #define MMU_BUS_SIZE(i) (0x400 * 4) -#define MMU_MSPI_ACCESS_FLASH 0 -#define MMU_MSPI_ACCESS_SPIRAM BIT(10) -#define MMU_MSPI_VALID BIT(12) -#define MMU_MSPI_INVALID 0 -#define MMU_MSPI_SENSITIVE BIT(13) -#define MMU_PSRAM_ACCESS_SPIRAM BIT(10) +#define MMU_FLASH_VALID BIT(12) +#define MMU_FLASH_INVALID 0 #define MMU_PSRAM_VALID BIT(11) +#define MMU_PSRAM_INVALID 0 + +#define MMU_ACCESS_FLASH 0 +#define MMU_ACCESS_PSRAM BIT(10) + +#define MMU_FLASH_SENSITIVE BIT(13) #define 
MMU_PSRAM_SENSITIVE BIT(12) -#define MMU_ACCESS_FLASH MMU_MSPI_ACCESS_FLASH -#define MMU_ACCESS_SPIRAM MMU_MSPI_ACCESS_SPIRAM -#define MMU_VALID MMU_MSPI_VALID -#define MMU_SENSITIVE MMU_MSPI_SENSITIVE -#define DMMU_SENSITIVE MMU_PSRAM_SENSITIVE - -#define MMU_INVALID_MASK MMU_MSPI_VALID -#define MMU_INVALID MMU_MSPI_INVALID - -#define DMMU_INVALID_MASK MMU_PSRAM_VALID -#define DMMU_INVALID 0 #define CACHE_MAX_SYNC_NUM 0x400000 #define CACHE_MAX_LOCK_NUM 0x8000 /** - * MMU entry valid bit mask for mapping value. For an entry: - * valid bit + value bits - * valid bit is BIT(9), so value bits are 0x1ff + * MMU entry valid bit mask for mapping value. + * - For a Flash MMU entry: + * physical page number is BIT(0)~BIT(10), so value bits are 0x7ff + * - For a PSRAM MMU entry: + * physical page number is BIT(0)~BIT(9), so value bits are 0x3ff */ -#define MMU_VALID_VAL_MASK 0x3ff +#define MMU_FLASH_VALID_VAL_MASK 0x7ff +#define MMU_PSRAM_VALID_VAL_MASK 0x3ff /** * Max MMU available paddr page num. * `MMU_MAX_PADDR_PAGE_NUM * SOC_MMU_PAGE_SIZE` means the max paddr address supported by the MMU. e.g.: - * 256 * 64KB, means MMU can support 16MB paddr at most + * 32768 * 64KB, means MMU can support 2GB paddr at most */ -#define MMU_MAX_PADDR_PAGE_NUM 1024 +#define MMU_FLASH_MAX_PADDR_PAGE_NUM 32768 +#define MMU_PSRAM_MAX_PADDR_PAGE_NUM 16384 //MMU entry num #define MMU_ENTRY_NUM 1024 /** * This is the mask used for mapping. 
e.g.: - * 0x4200_0000 & MMU_VADDR_MASK + * 0x4000_0000 & MMU_VADDR_MASK */ #define MMU_VADDR_MASK ((SOC_MMU_PAGE_SIZE) * MMU_ENTRY_NUM - 1) #define SOC_MMU_FLASH_VADDR_BASE 0x40000000 #define SOC_MMU_PSRAM_VADDR_BASE 0x48000000 -#define SOC_MMU_FLASH_VADDR_START 0x40000000 -#define SOC_MMU_FLASH_VADDR_END 0x44000000 -#define SOC_MMU_PSRAM_VADDR_START 0x48000000 -#define SOC_MMU_PSRAM_VADDR_END 0x4C000000 - /*------------------------------------------------------------------------------ * MMU Linear Address *----------------------------------------------------------------------------*/ /** * - 64KB MMU page size: the last 0xFFFF, which is the offset - * - 1024 MMU entries, needs 0x3F to hold it. + * - 1024 MMU entries for flash, 1024 MMU entries for psram, needs 0xFFF to hold it. * * Therefore, 0x3F,FFFF */ -#define SOC_MMU_MEM_PHYSICAL_LINEAR_CAP (SOC_MMU_FLASH_VADDR_BASE ^ SOC_MMU_PSRAM_VADDR_BASE) -#define SOC_MMU_LINEAR_FLASH_ADDR_MASK (0xBFFFFFF) -#define SOC_MMU_LINEAR_PARSM_ADDR_MASK (0xBFFFFFF | SOC_MMU_MEM_PHYSICAL_LINEAR_CAP) +#define SOC_MMU_LINEAR_ADDR_MASK 0xFFFFFFF /** * - If high linear address isn't 0, this means MMU can recognize these addresses * - If high linear address is 0, this means MMU linear address range is equal or smaller than vaddr range. * Under this condition, we use the max linear space. 
*/ -#define SOC_MMU_FLASH_LINEAR_ADDRESS_LOW (SOC_MMU_FLASH_VADDR_START & SOC_MMU_LINEAR_FLASH_ADDR_MASK) -#define SOC_MMU_FLASH_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_FLASH_ADDR_MASK + 1) -#define SOC_MMU_FLASH_LINEAR_ADDRESS_SIZE (SOC_MMU_FLASH_LINEAR_ADDRESS_HIGH - SOC_MMU_FLASH_LINEAR_ADDRESS_LOW) +#define SOC_MMU_FLASH_LINEAR_ADDRESS_LOW (DRAM_FLASH_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK) +#if ((DRAM_FLASH_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0) +#define SOC_MMU_FLASH_LINEAR_ADDRESS_HIGH (DRAM_FLASH_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) +#else +#define SOC_MMU_FLASH_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1) +#endif -#define SOC_MMU_PSRAM_LINEAR_ADDRESS_LOW (SOC_MMU_PSRAM_VADDR_START & SOC_MMU_LINEAR_PARSM_ADDR_MASK) -#define SOC_MMU_PSRAM_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_PARSM_ADDR_MASK + 1) -#define SOC_MMU_PSRAM_LINEAR_ADDRESS_SIZE (SOC_MMU_PSRAM_LINEAR_ADDRESS_HIGH - SOC_MMU_PSRAM_LINEAR_ADDRESS_LOW) - -/** - * I/D share the MMU linear address range - */ -_Static_assert((SOC_MMU_FLASH_LINEAR_ADDRESS_LOW & ~SOC_MMU_MEM_PHYSICAL_LINEAR_CAP) == (SOC_MMU_PSRAM_LINEAR_ADDRESS_LOW & ~SOC_MMU_MEM_PHYSICAL_LINEAR_CAP), \ - "IRAM0 and DRAM0 raw linear address should be same"); +#define SOC_MMU_PSRAM_LINEAR_ADDRESS_LOW (DRAM_PSRAM_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK) +#if ((DRAM_PSRAM_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0) +#define SOC_MMU_PSRAM_LINEAR_ADDRESS_HIGH (DRAM_PSRAM_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) +#else +#define SOC_MMU_PSRAM_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1) +#endif #ifdef __cplusplus diff --git a/components/spi_flash/flash_mmap.c b/components/spi_flash/flash_mmap.c index 0fe072263a..218c799f3d 100644 --- a/components/spi_flash/flash_mmap.c +++ b/components/spi_flash/flash_mmap.c @@ -28,7 +28,7 @@ #if CONFIG_IDF_TARGET_ESP32 #include "esp_private/esp_cache_esp32_private.h" #elif CONFIG_IDF_TARGET_ESP32P4 -//TODO: IDF-7509 +//TODO: IDF-7516 #include "esp32p4/rom/cache.h" #endif @@ -82,12 +82,7 @@ 
esp_err_t spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t m } else { caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT; } -#if CONFIG_IDF_TARGET_ESP32P4 - //TODO: IDF-7509 - ret = esp_mmu_map(src_addr, size, MMU_TARGET_FLASH0, MMU_MEM_CAP_FLASH | caps, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr); -#else ret = esp_mmu_map(src_addr, size, MMU_TARGET_FLASH0, caps, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr); -#endif if (ret == ESP_OK) { vaddr_list[0] = (uint32_t)ptr; block->list_num = 1; @@ -380,7 +375,7 @@ IRAM_ATTR bool spi_flash_check_and_flush_cache(size_t start_addr, size_t length) #else // CONFIG_IDF_TARGET_ESP32 if (vaddr != NULL) { #if CONFIG_IDF_TARGET_ESP32P4 - //TODO: IDF-7509 + //TODO: IDF-7516 Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE | CACHE_MAP_L2_CACHE, (uint32_t)vaddr, SPI_FLASH_MMU_PAGE_SIZE); #else cache_hal_invalidate_addr((uint32_t)vaddr, SPI_FLASH_MMU_PAGE_SIZE);