cache: move arch_mem_coherent() into cache subsys
arch_mem_coherent() is cache related, so it belongs under the cache subsystem rather than the arch layer. It is renamed to sys_cache_is_mem_coherent() to reflect this change. The only implementer of arch_mem_coherent() is Xtensa, but memory coherence is not an architecture feature, which is why the API moves into the cache subsystem.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Committed by: Henrik Brix Andersen
Parent: a0a529aecc
Commit: 169304813a
arch/Kconfig (15 changes):
@@ -760,13 +760,6 @@ config ARCH_HAS_EXTRA_EXCEPTION_INFO
 config ARCH_HAS_GDBSTUB
 	bool
 
-config ARCH_HAS_COHERENCE
-	bool
-	help
-	  When selected, the architecture supports the
-	  arch_mem_coherent() API and can link into incoherent/cached
-	  memory using the ".cached" linker section.
-
 config ARCH_HAS_THREAD_LOCAL_STORAGE
 	bool
 
@@ -1173,6 +1166,14 @@ config EXTERNAL_CACHE
 
 endchoice
 
+config CACHE_CAN_SAY_MEM_COHERENCE
+	bool
+	help
+	  sys_cache_is_mem_coherent() is defined when enabled. This function can be
+	  used to determine if a pointer lies inside "coherence regions" and can be
+	  safely used in multiprocessor code without explicit flush or invalidate
+	  operations.
+
 endif # CACHE_MANAGEMENT
 
 endmenu
@@ -221,3 +221,8 @@ Trusted Firmware-M
 
 Architectures
 *************
+
+* Renamed ``CONFIG_ARCH_HAS_COHERENCE`` to :kconfig:option:`CONFIG_CACHE_CAN_SAY_MEM_COHERENCE` as
+  the feature is cache related and now lives under the cache subsystem.
+
+* Use :c:func:`sys_cache_is_mem_coherent` instead of :c:func:`arch_mem_coherent`.
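For out-of-tree code, the migration is a mechanical rename at each call site. A minimal sketch (the needs_no_flush() helper and its buf argument are hypothetical, not part of the commit):

#include <stdbool.h>
#include <zephyr/cache.h>

/* True when buf lies in a coherence region and so needs no explicit
 * cache maintenance before being shared across CPUs.
 */
static bool needs_no_flush(void *buf)
{
	/* Was: return arch_mem_coherent(buf); */
	return sys_cache_is_mem_coherent(buf);
}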
@@ -870,32 +870,10 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf);
 size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
 #endif /* CONFIG_USERSPACE */
 
-/**
- * @brief Detect memory coherence type
- *
- * Required when ARCH_HAS_COHERENCE is true.  This function returns
- * true if the byte pointed to lies within an architecture-defined
- * "coherence region" (typically implemented with uncached memory) and
- * can safely be used in multiprocessor code without explicit flush or
- * invalidate operations.
- *
- * @note The result is for only the single byte at the specified
- * address, this API is not required to check region boundaries or to
- * expect aligned pointers.  The expectation is that the code above
- * will have queried the appropriate address(es).
- */
-#ifndef CONFIG_ARCH_HAS_COHERENCE
-static inline bool arch_mem_coherent(void *ptr)
-{
-	ARG_UNUSED(ptr);
-	return true;
-}
-#endif
-
 /**
  * @brief Ensure cache coherence prior to context switch
  *
- * Required when ARCH_HAS_COHERENCE is true.  On cache-incoherent
+ * Required when CONFIG_KERNEL_COHERENCE is true.  On cache-incoherent
 * multiprocessor architectures, thread stacks are cached by default
 * for performance reasons.  They must therefore be flushed
 * appropriately on context switch.  The rules are:
@@ -354,6 +354,10 @@ void *arch_cache_uncached_ptr_get(void __sparse_cache *ptr);
 
 void arch_cache_init(void);
 
+#if defined(CONFIG_CACHE_CAN_SAY_MEM_COHERENCE) || defined(__DOXYGEN__)
+#define cache_is_mem_coherent(ptr) arch_mem_coherent(ptr)
+#endif
+
 /**
  * @}
  */
@@ -171,16 +171,6 @@ static ALWAYS_INLINE void xtensa_vecbase_lock(void)
 	__asm__ volatile("wsr.vecbase %0; rsync" : : "r" (vecbase | 1));
 }
 
-#if defined(CONFIG_ARCH_HAS_COHERENCE) || defined(__DOXYGEN__)
-/** Implementation of @ref arch_mem_coherent. */
-static inline bool arch_mem_coherent(void *ptr)
-{
-	size_t addr = (size_t) ptr;
-
-	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
-}
-#endif
-
 #if defined(CONFIG_XTENSA_MMU) || defined(__DOXYGEN__)
 /**
  * @brief Perform additional steps after MMU initialization.
@@ -335,6 +335,14 @@ static ALWAYS_INLINE void arch_cache_init(void)
 {
 }
 
+#if defined(CONFIG_CACHE_CAN_SAY_MEM_COHERENCE)
+static ALWAYS_INLINE bool arch_mem_coherent(void *ptr)
+{
+	size_t addr = (size_t) ptr;
+
+	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
+}
+#endif
+
 #ifdef __cplusplus
 } /* extern "C" */
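The Xtensa check works because the region-protection memory model splits the 32-bit address space into eight 512 MiB regions, so `addr >> 29` yields the region index of any address. A small host-side sketch of that arithmetic (the region index 4 is a hypothetical CONFIG_XTENSA_UNCACHED_REGION value):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define UNCACHED_REGION 4 /* hypothetical CONFIG_XTENSA_UNCACHED_REGION */

static bool mem_coherent(void *ptr)
{
	/* Each 512 MiB (1 << 29 bytes) region maps to an index 0..7. */
	return ((uintptr_t)ptr >> 29) == UNCACHED_REGION;
}

int main(void)
{
	/* 0x90000000 >> 29 == 4, so it falls in the uncached region. */
	assert(mem_coherent((void *)(uintptr_t)0x90000000u));
	/* 0xa0000000 >> 29 == 5: a cached region on this hypothetical layout. */
	assert(!mem_coherent((void *)(uintptr_t)0xa0000000u));
	return 0;
}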
@@ -539,6 +539,30 @@ static ALWAYS_INLINE void sys_cache_flush(void *addr, size_t size)
 }
 #endif
 
+#if defined(CONFIG_CACHE_CAN_SAY_MEM_COHERENCE) || defined(__DOXYGEN__)
+/**
+ * @brief Detect memory coherence type
+ *
+ * This function returns true if the byte pointed to lies within
+ * "coherence regions" (typically implemented with uncached memory) and
+ * can safely be used in multiprocessor code without explicit flush or
+ * invalidate operations.
+ *
+ * @note The result is for only the single byte at the specified
+ * address, this API is not required to check region boundaries or to
+ * expect aligned pointers.  The expectation is that the code above
+ * will have queried the appropriate address(es).
+ *
+ * @param ptr Pointer to be checked.
+ *
+ * @return True if the pointer is in any coherence region, false otherwise.
+ */
+static ALWAYS_INLINE bool sys_cache_is_mem_coherent(void *ptr)
+{
+	return cache_is_mem_coherent(ptr);
+}
+#endif /* CONFIG_CACHE_CAN_SAY_MEM_COHERENCE */
+
 #include <zephyr/syscalls/cache.h>
 
 #ifdef __cplusplus
 }
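With the accessor in place, portable code can branch on coherence instead of hard-coding platform knowledge; the HDA tests further down follow this exact pattern. A minimal sketch (the share_with_dma() helper is hypothetical):

#include <zephyr/cache.h>

/* Flush a buffer before handing it to a bus-mastering device, but only
 * when it lives in cached (incoherent) memory.
 */
static void share_with_dma(void *buf, size_t size)
{
#ifdef CONFIG_CACHE_CAN_SAY_MEM_COHERENCE
	if (sys_cache_is_mem_coherent(buf)) {
		return; /* coherence region: no maintenance needed */
	}
#endif
	sys_cache_data_flush_range(buf, size);
}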
@@ -4541,9 +4541,9 @@ struct z_work_canceller {
  * from both the caller thread and the work queue thread.
  *
  * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
- * coherent memory; see arch_mem_coherent().  The stack on these architectures
- * is generally not coherent.  be stack-allocated.  Violations are detected by
- * runtime assertion.
+ * coherent memory; see sys_cache_is_mem_coherent().  The stack on these
+ * architectures is generally not coherent.  be stack-allocated.  Violations are
+ * detected by runtime assertion.
  */
 struct k_work_sync {
 	union {
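In practice this means a k_work_sync on a KERNEL_COHERENCE platform must not be a stack local, since stacks there are cached. A sketch of a compliant caller (the wait_for() wrapper is hypothetical; static data is assumed to be linked into coherent memory on such platforms):

#include <zephyr/kernel.h>

static struct k_work_sync work_sync; /* static: not on a cached stack */

void wait_for(struct k_work *work)
{
	/* Passes the runtime coherence assertion in k_work_flush() below. */
	k_work_flush(work, &work_sync);
}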
@@ -98,7 +98,7 @@ config IPI_OPTIMIZE
 
 config KERNEL_COHERENCE
 	bool "Place all shared data into coherent memory"
-	depends on ARCH_HAS_COHERENCE
+	depends on CACHE_CAN_SAY_MEM_COHERENCE
 	default y if SMP && MP_MAX_NUM_CPUS > 1
 	select THREAD_STACK_INFO
 	help
@@ -320,7 +320,7 @@ static void bg_thread_main(void *unused1, void *unused2, void *unused3)
 	z_init_static_threads();
 
 #ifdef CONFIG_KERNEL_COHERENCE
-	__ASSERT_NO_MSG(arch_mem_coherent(&_kernel));
+	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(&_kernel));
 #endif /* CONFIG_KERNEL_COHERENCE */
 
 #ifdef CONFIG_SMP
@@ -325,7 +325,7 @@ static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
 static void ready_thread(struct k_thread *thread)
 {
 #ifdef CONFIG_KERNEL_COHERENCE
-	__ASSERT_NO_MSG(arch_mem_coherent(thread));
+	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(thread));
 #endif /* CONFIG_KERNEL_COHERENCE */
 
 	/* If thread is queued already, do not try and added it to the
@@ -542,7 +542,7 @@ static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
 			k_timeout_t timeout)
 {
 #ifdef CONFIG_KERNEL_COHERENCE
-	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
+	__ASSERT_NO_MSG(wait_q == NULL || sys_cache_is_mem_coherent(wait_q));
 #endif /* CONFIG_KERNEL_COHERENCE */
 	add_to_waitq_locked(thread, wait_q);
 	add_thread_timeout(thread, timeout);
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(z_spin_lock_set_owner);
 #ifdef CONFIG_KERNEL_COHERENCE
 bool z_spin_lock_mem_coherent(struct k_spinlock *l)
 {
-	return arch_mem_coherent((void *)l);
+	return sys_cache_is_mem_coherent((void *)l);
 }
 EXPORT_SYMBOL(z_spin_lock_mem_coherent);
 #endif /* CONFIG_KERNEL_COHERENCE */
@@ -625,13 +625,13 @@ char *z_setup_new_thread(struct k_thread *new_thread,
 	/* Check that the thread object is safe, but that the stack is
 	 * still cached!
 	 */
-	__ASSERT_NO_MSG(arch_mem_coherent(new_thread));
+	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(new_thread));
 
 	/* When dynamic thread stack is available, the stack may come from
 	 * uncached area.
 	 */
 #ifndef CONFIG_DYNAMIC_THREAD
-	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
+	__ASSERT_NO_MSG(!sys_cache_is_mem_coherent(stack));
 #endif /* CONFIG_DYNAMIC_THREAD */
 
 #endif /* CONFIG_KERNEL_COHERENCE */
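The two assertions state the KERNEL_COHERENCE layout invariant from opposite sides: the k_thread object must be coherent while its stack must stay cached. A sketch of a static definition that satisfies both (names hypothetical; the placement comments are assumptions about how these platforms link static data and stack areas):

#include <zephyr/kernel.h>

#define MY_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE); /* stack area: cached */
static struct k_thread my_thread;               /* static data: coherent */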
@@ -105,7 +105,7 @@ k_ticks_t z_add_timeout(struct _timeout *to, _timeout_func_t fn, k_timeout_t tim
 	}
 
 #ifdef CONFIG_KERNEL_COHERENCE
-	__ASSERT_NO_MSG(arch_mem_coherent(to));
+	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(to));
 #endif /* CONFIG_KERNEL_COHERENCE */
 
 	__ASSERT(!sys_dnode_is_linked(&to->node), "");
@@ -463,7 +463,7 @@ bool k_work_flush(struct k_work *work,
 	__ASSERT_NO_MSG(!k_is_in_isr());
 	__ASSERT_NO_MSG(sync != NULL);
 #ifdef CONFIG_KERNEL_COHERENCE
-	__ASSERT_NO_MSG(arch_mem_coherent(sync));
+	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(sync));
 #endif /* CONFIG_KERNEL_COHERENCE */
 
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);
@@ -575,7 +575,7 @@ bool k_work_cancel_sync(struct k_work *work,
 	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
 	__ASSERT_NO_MSG(!k_is_in_isr());
 #ifdef CONFIG_KERNEL_COHERENCE
-	__ASSERT_NO_MSG(arch_mem_coherent(sync));
+	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(sync));
 #endif /* CONFIG_KERNEL_COHERENCE */
 
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);
@@ -1198,7 +1198,7 @@ bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
 	__ASSERT_NO_MSG(sync != NULL);
 	__ASSERT_NO_MSG(!k_is_in_isr());
 #ifdef CONFIG_KERNEL_COHERENCE
-	__ASSERT_NO_MSG(arch_mem_coherent(sync));
+	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(sync));
 #endif /* CONFIG_KERNEL_COHERENCE */
 
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);
@@ -1230,7 +1230,7 @@ bool k_work_flush_delayable(struct k_work_delayable *dwork,
 	__ASSERT_NO_MSG(sync != NULL);
 	__ASSERT_NO_MSG(!k_is_in_isr());
 #ifdef CONFIG_KERNEL_COHERENCE
-	__ASSERT_NO_MSG(arch_mem_coherent(sync));
+	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(sync));
 #endif /* CONFIG_KERNEL_COHERENCE */
 
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);
@@ -6,7 +6,7 @@ config SOC_SERIES_INTEL_ADSP_ACE
 	select XTENSA
 	select XTENSA_HAL if (("$(ZEPHYR_TOOLCHAIN_VARIANT)" != "xcc") && ("$(ZEPHYR_TOOLCHAIN_VARIANT)" != "xt-clang"))
 	select ATOMIC_OPERATIONS_BUILTIN if "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "xcc"
-	select ARCH_HAS_COHERENCE
+	select CACHE_CAN_SAY_MEM_COHERENCE
 	select SCHED_IPI_SUPPORTED
 	select ARCH_HAS_CUSTOM_CPU_IDLE
 	select DW_ICTL_ACE
@@ -9,7 +9,7 @@ config SOC_SERIES_INTEL_ADSP_CAVS
 	select XTENSA_USE_CORE_CRT1
 	select ATOMIC_OPERATIONS_BUILTIN if "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "xcc"
 	select ATOMIC_OPERATIONS_ARCH if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "xcc"
-	select ARCH_HAS_COHERENCE
+	select CACHE_CAN_SAY_MEM_COHERENCE
 	select ARCH_HAS_GDBSTUB
 	select HAS_PM
 	select INTEL_ADSP_MEMORY_IS_MIRRORED
@@ -18,7 +18,7 @@
 #define ATOMCTL_WB_RCW BIT(4) /* RCW Transaction for Writeback Cacheable Memory */
 #define ATOMCTL_VALUE (ATOMCTL_BY_RCW | ATOMCTL_WT_RCW | ATOMCTL_WB_RCW)
 
-#ifdef CONFIG_ADSP_MEMORY_IS_MIRRORED
+#ifdef CONFIG_INTEL_ADSP_MEMORY_IS_MIRRORED
 
 /* Utility to generate an unrolled and optimal[1] code sequence to set
  * the RPO TLB registers (contra the HAL cacheattr macros, which
@@ -75,7 +75,7 @@
 		FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
 	} while (0)
 
-#endif /* CONFIG_ADSP_MEMORY_IS_MIRRORED */
+#endif /* CONFIG_INTEL_ADSP_MEMORY_IS_MIRRORED */
 
 /* Low-level CPU initialization.  Call this immediately after entering
  * C code to initialize the cache, protection and synchronization
@@ -140,9 +140,9 @@ static ALWAYS_INLINE void cpu_early_init(void)
 	 * Let use the default configuration and properly configure the
 	 * MMU when running from RAM.
 	 */
-#if defined(CONFIG_ADSP_MEMORY_IS_MIRRORED) && !defined(CONFIG_MMU)
+#if defined(CONFIG_INTEL_ADSP_MEMORY_IS_MIRRORED) && !defined(CONFIG_MMU)
 	SET_RPO_TLB();
-#endif /* CONFIG_ADSP_MEMORY_IS_MIRRORED && !CONFIG_MMU */
+#endif /* CONFIG_INTEL_ADSP_MEMORY_IS_MIRRORED && !CONFIG_MMU */
 
 
 	/* Initialize ATOMCTL: Hardware defaults for S32C1I use
@@ -60,10 +60,10 @@ ZTEST(intel_adsp_hda_dma, test_hda_host_in_dma)
 	}
 
 #if (IS_ENABLED(CONFIG_KERNEL_COHERENCE))
-	zassert_true(arch_mem_coherent(dma_buf), "Buffer is unexpectedly incoherent!");
+	zassert_true(sys_cache_is_mem_coherent(dma_buf), "Buffer is unexpectedly incoherent!");
 #else
 	/* The buffer is in the cached address range and must be flushed */
-	zassert_false(arch_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
+	zassert_false(sys_cache_is_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
 	sys_cache_data_flush_range(dma_buf, DMA_BUF_SIZE);
 #endif
@@ -204,12 +204,13 @@ void test_hda_host_out_dma(void)
 	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "dsp wait for full");
 
 #if (IS_ENABLED(CONFIG_KERNEL_COHERENCE))
-	zassert_true(arch_mem_coherent(dma_buf), "Buffer is unexpectedly incoherent!");
+	zassert_true(sys_cache_is_mem_coherent(dma_buf), "Buffer is unexpectedly incoherent!");
 #else
 	/* The buffer is in the cached address range and must be invalidated
 	 * prior to reading.
 	 */
-	zassert_false(arch_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
+	zassert_false(sys_cache_is_mem_coherent(dma_buf),
+		      "Buffer is unexpectedly coherent!");
 	sys_cache_data_invd_range(dma_buf, DMA_BUF_SIZE);
 #endif
@@ -59,10 +59,10 @@ ZTEST(intel_adsp_hda, test_hda_host_in_smoke)
 	}
 
 #if (IS_ENABLED(CONFIG_KERNEL_COHERENCE))
-	zassert_true(arch_mem_coherent(hda_buf), "Buffer is unexpectedly incoherent!");
+	zassert_true(sys_cache_is_mem_coherent(hda_buf), "Buffer is unexpectedly incoherent!");
 #else
 	/* The buffer is in the cached address range and must be flushed */
-	zassert_false(arch_mem_coherent(hda_buf), "Buffer is unexpectedly coherent!");
+	zassert_false(sys_cache_is_mem_coherent(hda_buf), "Buffer is unexpectedly coherent!");
 	sys_cache_data_flush_range(hda_buf, HDA_BUF_SIZE);
 #endif
@@ -166,12 +166,14 @@ ZTEST(intel_adsp_hda, test_hda_host_out_smoke)
 	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, STREAM_ID, "dsp wait for full");
 
 #if (IS_ENABLED(CONFIG_KERNEL_COHERENCE))
-	zassert_true(arch_mem_coherent(hda_buf), "Buffer is unexpectedly incoherent!");
+	zassert_true(sys_cache_is_mem_coherent(hda_buf),
+		     "Buffer is unexpectedly incoherent!");
 #else
 	/* The buffer is in the cached address range and must be invalidated
 	 * prior to reading.
 	 */
-	zassert_false(arch_mem_coherent(hda_buf), "Buffer is unexpectedly coherent!");
+	zassert_false(sys_cache_is_mem_coherent(hda_buf),
+		      "Buffer is unexpectedly coherent!");
 	sys_cache_data_invd_range(hda_buf, HDA_BUF_SIZE);
 #endif
@@ -6,6 +6,6 @@
 # the shared variables in cached/incoherent memory.
 config KERNEL_COHERENCE
 	bool
-	default y if ARCH_HAS_COHERENCE
+	default y if CACHE_CAN_SAY_MEM_COHERENCE
 
 source "Kconfig.zephyr"