Compare commits
163 Commits
v4.1.0-rc2
...
v1.6.0-rc4
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4311814a05 | ||
|
|
70c57b2b96 | ||
|
|
bdd6b19a52 | ||
|
|
a599dbd128 | ||
|
|
4344299c00 | ||
|
|
0307d6ea5f | ||
|
|
f38cbb5744 | ||
|
|
4c0d57ed3e | ||
|
|
4540aa0877 | ||
|
|
1c7c4dd43e | ||
|
|
a16bc64bf8 | ||
|
|
3d37868d09 | ||
|
|
98a001e1f8 | ||
|
|
bf2eb5542a | ||
|
|
51859b8ea0 | ||
|
|
7aa536789e | ||
|
|
05a0c6fef0 | ||
|
|
9471d1f6c8 | ||
|
|
09f4f54e72 | ||
|
|
c2fe55bbe5 | ||
|
|
70028dd97f | ||
|
|
d15b758632 | ||
|
|
999c15d1b5 | ||
|
|
d108946a1b | ||
|
|
97ab403573 | ||
|
|
63538db423 | ||
|
|
272ec5e219 | ||
|
|
82804fe115 | ||
|
|
672dc9cc89 | ||
|
|
74d75f2bd5 | ||
|
|
c5a40d60bb | ||
|
|
189e5d0006 | ||
|
|
039a130861 | ||
|
|
61ec28adb6 | ||
|
|
4e38776774 | ||
|
|
14dc173c1f | ||
|
|
0a48547bc6 | ||
|
|
16f5611f3e | ||
|
|
7336b2a978 | ||
|
|
42cf1ab802 | ||
|
|
05291174df | ||
|
|
6c8409c083 | ||
|
|
bf591e9edf | ||
|
|
cf05794924 | ||
|
|
60a31d6ed1 | ||
|
|
6b6572629d | ||
|
|
96fc793c25 | ||
|
|
a30942dbb8 | ||
|
|
963d04d67e | ||
|
|
b792e4277f | ||
|
|
b006b1bb9a | ||
|
|
0fc5801607 | ||
|
|
271ab7d583 | ||
|
|
9f0e4d2a90 | ||
|
|
979aedc2d3 | ||
|
|
850877b95d | ||
|
|
8f0b4d7f4d | ||
|
|
796a6bb4d8 | ||
|
|
385e02ba52 | ||
|
|
5a5d878252 | ||
|
|
cf6e5cf730 | ||
|
|
7f1d5a47e4 | ||
|
|
aca2baa43a | ||
|
|
bf77e2616d | ||
|
|
cfa7ad5c4a | ||
|
|
30414fd866 | ||
|
|
7d21b5402c | ||
|
|
3ed3f29223 | ||
|
|
e9a4431362 | ||
|
|
56816bdbbf | ||
|
|
420594122a | ||
|
|
0efe69ec31 | ||
|
|
8173b881ba | ||
|
|
e7c091bba2 | ||
|
|
4c11ae8e60 | ||
|
|
af941d5832 | ||
|
|
32de7848c4 | ||
|
|
afe1621713 | ||
|
|
42b956050a | ||
|
|
04af679197 | ||
|
|
b082e12265 | ||
|
|
3cbabecfcc | ||
|
|
4d2ad79207 | ||
|
|
0cddc4b665 | ||
|
|
4cdbfbf9e2 | ||
|
|
c38888bccb | ||
|
|
be0db01093 | ||
|
|
a9cd7c0498 | ||
|
|
6af438c440 | ||
|
|
0ae966be58 | ||
|
|
33d0716c21 | ||
|
|
a176dd3274 | ||
|
|
ed8c6e2d1f | ||
|
|
c280ce6bf9 | ||
|
|
67d49c2344 | ||
|
|
f3c2664e53 | ||
|
|
22bbdc2f85 | ||
|
|
3b626c5e15 | ||
|
|
794a47dedf | ||
|
|
e203a64000 | ||
|
|
25a40b19ea | ||
|
|
9366bc161b | ||
|
|
602295a88f | ||
|
|
829c3ceb12 | ||
|
|
9f36bbc07a | ||
|
|
145a4c93fa | ||
|
|
0ce96e850d | ||
|
|
27d7a9e29f | ||
|
|
391ec95f38 | ||
|
|
cb409a67fd | ||
|
|
cf4ce62590 | ||
|
|
3188ed4e64 | ||
|
|
f0523be409 | ||
|
|
db089efe48 | ||
|
|
628ecfa4d7 | ||
|
|
f8d3ac0130 | ||
|
|
853c11885c | ||
|
|
f1c880aa8a | ||
|
|
653328c6e8 | ||
|
|
d88617db9b | ||
|
|
385770cf21 | ||
|
|
cf00c1c184 | ||
|
|
6a7b6679b1 | ||
|
|
3893503f49 | ||
|
|
b42719243d | ||
|
|
fd45cb4567 | ||
|
|
9f0045a30a | ||
|
|
662c8bee81 | ||
|
|
6d0fa01492 | ||
|
|
afe118fa1d | ||
|
|
247a2a0671 | ||
|
|
cb9033d10f | ||
|
|
bc7e0455c8 | ||
|
|
71e85e390b | ||
|
|
eee3a430dc | ||
|
|
32ef1480e9 | ||
|
|
39410d79fe | ||
|
|
40c944f0b6 | ||
|
|
efcdfce517 | ||
|
|
e50f05df3e | ||
|
|
3f0fcedf00 | ||
|
|
baab38500c | ||
|
|
06869d4499 | ||
|
|
de55b9f73a | ||
|
|
eb7910b9d1 | ||
|
|
95f8f6f3e0 | ||
|
|
b65e208171 | ||
|
|
2ccf0ad045 | ||
|
|
efc7ffde75 | ||
|
|
bee0fd0601 | ||
|
|
c6c73c1b2e | ||
|
|
ae495b7618 | ||
|
|
3b22494192 | ||
|
|
c4a5b9c74e | ||
|
|
ca23390e84 | ||
|
|
2644d370c8 | ||
|
|
bef829cfc0 | ||
|
|
c625aa2636 | ||
|
|
4329e5e24a | ||
|
|
4e4ac94f90 | ||
|
|
8d10dc63fc | ||
|
|
ce596d3c54 | ||
|
|
61b596b0e5 |
@@ -379,6 +379,13 @@ M: Anas Nashif <anas.nashif@intel.com>
|
||||
S: Supported
|
||||
F: arch/x86/soc/intel_quark/quark_x1000/
|
||||
|
||||
RELEASE NOTES
|
||||
M: Anas Nashif <anas.nashif@intel.com>
|
||||
M: Javier B Perez <javier.b.perez.hernandez@intel.com>
|
||||
M: Kinder, David <david.b.kinder@intel.com>
|
||||
S: Supported
|
||||
F: release-notes.rst
|
||||
|
||||
SANITYCHECK
|
||||
M: Andrew Boie <andrew.p.boie@intel.com>
|
||||
S: Supported
|
||||
|
||||
10
Makefile
10
Makefile
@@ -1,8 +1,8 @@
|
||||
VERSION_MAJOR = 1
|
||||
VERSION_MINOR = 5
|
||||
PATCHLEVEL = 99
|
||||
VERSION_MINOR = 6
|
||||
PATCHLEVEL = 0
|
||||
VERSION_RESERVED = 0
|
||||
EXTRAVERSION =
|
||||
EXTRAVERSION = -rc4
|
||||
NAME = Zephyr Kernel
|
||||
|
||||
export SOURCE_DIR PROJECT MDEF_FILE
|
||||
@@ -796,11 +796,11 @@ libs-y := $(libs-y1) $(libs-y2)
|
||||
export KBUILD_ZEPHYR_MAIN := $(drivers-y) $(libs-y) $(core-y)
|
||||
export LDFLAGS_zephyr
|
||||
|
||||
zephyr-deps := $(KBUILD_LDS) $(KBUILD_ZEPHYR_MAIN) $(app-y)
|
||||
|
||||
ALL_LIBS += $(TOOLCHAIN_LIBS)
|
||||
export ALL_LIBS
|
||||
|
||||
zephyr-deps := $(KBUILD_LDS) $(KBUILD_ZEPHYR_MAIN) $(app-y) $(ALL_LIBS)
|
||||
|
||||
LINK_LIBS := $(foreach l,$(ALL_LIBS), -l$(l))
|
||||
|
||||
OUTPUT_FORMAT ?= elf32-i386
|
||||
|
||||
@@ -28,11 +28,26 @@
|
||||
#include <sections.h>
|
||||
#include <arch/cpu.h>
|
||||
|
||||
#ifdef CONFIG_HARVARD
|
||||
#define _TOP_OF_MEMORY (CONFIG_DCCM_BASE_ADDRESS + CONFIG_DCCM_SIZE * 1024)
|
||||
/* harvard places the initial stack in the dccm memory */
|
||||
GDATA(_interrupt_stack)
|
||||
GDATA(_firq_stack)
|
||||
GDATA(_main_stack)
|
||||
|
||||
/* use one of the available interrupt stacks during init */
|
||||
|
||||
/* FIRQ only ? */
|
||||
#if CONFIG_NUM_IRQ_PRIO_LEVELS == 1
|
||||
|
||||
/* FIRQ, but uses _interrupt_stack ? */
|
||||
#if CONFIG_RGF_NUM_BANKS == 1
|
||||
#define INIT_STACK _interrupt_stack
|
||||
#define INIT_STACK_SIZE CONFIG_ISR_STACK_SIZE
|
||||
#else
|
||||
#define INIT_STACK _firq_stack
|
||||
#define INIT_STACK_SIZE CONFIG_FIRQ_STACK_SIZE
|
||||
#endif
|
||||
#else
|
||||
#define _TOP_OF_MEMORY (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_SIZE * 1024)
|
||||
#define INIT_STACK _interrupt_stack
|
||||
#define INIT_STACK_SIZE CONFIG_ISR_STACK_SIZE
|
||||
#endif
|
||||
|
||||
GTEXT(__reset)
|
||||
@@ -58,7 +73,30 @@ SECTION_FUNC(TEXT,__start)
|
||||
/* lock interrupts: will get unlocked when switch to main task */
|
||||
clri
|
||||
|
||||
/* setup a stack at the end of MEMORY */
|
||||
mov sp, _TOP_OF_MEMORY
|
||||
#ifdef CONFIG_INIT_STACKS
|
||||
/*
|
||||
* use the main stack to call memset on the interrupt stack and the
|
||||
* FIRQ stack when CONFIG_INIT_STACKS is enabled before switching to
|
||||
* one of them for the rest of the early boot
|
||||
*/
|
||||
mov sp, _main_stack
|
||||
add sp, sp, CONFIG_MAIN_STACK_SIZE
|
||||
|
||||
mov_s r0, _interrupt_stack
|
||||
mov_s r1, 0xaa
|
||||
mov_s r2, CONFIG_ISR_STACK_SIZE
|
||||
jl memset
|
||||
|
||||
#if CONFIG_RGF_NUM_BANKS != 1
|
||||
mov_s r0, _firq_stack
|
||||
mov_s r1, 0xaa
|
||||
mov_s r2, CONFIG_FIRQ_STACK_SIZE
|
||||
jl memset
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_INIT_STACKS */
|
||||
|
||||
mov sp, INIT_STACK
|
||||
add sp, sp, INIT_STACK_SIZE
|
||||
|
||||
j @_PrepC
|
||||
|
||||
@@ -84,8 +84,8 @@ static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread)
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
_thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
{
|
||||
@@ -124,14 +124,11 @@ void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
|
||||
#endif
|
||||
|
||||
/* k_q_node initialized upon first insertion in a list */
|
||||
thread->base.flags = options | K_PRESTART;
|
||||
thread->base.sched_locked = 0;
|
||||
_init_thread_base(&thread->base, priority, K_PRESTART, options);
|
||||
|
||||
/* static threads overwrite them afterwards with real values */
|
||||
thread->init_data = NULL;
|
||||
thread->fn_abort = NULL;
|
||||
thread->base.prio = priority;
|
||||
|
||||
#ifdef CONFIG_THREAD_CUSTOM_DATA
|
||||
/* Initialize custom data field (value is opaque to kernel) */
|
||||
@@ -147,8 +144,6 @@ void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
thread->entry = (struct __thread_entry *)(pInitCtx);
|
||||
#endif
|
||||
|
||||
ARG_UNUSED(uk_task_ptr);
|
||||
|
||||
/*
|
||||
* intlock_key is constructed based on ARCv2 ISA Programmer's
|
||||
* Reference Manual CLRI instruction description:
|
||||
@@ -160,8 +155,6 @@ void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
thread->callee_saved.sp =
|
||||
(uint32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF;
|
||||
|
||||
_nano_timeout_thread_init(thread);
|
||||
|
||||
/* initial values in all other regs/k_thread entries are irrelevant */
|
||||
|
||||
thread_monitor_init(thread);
|
||||
|
||||
@@ -128,24 +128,6 @@ typedef struct _callee_saved_stack _callee_saved_stack_t;
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
/* Bitmask definitions for the struct tcs->flags bit field */
|
||||
|
||||
#define K_STATIC 0x00000800
|
||||
|
||||
#define K_READY 0x00000000 /* Thread is ready to run */
|
||||
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
|
||||
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
|
||||
#define K_PRESTART 0x00004000 /* Thread has not yet started */
|
||||
#define K_DEAD 0x00008000 /* Thread has terminated */
|
||||
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
|
||||
#define K_DUMMY 0x00020000 /* Not a real thread */
|
||||
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
|
||||
K_DEAD | K_SUSPENDED | K_DUMMY)
|
||||
|
||||
#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */
|
||||
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
|
||||
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
|
||||
|
||||
/* stacks */
|
||||
|
||||
#define STACK_ALIGN_SIZE 4
|
||||
|
||||
@@ -20,12 +20,13 @@
|
||||
#include <power.h>
|
||||
#include <soc_power.h>
|
||||
#include <init.h>
|
||||
#include <kernel_structs.h>
|
||||
|
||||
#include "ss_power_states.h"
|
||||
|
||||
#define SLEEP_MODE_CORE_OFF (0x0)
|
||||
#define SLEEP_MODE_CORE_TIMERS_RTC_OFF (0x60)
|
||||
#define ENABLE_INTERRUPTS BIT(4)
|
||||
#define ENABLE_INTERRUPTS (BIT(4) | _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL))
|
||||
|
||||
#define ARC_SS1 (SLEEP_MODE_CORE_OFF | ENABLE_INTERRUPTS)
|
||||
#define ARC_SS2 (SLEEP_MODE_CORE_TIMERS_RTC_OFF | ENABLE_INTERRUPTS)
|
||||
|
||||
@@ -28,6 +28,7 @@ config CPU_CORTEX_M
|
||||
# Omit prompt to signify "hidden" option
|
||||
default n
|
||||
select CPU_CORTEX
|
||||
select ARCH_HAS_CUSTOM_SWAP_TO_MAIN
|
||||
help
|
||||
This option signifies the use of a CPU of the Cortex-M family.
|
||||
|
||||
|
||||
@@ -135,6 +135,7 @@ config NUM_IRQ_PRIO_BITS
|
||||
config RUNTIME_NMI
|
||||
bool
|
||||
prompt "Attach an NMI handler at runtime"
|
||||
select REBOOT
|
||||
default n
|
||||
help
|
||||
The kernel provides a simple NMI handler that simply hangs in a tight
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <nanokernel.h>
|
||||
#include <arch/cpu.h>
|
||||
#include <misc/printk.h>
|
||||
#include <misc/reboot.h>
|
||||
#include <toolchain.h>
|
||||
#include <sections.h>
|
||||
|
||||
@@ -51,7 +52,8 @@ static _NmiHandler_t handler = _SysNmiOnReset;
|
||||
static void _DefaultHandler(void)
|
||||
{
|
||||
printk("NMI received! Rebooting...\n");
|
||||
_ScbSystemReset();
|
||||
/* In ARM implementation sys_reboot ignores the parameter */
|
||||
sys_reboot(0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -33,6 +33,8 @@
|
||||
_ASM_FILE_PROLOGUE
|
||||
|
||||
GTEXT(__reset)
|
||||
GTEXT(memset)
|
||||
GDATA(_interrupt_stack)
|
||||
|
||||
/**
|
||||
*
|
||||
@@ -77,20 +79,29 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
|
||||
msr BASEPRI, r0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set PSP and use it to boot without using MSP, so that it
|
||||
* gets set to _interrupt_stack during nanoInit().
|
||||
*/
|
||||
ldr r0, =__CORTEXM_BOOT_PSP
|
||||
msr PSP, r0
|
||||
movs.n r0, #2 /* switch to using PSP (bit1 of CONTROL reg) */
|
||||
msr CONTROL, r0
|
||||
|
||||
#ifdef CONFIG_WDOG_INIT
|
||||
/* board-specific watchdog initialization is necessary */
|
||||
bl _WdogInit
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_INIT_STACKS
|
||||
ldr r0, =_interrupt_stack
|
||||
ldr r1, =0xaa
|
||||
ldr r2, =CONFIG_ISR_STACK_SIZE
|
||||
bl memset
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set PSP and use it to boot without using MSP, so that it
|
||||
* gets set to _interrupt_stack during nanoInit().
|
||||
*/
|
||||
ldr r0, =_interrupt_stack
|
||||
ldr r1, =CONFIG_ISR_STACK_SIZE
|
||||
adds r0, r0, r1
|
||||
msr PSP, r0
|
||||
movs.n r0, #2 /* switch to using PSP (bit1 of CONTROL reg) */
|
||||
msr CONTROL, r0
|
||||
|
||||
b _PrepC
|
||||
|
||||
#if defined(CONFIG_SOC_TI_LM3S6965_QEMU)
|
||||
|
||||
@@ -36,6 +36,8 @@
|
||||
|
||||
_ASM_FILE_PROLOGUE
|
||||
|
||||
GDATA(_main_stack)
|
||||
|
||||
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
|
||||
|
||||
/* in XIP kernels. the entry point is also the start of the vector table */
|
||||
@@ -43,7 +45,13 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
|
||||
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,__start)
|
||||
#endif
|
||||
|
||||
.word __CORTEXM_BOOT_MSP
|
||||
/*
|
||||
* setting the _very_ early boot on the main stack allows to use memset
|
||||
* on the interrupt stack when CONFIG_INIT_STACKS is enabled before
|
||||
* switching to the interrupt stack for the rest of the early boot
|
||||
*/
|
||||
.word _main_stack + CONFIG_MAIN_STACK_SIZE
|
||||
|
||||
.word __reset
|
||||
.word __nmi
|
||||
|
||||
|
||||
@@ -42,10 +42,6 @@ extern "C" {
|
||||
#include <sections.h>
|
||||
#include <misc/util.h>
|
||||
|
||||
/* location of MSP and PSP upon boot: at the end of SRAM */
|
||||
.equ __CORTEXM_BOOT_MSP, (CONFIG_SRAM_BASE_ADDRESS + KB(CONFIG_SRAM_SIZE) - 8)
|
||||
.equ __CORTEXM_BOOT_PSP, (__CORTEXM_BOOT_MSP - 0x100)
|
||||
|
||||
GTEXT(__start)
|
||||
GTEXT(_vector_table)
|
||||
|
||||
|
||||
@@ -103,7 +103,11 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
|
||||
bgt _EXIT_EXC
|
||||
|
||||
push {lr}
|
||||
|
||||
/* _is_next_thread_current must be called with interrupts locked */
|
||||
cpsid i
|
||||
blx _is_next_thread_current
|
||||
cpsie i
|
||||
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
|
||||
pop {r1}
|
||||
mov lr, r1
|
||||
|
||||
@@ -100,7 +100,7 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
|
||||
* Our policy is to express priority levels with special properties
|
||||
* via flags
|
||||
*/
|
||||
if (flags | IRQ_ZERO_LATENCY) {
|
||||
if (flags & IRQ_ZERO_LATENCY) {
|
||||
prio = 2;
|
||||
} else {
|
||||
prio += IRQ_PRIORITY_OFFSET;
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
#include <kernel_offsets.h>
|
||||
|
||||
GEN_OFFSET_SYM(_thread_arch_t, basepri);
|
||||
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
|
||||
|
||||
#ifdef CONFIG_FLOAT
|
||||
GEN_OFFSET_SYM(_thread_arch_t, preempt_float);
|
||||
|
||||
@@ -234,16 +234,6 @@ SECTION_FUNC(TEXT, __svc)
|
||||
_context_switch:
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set _Swap()'s default return code to -EAGAIN. This eliminates the
|
||||
* need for the timeout code to invoke fiberRtnValueSet().
|
||||
*/
|
||||
|
||||
mrs r2, PSP /* thread mode, stack frame is on PSP */
|
||||
ldr r3, =_k_neg_eagain
|
||||
ldr r3, [r3, #0]
|
||||
str r3, [r2, #___esf_t_a1_OFFSET]
|
||||
|
||||
/*
|
||||
* Unlock interrupts:
|
||||
* - in a SVC call, so protected against context switches
|
||||
@@ -305,17 +295,21 @@ SECTION_FUNC(TEXT, _Swap)
|
||||
ldr r2, [r1, #_kernel_offset_to_current]
|
||||
str r0, [r2, #_thread_offset_to_basepri]
|
||||
|
||||
/*
|
||||
* Set _Swap()'s default return code to -EAGAIN. This eliminates the need
|
||||
* for the timeout code to set it itself.
|
||||
*/
|
||||
ldr r1, =_k_neg_eagain
|
||||
ldr r1, [r1]
|
||||
str r1, [r2, #_thread_offset_to_swap_return_value]
|
||||
|
||||
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
|
||||
/* No priority-based interrupt masking on M0/M0+,
|
||||
* pending PendSV is used instead of svc
|
||||
*/
|
||||
ldr r1, =_SCS_ICSR
|
||||
ldr r2, =_SCS_ICSR_PENDSV
|
||||
str r2, [r1, #0]
|
||||
|
||||
/* load -EAGAIN as the default return value */
|
||||
ldr r0, =_k_neg_eagain
|
||||
ldr r0, [r0]
|
||||
ldr r3, =_SCS_ICSR_PENDSV
|
||||
str r3, [r1, #0]
|
||||
|
||||
/* Unlock interrupts to allow PendSV, since it's running at prio 0xff
|
||||
*
|
||||
@@ -323,12 +317,10 @@ SECTION_FUNC(TEXT, _Swap)
|
||||
* of a higher priority pending.
|
||||
*/
|
||||
cpsie i
|
||||
|
||||
/* PC stored in stack frame by the hw */
|
||||
bx lr
|
||||
#else /* CONFIG_CPU_CORTEX_M3_M4 */
|
||||
svc #0
|
||||
|
||||
/* r0 contains the return value if needed */
|
||||
bx lr
|
||||
#endif
|
||||
|
||||
/* coming back from exception, r2 still holds the pointer to _current */
|
||||
ldr r0, [r2, #_thread_offset_to_swap_return_value]
|
||||
bx lr
|
||||
|
||||
@@ -80,8 +80,8 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
|
||||
* @return N/A
|
||||
*/
|
||||
|
||||
void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
_thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
{
|
||||
@@ -112,14 +112,11 @@ void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
pInitCtx->xpsr =
|
||||
0x01000000UL; /* clear all, thumb bit is 1, even if RO */
|
||||
|
||||
/* k_q_node initialized upon first insertion in a list */
|
||||
tcs->base.flags = options | K_PRESTART;
|
||||
tcs->base.sched_locked = 0;
|
||||
_init_thread_base(&tcs->base, priority, K_PRESTART, options);
|
||||
|
||||
/* static threads overwrite it afterwards with real value */
|
||||
tcs->init_data = NULL;
|
||||
tcs->fn_abort = NULL;
|
||||
tcs->base.prio = priority;
|
||||
|
||||
#ifdef CONFIG_THREAD_CUSTOM_DATA
|
||||
/* Initialize custom data field (value is opaque to kernel) */
|
||||
@@ -135,12 +132,10 @@ void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
tcs->entry = (struct __thread_entry *)(pInitCtx);
|
||||
#endif
|
||||
|
||||
ARG_UNUSED(uk_task_ptr);
|
||||
|
||||
tcs->callee_saved.psp = (uint32_t)pInitCtx;
|
||||
tcs->arch.basepri = 0;
|
||||
|
||||
_nano_timeout_thread_init(tcs);
|
||||
/* swap_return_value can contain garbage */
|
||||
|
||||
/* initial values in all other registers/TCS entries are irrelevant */
|
||||
|
||||
|
||||
@@ -86,24 +86,6 @@ typedef struct __esf _esf_t;
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
/* Bitmask definitions for the struct tcs.flags bit field */
|
||||
|
||||
#define K_STATIC 0x00000800
|
||||
|
||||
#define K_READY 0x00000000 /* Thread is ready to run */
|
||||
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
|
||||
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
|
||||
#define K_PRESTART 0x00004000 /* Thread has not yet started */
|
||||
#define K_DEAD 0x00008000 /* Thread has terminated */
|
||||
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
|
||||
#define K_DUMMY 0x00020000 /* Not a real thread */
|
||||
#define K_EXECUTION_MASK \
|
||||
(K_TIMING | K_PENDING | K_PRESTART | K_DEAD | K_SUSPENDED | K_DUMMY)
|
||||
|
||||
#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */
|
||||
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
|
||||
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
|
||||
|
||||
/* stacks */
|
||||
|
||||
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
|
||||
@@ -142,6 +124,9 @@ struct _thread_arch {
|
||||
/* interrupt locking key */
|
||||
uint32_t basepri;
|
||||
|
||||
/* r0 in stack frame cannot be written to reliably */
|
||||
uint32_t swap_return_value;
|
||||
|
||||
#ifdef CONFIG_FLOAT
|
||||
/*
|
||||
* No cooperative floating point register set structure exists for
|
||||
|
||||
@@ -47,25 +47,51 @@ static ALWAYS_INLINE void nanoArchInit(void)
|
||||
_CpuIdleInit();
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Set the return value for the specified fiber (inline)
|
||||
*
|
||||
* The register used to store the return value from a function call invocation
|
||||
* to <value>. It is assumed that the specified <fiber> is pending, and thus
|
||||
* the fiber's thread is stored in its struct tcs structure.
|
||||
*
|
||||
* @param fiber pointer to the fiber
|
||||
* @param value is the value to set as a return value
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
static ALWAYS_INLINE void
|
||||
_arch_switch_to_main_thread(char *main_stack, size_t main_stack_size,
|
||||
_thread_entry_t _main)
|
||||
{
|
||||
/* get high address of the stack, i.e. its start (stack grows down) */
|
||||
char *start_of_main_stack;
|
||||
|
||||
start_of_main_stack = main_stack + main_stack_size;
|
||||
start_of_main_stack = (void *)STACK_ROUND_DOWN(start_of_main_stack);
|
||||
|
||||
_current = (void *)main_stack;
|
||||
|
||||
__asm__ __volatile__(
|
||||
|
||||
/* move to main() thread stack */
|
||||
"msr PSP, %0 \t\n"
|
||||
|
||||
/* unlock interrupts */
|
||||
#ifdef CONFIG_CPU_CORTEX_M0_M0PLUS
|
||||
"cpsie i \t\n"
|
||||
#else
|
||||
"movs %%r1, #0 \n\t"
|
||||
"msr BASEPRI, %%r1 \n\t"
|
||||
#endif
|
||||
|
||||
/* branch to _thread_entry(_main, 0, 0, 0) */
|
||||
"mov %%r0, %1 \n\t"
|
||||
"bx %2 \t\n"
|
||||
|
||||
/* never gets here */
|
||||
|
||||
:
|
||||
: "r"(start_of_main_stack),
|
||||
"r"(_main), "r"(_thread_entry)
|
||||
|
||||
: "r0", "r1", "sp"
|
||||
);
|
||||
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
{
|
||||
struct __esf *esf = (struct __esf *)thread->callee_saved.psp;
|
||||
|
||||
esf->a1 = value;
|
||||
thread->arch.swap_return_value = value;
|
||||
}
|
||||
|
||||
extern void nano_cpu_atomic_idle(unsigned int);
|
||||
|
||||
@@ -30,6 +30,9 @@
|
||||
#define _thread_offset_to_basepri \
|
||||
(___thread_t_arch_OFFSET + ___thread_arch_t_basepri_OFFSET)
|
||||
|
||||
#define _thread_offset_to_swap_return_value \
|
||||
(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
|
||||
|
||||
#define _thread_offset_to_preempt_float \
|
||||
(___thread_t_arch_OFFSET + ___thread_arch_t_preempt_float_OFFSET)
|
||||
|
||||
|
||||
@@ -87,8 +87,10 @@ int stm32_gpio_flags_to_conf(int flags, int *pincfg)
|
||||
}
|
||||
|
||||
if (direction == GPIO_DIR_OUT) {
|
||||
/* Pin is configured as an output */
|
||||
*pincfg = STM32F10X_PIN_CONFIG_DRIVE_PUSH_PULL;
|
||||
} else if (direction == GPIO_DIR_IN) {
|
||||
} else {
|
||||
/* Pin is configured as an input */
|
||||
int pud = flags & GPIO_PUD_MASK;
|
||||
|
||||
/* pull-{up,down} maybe? */
|
||||
@@ -100,8 +102,6 @@ int stm32_gpio_flags_to_conf(int flags, int *pincfg)
|
||||
/* floating */
|
||||
*pincfg = STM32F10X_PIN_CONFIG_BIAS_HIGH_IMPEDANCE;
|
||||
}
|
||||
} else {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -7,7 +7,6 @@ depends on SOC_SERIES_CC32XX
|
||||
|
||||
config SOC_CC3200
|
||||
bool "CC3200"
|
||||
select CPU_HAS_FPU
|
||||
select HAS_CC3200SDK
|
||||
|
||||
endchoice
|
||||
|
||||
@@ -60,8 +60,8 @@ struct init_stack_frame {
|
||||
};
|
||||
|
||||
|
||||
void _new_thread(char *stack_memory, unsigned stack_size,
|
||||
void *uk_task_ptr, _thread_entry_t thread_func,
|
||||
void _new_thread(char *stack_memory, size_t stack_size,
|
||||
_thread_entry_t thread_func,
|
||||
void *arg1, void *arg2, void *arg3,
|
||||
int priority, unsigned options)
|
||||
{
|
||||
@@ -85,11 +85,8 @@ void _new_thread(char *stack_memory, unsigned stack_size,
|
||||
|
||||
/* Initialize various struct k_thread members */
|
||||
thread = (struct k_thread *)stack_memory;
|
||||
thread->base.prio = priority;
|
||||
|
||||
/* k_q_node initialized upon first insertion in a list */
|
||||
thread->base.flags = options | K_PRESTART;
|
||||
thread->base.sched_locked = 0;
|
||||
_init_thread_base(&thread->base, priority, K_PRESTART, options);
|
||||
|
||||
/* static threads overwrite it afterwards with real value */
|
||||
thread->init_data = NULL;
|
||||
@@ -99,16 +96,10 @@ void _new_thread(char *stack_memory, unsigned stack_size,
|
||||
/* Initialize custom data field (value is opaque to kernel) */
|
||||
thread->custom_data = NULL;
|
||||
#endif
|
||||
ARG_UNUSED(uk_task_ptr);
|
||||
|
||||
thread->callee_saved.sp = (uint32_t)iframe;
|
||||
thread->callee_saved.ra = (uint32_t)_thread_entry_wrapper;
|
||||
thread->callee_saved.key = NIOS2_STATUS_PIE_MSK;
|
||||
/* Leave the rest of thread->callee_saved junk */
|
||||
|
||||
#ifdef CONFIG_NANO_TIMEOUTS
|
||||
_nano_timeout_thread_init(thread);
|
||||
#endif
|
||||
|
||||
thread_monitor_init(thread);
|
||||
}
|
||||
|
||||
@@ -47,24 +47,13 @@ extern "C" {
|
||||
#include <misc/dlist.h>
|
||||
#endif
|
||||
|
||||
/* Bitmask definitions for the struct tcs->flags bit field */
|
||||
#define K_STATIC 0x00000800
|
||||
/* nios2 bitmask definitions for the struct k_thread->flags bit field */
|
||||
|
||||
#define K_READY 0x00000000 /* Thread is ready to run */
|
||||
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
|
||||
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
|
||||
#define K_PRESTART 0x00004000 /* Thread has not yet started */
|
||||
#define K_DEAD 0x00008000 /* Thread has terminated */
|
||||
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
|
||||
#define K_DUMMY 0x00020000 /* Not a real thread */
|
||||
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
|
||||
K_DEAD | K_SUSPENDED | K_DUMMY)
|
||||
/* 1 = executing context is interrupt handler */
|
||||
#define INT_ACTIVE (1 << 1)
|
||||
|
||||
#define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */
|
||||
#define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */
|
||||
#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */
|
||||
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
|
||||
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
|
||||
/* 1 = executing context is exception handler */
|
||||
#define EXC_ACTIVE (1 << 2)
|
||||
|
||||
/* stacks */
|
||||
|
||||
|
||||
@@ -60,17 +60,12 @@
|
||||
/* SSE control/status register default value (used by assembler code) */
|
||||
extern uint32_t _sse_mxcsr_default_value;
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Save a thread's floating point context information.
|
||||
/*
|
||||
* Save a thread's floating point context information.
|
||||
*
|
||||
* This routine saves the system's "live" floating point context into the
|
||||
* specified thread control block. The SSE registers are saved only if the
|
||||
* thread is actually using them.
|
||||
*
|
||||
* @param tcs Pointer to thread control block.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
static void _FpCtxSave(struct tcs *tcs)
|
||||
{
|
||||
@@ -83,16 +78,11 @@ static void _FpCtxSave(struct tcs *tcs)
|
||||
_do_fp_regs_save(&tcs->arch.preempFloatReg);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Initialize a thread's floating point context information.
|
||||
/*
|
||||
* Initialize a thread's floating point context information.
|
||||
*
|
||||
* This routine initializes the system's "live" floating point context.
|
||||
* The SSE registers are initialized only if the thread is actually using them.
|
||||
*
|
||||
* @param tcs Pointer to thread control block.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
static inline void _FpCtxInit(struct tcs *tcs)
|
||||
{
|
||||
@@ -104,37 +94,9 @@ static inline void _FpCtxInit(struct tcs *tcs)
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Enable preservation of floating point context information.
|
||||
*
|
||||
* @brief Enable preservation of floating point context information.
|
||||
*
|
||||
* This routine informs the kernel that the specified thread (which may be
|
||||
* the current thread) will be using the floating point registers.
|
||||
* The @a options parameter indicates which floating point register sets
|
||||
* will be used by the specified thread:
|
||||
*
|
||||
* a) K_FP_REGS indicates x87 FPU and MMX registers only
|
||||
* b) K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
|
||||
*
|
||||
* Invoking this routine initializes the thread's floating point context info
|
||||
* to that of an FPU that has been reset. The next time the thread is scheduled
|
||||
* by _Swap() it will either inherit an FPU that is guaranteed to be in a "sane"
|
||||
* state (if the most recent user of the FPU was cooperatively swapped out)
|
||||
* or the thread's own floating point context will be loaded (if the most
|
||||
* recent user of the FPU was pre-empted, or if this thread is the first user
|
||||
* of the FPU). Thereafter, the kernel will protect the thread's FP context
|
||||
* so that it is not altered during a preemptive context switch.
|
||||
*
|
||||
* @warning
|
||||
* This routine should only be used to enable floating point support for a
|
||||
* thread that does not currently have such support enabled already.
|
||||
*
|
||||
* @param tcs Pointer to thread control block.
|
||||
* @param options Registers to be preserved (K_FP_REGS or K_SSE_REGS).
|
||||
*
|
||||
* @return N/A
|
||||
*
|
||||
* @internal
|
||||
* The transition from "non-FP supporting" to "FP supporting" must be done
|
||||
* atomically to avoid confusing the floating point logic used by _Swap(), so
|
||||
* this routine locks interrupts to ensure that a context switch does not occur.
|
||||
@@ -232,21 +194,8 @@ void k_float_enable(struct tcs *tcs, unsigned int options)
|
||||
}
|
||||
|
||||
/**
|
||||
* Disable preservation of floating point context information.
|
||||
*
|
||||
* @brief Disable preservation of floating point context information.
|
||||
*
|
||||
* This routine informs the kernel that the specified thread (which may be
|
||||
* the current thread) will no longer be using the floating point registers.
|
||||
*
|
||||
* @warning
|
||||
* This routine should only be used to disable floating point support for
|
||||
* a thread that currently has such support enabled.
|
||||
*
|
||||
* @param tcs Pointer to thread control block.
|
||||
*
|
||||
* @return N/A
|
||||
*
|
||||
* @internal
|
||||
* The transition from "FP supporting" to "non-FP supporting" must be done
|
||||
* atomically to avoid confusing the floating point logic used by _Swap(), so
|
||||
* this routine locks interrupts to ensure that a context switch does not occur.
|
||||
@@ -276,9 +225,8 @@ void k_float_disable(struct tcs *tcs)
|
||||
irq_unlock(imask);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Handler for "device not available" exception.
|
||||
/*
|
||||
* Handler for "device not available" exception.
|
||||
*
|
||||
* This routine is registered to handle the "device not available" exception
|
||||
* (vector = 7).
|
||||
@@ -286,10 +234,6 @@ void k_float_disable(struct tcs *tcs)
|
||||
* The processor will generate this exception if any x87 FPU, MMX, or SSEx
|
||||
* instruction is executed while CR0[TS]=1. The handler then enables the
|
||||
* current thread to use all supported floating point registers.
|
||||
*
|
||||
* @param pEsf This value is not used.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _FpNotAvailableExcHandler(NANO_ESF *pEsf)
|
||||
{
|
||||
|
||||
@@ -312,10 +312,6 @@ alreadyOnIntStack:
|
||||
* _Swap() to determine whether non-floating registers need to be
|
||||
* preserved using the lazy save/restore algorithm, or to indicate to
|
||||
* debug tools that a preemptive context switch has occurred.
|
||||
*
|
||||
* Setting the NO_METRICS bit tells _Swap() that the per-execution context
|
||||
* [totalRunTime] calculation has already been performed and that
|
||||
* there is no need to do it again.
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
|
||||
|
||||
@@ -76,22 +76,18 @@ static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread)
|
||||
* @return N/A
|
||||
*/
|
||||
static void _new_thread_internal(char *pStackMem, unsigned stackSize,
|
||||
void *uk_task_ptr, int priority,
|
||||
int priority,
|
||||
unsigned options)
|
||||
{
|
||||
unsigned long *pInitialCtx;
|
||||
/* ptr to the new task's k_thread */
|
||||
struct k_thread *thread = (struct k_thread *)pStackMem;
|
||||
|
||||
thread->base.prio = priority;
|
||||
#if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
|
||||
thread->arch.excNestCount = 0;
|
||||
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
|
||||
|
||||
/* k_q_node initialized upon first insertion in a list */
|
||||
|
||||
thread->base.flags = options | K_PRESTART;
|
||||
thread->base.sched_locked = 0;
|
||||
_init_thread_base(&thread->base, priority, K_PRESTART, options);
|
||||
|
||||
/* static threads overwrite it afterwards with real value */
|
||||
thread->init_data = NULL;
|
||||
@@ -103,8 +99,6 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
|
||||
thread->custom_data = NULL;
|
||||
#endif
|
||||
|
||||
ARG_UNUSED(uk_task_ptr);
|
||||
|
||||
/*
|
||||
* The creation of the initial stack for the task has already been done.
|
||||
* Now all that is needed is to set the ESP. However, we have been passed
|
||||
@@ -139,8 +133,6 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
|
||||
PRINTK("\nstruct thread * = 0x%x", thread);
|
||||
|
||||
thread_monitor_init(thread);
|
||||
|
||||
_nano_timeout_thread_init(thread);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
|
||||
@@ -245,8 +237,8 @@ __asm__("\t.globl _thread_entry\n"
|
||||
*
|
||||
* @return opaque pointer to initialized k_thread structure
|
||||
*/
|
||||
void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
_thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
{
|
||||
@@ -308,5 +300,5 @@ void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
* aside for the thread's stack.
|
||||
*/
|
||||
|
||||
_new_thread_internal(pStackMem, stackSize, uk_task_ptr, priority, options);
|
||||
_new_thread_internal(pStackMem, stackSize, priority, options);
|
||||
}
|
||||
|
||||
@@ -52,36 +52,21 @@
|
||||
|
||||
#define STACK_ALIGN_SIZE 4
|
||||
|
||||
/*
|
||||
* Bitmask definitions for the struct k_thread->flags bit field
|
||||
*/
|
||||
/* x86 Bitmask definitions for the struct k_thread->flags bit field */
|
||||
|
||||
#define K_STATIC 0x00000800
|
||||
/* executing context is interrupt handler */
|
||||
#define INT_ACTIVE (1 << 1)
|
||||
|
||||
#define K_READY 0x00000000 /* Thread is ready to run */
|
||||
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
|
||||
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
|
||||
#define K_PRESTART 0x00004000 /* Thread has not yet started */
|
||||
#define K_DEAD 0x00008000 /* Thread has terminated */
|
||||
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
|
||||
#define K_DUMMY 0x00020000 /* Not a real thread */
|
||||
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
|
||||
K_DEAD | K_SUSPENDED | K_DUMMY)
|
||||
|
||||
#define INT_ACTIVE 0x2 /* 1 = executing context is interrupt handler */
|
||||
#define EXC_ACTIVE 0x4 /* 1 = executing context is exception handler */
|
||||
#if defined(CONFIG_FP_SHARING)
|
||||
#define K_FP_REGS 0x10 /* 1 = thread uses floating point registers */
|
||||
#endif
|
||||
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
|
||||
#define K_SSE_REGS 0x20 /* 1 = thread uses SSEx (and also FP) registers */
|
||||
#endif
|
||||
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
|
||||
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
|
||||
#define NO_METRICS_BIT_OFFSET 0xa /* Bit position of NO_METRICS */
|
||||
/* executing context is exception handler */
|
||||
#define EXC_ACTIVE (1 << 2)
|
||||
|
||||
#define INT_OR_EXC_MASK (INT_ACTIVE | EXC_ACTIVE)
|
||||
|
||||
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
|
||||
/* thread uses SSEx (and also FP) registers */
|
||||
#define K_SSE_REGS (1 << 5)
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
|
||||
#define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
|
||||
#elif defined(CONFIG_FP_SHARING)
|
||||
|
||||
@@ -30,15 +30,15 @@ uint64_t _pm_save_gdtr;
|
||||
uint64_t _pm_save_idtr;
|
||||
uint32_t _pm_save_esp;
|
||||
|
||||
extern void _power_soc_sleep(void);
|
||||
extern void _power_restore_cpu_context(void);
|
||||
extern void _power_soc_deep_sleep(void);
|
||||
|
||||
#if (defined(CONFIG_SYS_POWER_DEEP_SLEEP))
|
||||
static uint32_t *__x86_restore_info = (uint32_t *)CONFIG_BSP_SHARED_RAM_ADDR;
|
||||
|
||||
static void _deep_sleep(enum power_states state)
|
||||
{
|
||||
int restore;
|
||||
|
||||
__asm__ volatile ("wbinvd");
|
||||
|
||||
/*
|
||||
* Setting resume vector inside the restore_cpu_context
|
||||
* function since we have nothing to do before cpu context
|
||||
@@ -47,22 +47,20 @@ static void _deep_sleep(enum power_states state)
|
||||
* can be done before cpu context is restored and control
|
||||
* transferred to _sys_soc_suspend.
|
||||
*/
|
||||
qm_x86_set_resume_vector(_sys_soc_restore_cpu_context,
|
||||
qm_x86_set_resume_vector(_power_restore_cpu_context,
|
||||
*__x86_restore_info);
|
||||
|
||||
restore = _sys_soc_save_cpu_context();
|
||||
power_soc_set_x86_restore_flag();
|
||||
|
||||
if (!restore) {
|
||||
power_soc_set_x86_restore_flag();
|
||||
|
||||
switch (state) {
|
||||
case SYS_POWER_STATE_DEEP_SLEEP_1:
|
||||
power_soc_sleep();
|
||||
case SYS_POWER_STATE_DEEP_SLEEP:
|
||||
power_soc_deep_sleep();
|
||||
default:
|
||||
break;
|
||||
}
|
||||
switch (state) {
|
||||
case SYS_POWER_STATE_DEEP_SLEEP_1:
|
||||
_power_soc_sleep();
|
||||
break;
|
||||
case SYS_POWER_STATE_DEEP_SLEEP:
|
||||
_power_soc_deep_sleep();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -23,30 +23,24 @@ GDATA(_pm_save_gdtr)
|
||||
GDATA(_pm_save_idtr)
|
||||
GDATA(_pm_save_esp)
|
||||
|
||||
GTEXT(_sys_soc_save_cpu_context)
|
||||
GTEXT(_sys_soc_restore_cpu_context)
|
||||
GTEXT(_sys_soc_resume_from_deep_sleep)
|
||||
GTEXT(_power_restore_cpu_context)
|
||||
GTEXT(_power_soc_sleep)
|
||||
GTEXT(_power_soc_deep_sleep)
|
||||
|
||||
SECTION_FUNC(TEXT, save_cpu_context)
|
||||
movl %esp, %eax /* save ptr to return address */
|
||||
|
||||
SECTION_FUNC(TEXT, _sys_soc_save_cpu_context)
|
||||
movl %esp, %eax /* save ptr to return address */
|
||||
pushf /* save flags */
|
||||
pusha /* save GPRs */
|
||||
|
||||
movl %esp, _pm_save_esp /* save stack ptr */
|
||||
sidtl _pm_save_idtr /* save idtr */
|
||||
sgdtl _pm_save_gdtr /* save gdtr */
|
||||
|
||||
pushl (%eax) /* push return address */
|
||||
|
||||
xorl %eax, %eax /* 0 indicates saved context */
|
||||
pushl (%eax) /* push return address */
|
||||
ret
|
||||
|
||||
SECTION_FUNC(TEXT, _sys_soc_restore_cpu_context)
|
||||
/*
|
||||
* Will transfer control to _sys_power_save_cpu_context,
|
||||
* from where it will return 1 indicating the function
|
||||
* is exiting after a context switch.
|
||||
*/
|
||||
SECTION_FUNC(TEXT, _power_restore_cpu_context)
|
||||
lgdtl _pm_save_gdtr /* restore gdtr */
|
||||
lidtl _pm_save_idtr /* restore idtr */
|
||||
movl _pm_save_esp, %esp /* restore saved stack ptr */
|
||||
@@ -54,18 +48,32 @@ SECTION_FUNC(TEXT, _sys_soc_restore_cpu_context)
|
||||
popf /* restore saved flags */
|
||||
|
||||
/*
|
||||
* At this point context is restored as it was saved
|
||||
* in _sys_soc_save_cpu_context. The following ret
|
||||
* will emulate a return from that function. Move 1
|
||||
* to eax to emulate a return 1. The caller of
|
||||
* _sys_soc_save_cpu_context will identify it is
|
||||
* returning from a context restore based on the
|
||||
* return value = 1.
|
||||
* At this point the stack contents will be as follows:
|
||||
*
|
||||
* Saved context
|
||||
* ESP ---> Return address of save_cpu_context
|
||||
* Return address of _power_soc_sleep/deep_sleep
|
||||
*
|
||||
* We just popped the saved context. Next we pop out the address
|
||||
* of the caller of save_cpu_context.Then the ret would return
|
||||
* to caller of _power_soc_sleep or _power_soc_deep_sleep.
|
||||
*
|
||||
*/
|
||||
xorl %eax, %eax
|
||||
incl %eax
|
||||
addl $4, %esp
|
||||
ret
|
||||
|
||||
SECTION_FUNC(TEXT, _power_soc_sleep)
|
||||
call save_cpu_context
|
||||
wbinvd
|
||||
call power_soc_sleep
|
||||
/* Does not return */
|
||||
|
||||
SECTION_FUNC(TEXT, _power_soc_deep_sleep)
|
||||
call save_cpu_context
|
||||
wbinvd
|
||||
call power_soc_deep_sleep
|
||||
/* Does not return */
|
||||
|
||||
/*
|
||||
* This is an example function to handle the deep sleep resume notification
|
||||
* in the absence of bootloader context restore support.
|
||||
@@ -78,8 +86,8 @@ SECTION_FUNC(TEXT, _sys_soc_restore_cpu_context)
|
||||
*/
|
||||
SECTION_FUNC(TEXT, _sys_soc_resume_from_deep_sleep)
|
||||
movl $CONFIG_BSP_SHARED_RAM_ADDR, %eax
|
||||
cmpl $_sys_soc_restore_cpu_context, (%eax)
|
||||
je _sys_soc_restore_cpu_context
|
||||
cmpl $_power_restore_cpu_context, (%eax)
|
||||
je _power_restore_cpu_context
|
||||
ret
|
||||
|
||||
#endif
|
||||
|
||||
@@ -30,35 +30,6 @@ enum power_states {
|
||||
SYS_POWER_STATE_MAX
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Save CPU context
|
||||
*
|
||||
* This function would save the CPU context in the stack. It
|
||||
* would also save the idtr and gdtr registers. When context is
|
||||
* restored by _sys_soc_restore_cpu_context(), control will be
|
||||
* transferred into this function where the context was originally
|
||||
* saved. The return values would indicate whether it is returning
|
||||
* after saving context or after a context restore transferred
|
||||
* control to it.
|
||||
*
|
||||
* @retval 0 Indicates it is returning after saving cpu context
|
||||
* @retval 1 Indicates cpu context restore transferred control to it.
|
||||
*/
|
||||
int _sys_soc_save_cpu_context(void);
|
||||
|
||||
/**
|
||||
* @brief Restore CPU context
|
||||
*
|
||||
* This function would restore the CPU context that was saved in
|
||||
* the stack by _sys_soc_save_cpu_context(). It would also restore
|
||||
* the idtr and gdtr registers.
|
||||
*
|
||||
* After context is restored, control will be transferred into
|
||||
* _sys_soc_save_cpu_context() function where the context was originally
|
||||
* saved.
|
||||
*/
|
||||
FUNC_NORETURN void _sys_soc_restore_cpu_context(void);
|
||||
|
||||
/**
|
||||
* @brief Put processor into low power state
|
||||
*
|
||||
|
||||
@@ -15,3 +15,4 @@ CONFIG_UART_QMSI=y
|
||||
CONFIG_CONSOLE=y
|
||||
CONFIG_UART_CONSOLE=y
|
||||
CONFIG_SERIAL=y
|
||||
CONFIG_OMIT_FRAME_POINTER=y
|
||||
|
||||
@@ -18,3 +18,4 @@ CONFIG_UART_NS16550_PORT_1=y
|
||||
CONFIG_UART_NS16550_PORT_0=n
|
||||
CONFIG_UART_INTERRUPT_DRIVEN=y
|
||||
CONFIG_GPIO=y
|
||||
CONFIG_OMIT_FRAME_POINTER=y
|
||||
|
||||
@@ -15,3 +15,4 @@ CONFIG_UART_CONSOLE=y
|
||||
CONFIG_UART_QMSI=y
|
||||
CONFIG_UART_CONSOLE=y
|
||||
CONFIG_SERIAL=y
|
||||
CONFIG_OMIT_FRAME_POINTER=y
|
||||
|
||||
@@ -7,16 +7,16 @@ Welcome to the Zephyr Project's :abbr:`API (Application Programing Interface)`
|
||||
documentation.
|
||||
|
||||
This section contains the API documentation automatically extracted from the
|
||||
code. To ease navigation, we have split the APIs in nanokernel APIs and
|
||||
microkernel APIs. If you are looking for a specific API, enter it on the
|
||||
search box. The search results display all sections containing information
|
||||
code. If you are looking for a specific API, enter it on the search box.
|
||||
The search results display all sections containing information
|
||||
about that API.
|
||||
|
||||
The use of the Zephyr APIs is the same for all SoCs and boards.
|
||||
The Zephyr APIs are used the same way on all SoCs and boards.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
kernel_api.rst
|
||||
device.rst
|
||||
bluetooth.rst
|
||||
io_interfaces.rst
|
||||
@@ -25,4 +25,3 @@ The use of the Zephyr APIs is the same for all SoCs and boards.
|
||||
power_management_api
|
||||
file_system
|
||||
testing
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
.. _event_logger:
|
||||
|
||||
Event Logger APIs
|
||||
#################
|
||||
Event Logging APIs
|
||||
##################
|
||||
|
||||
.. contents::
|
||||
:depth: 1
|
||||
@@ -11,6 +11,20 @@ Event Logger APIs
|
||||
Event Logger
|
||||
************
|
||||
|
||||
An event logger is an object that can record the occurrence of significant
|
||||
events, which can be subsequently extracted and reviewed.
|
||||
|
||||
.. doxygengroup:: event_logger
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
:content-only:
|
||||
|
||||
Kernel Event Logger
|
||||
*******************
|
||||
|
||||
The kernel event logger records the occurrence of significant kernel events,
|
||||
which can be subsequently extracted and reviewed.
|
||||
(See :ref:`kernel_event_logger_v2`.)
|
||||
|
||||
.. doxygengroup:: kernel_event_logger
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
239
doc/api/kernel_api.rst
Normal file
239
doc/api/kernel_api.rst
Normal file
@@ -0,0 +1,239 @@
|
||||
.. _kernel_apis:
|
||||
|
||||
Kernel APIs
|
||||
###########
|
||||
|
||||
This section contains APIs for the kernel's core services,
|
||||
as described in the :ref:`kernel_v2`.
|
||||
|
||||
.. important::
|
||||
Unless otherwise noted these APIs can be used by threads, but not by ISRs.
|
||||
|
||||
.. contents::
|
||||
:depth: 1
|
||||
:local:
|
||||
:backlinks: top
|
||||
|
||||
Threads
|
||||
*******
|
||||
|
||||
A thread is an independently scheduled series of instructions that implements
|
||||
a portion of an application's processing. Threads are used to perform processing
|
||||
that is too lengthy or too complex to be performed by an ISR.
|
||||
(See :ref:`threads_v2`.)
|
||||
|
||||
.. doxygengroup:: thread_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Workqueues
|
||||
**********
|
||||
|
||||
A workqueue processes a series of work items by executing the associated
|
||||
functions in a dedicated thread. Workqueues are typically used by an ISR
|
||||
or high-priority thread to offload non-urgent processing.
|
||||
(See :ref:`workqueues_v2`.)
|
||||
|
||||
.. doxygengroup:: workqueue_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Clocks
|
||||
******
|
||||
|
||||
Kernel clocks enable threads and ISRs to measure the passage of time
|
||||
with either normal and high precision.
|
||||
(See :ref:`clocks_v2`.)
|
||||
|
||||
.. doxygengroup:: clock_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Timers
|
||||
******
|
||||
|
||||
Timers enable threads to measure the passage of time, and to optionally execute
|
||||
an action when the timer expires.
|
||||
(See :ref:`timers_v2`.)
|
||||
|
||||
.. doxygengroup:: timer_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Memory Slabs
|
||||
************
|
||||
|
||||
Memory slabs enable the dynamic allocation and release of fixed-size
|
||||
memory blocks.
|
||||
(See :ref:`memory_slabs_v2`.)
|
||||
|
||||
.. doxygengroup:: mem_slab_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Memory Pools
|
||||
************
|
||||
|
||||
Memory pools enable the dynamic allocation and release of variable-size
|
||||
memory blocks.
|
||||
(See :ref:`memory_pools_v2`.)
|
||||
|
||||
.. doxygengroup:: mem_pool_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Heap Memory Pool
|
||||
****************
|
||||
|
||||
The heap memory pools enable the dynamic allocation and release of memory
|
||||
in a :cpp:func:`malloc()`-like manner.
|
||||
(See :ref:`heap_v2`.)
|
||||
|
||||
.. doxygengroup:: heap_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Semaphores
|
||||
**********
|
||||
|
||||
Semaphores provide traditional counting semaphore capabilities.
|
||||
(See :ref:`semaphores_v2`.)
|
||||
|
||||
.. doxygengroup:: semaphore_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Mutexes
|
||||
*******
|
||||
|
||||
Mutexes provide traditional reentrant mutex capabilities
|
||||
with basic priority inheritance.
|
||||
(See :ref:`mutexes_v2`.)
|
||||
|
||||
.. doxygengroup:: mutex_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Alerts
|
||||
******
|
||||
|
||||
Alerts enable an application to perform asynchronous signalling,
|
||||
somewhat akin to Unix-style signals.
|
||||
(See :ref:`alerts_v2`.)
|
||||
|
||||
.. doxygengroup:: alert_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Fifos
|
||||
*****
|
||||
|
||||
Fifos provide traditional first in, first out (FIFO) queuing of data items
|
||||
of any size.
|
||||
(See :ref:`fifos_v2`.)
|
||||
|
||||
.. doxygengroup:: fifo_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Lifos
|
||||
*****
|
||||
|
||||
Lifos provide traditional last in, first out (LIFO) queuing of data items
|
||||
of any size.
|
||||
(See :ref:`lifos_v2`.)
|
||||
|
||||
.. doxygengroup:: lifo_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Stacks
|
||||
******
|
||||
|
||||
Stacks provide traditional last in, first out (LIFO) queuing of 32-bit
|
||||
data items.
|
||||
(See :ref:`stacks_v2`.)
|
||||
|
||||
.. doxygengroup:: stack_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Message Queues
|
||||
**************
|
||||
|
||||
Message queues provide a simple message queuing mechanism
|
||||
for fixed-size data items.
|
||||
(See :ref:`message_queues_v2`.)
|
||||
|
||||
.. doxygengroup:: msgq_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Mailboxes
|
||||
*********
|
||||
|
||||
Mailboxes provide an enhanced message queuing mechanism
|
||||
for variable-size messages.
|
||||
(See :ref:`mailboxes_v2`.)
|
||||
|
||||
.. doxygengroup:: mailbox_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Pipes
|
||||
*****
|
||||
|
||||
Pipes provide a traditional anonymous pipe mechanism for sending
|
||||
variable-size chunks of data, in whole or in part.
|
||||
(See :ref:`pipes_v2`.)
|
||||
|
||||
.. doxygengroup:: pipe_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Interrupt Service Routines (ISRs)
|
||||
*********************************
|
||||
|
||||
An interrupt service routine is a series of instructions that is
|
||||
executed asynchronously in response to a hardware or software interrupt.
|
||||
(See :ref:`interrupts_v2`.)
|
||||
|
||||
.. doxygengroup:: isr_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Atomic Services
|
||||
***************
|
||||
|
||||
The atomic services enable multiple threads and ISRs to read and modify
|
||||
32-bit variables in an uninterruptible manner.
|
||||
(See :ref:`atomic_v2`.)
|
||||
|
||||
.. important::
|
||||
All atomic services APIs can be used by both threads and ISRs.
|
||||
|
||||
.. doxygengroup:: atomic_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Floating Point Services
|
||||
***********************
|
||||
|
||||
The floating point services enable threads to use a board's floating point
|
||||
registers.
|
||||
(See :ref:`float_v2`.)
|
||||
|
||||
.. doxygengroup:: float_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Ring Buffers
|
||||
************
|
||||
|
||||
Ring buffers enable simple first in, first out (FIFO) queuing
|
||||
of variable-size data items.
|
||||
(See :ref:`ring_buffers_v2`.)
|
||||
|
||||
.. doxygengroup:: ring_buffer_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
@@ -71,7 +71,7 @@ The following code defines and initializes an empty fifo.
|
||||
k_fifo_init(&my_fifo);
|
||||
|
||||
Alternatively, an empty fifo can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_FIFO_DEFINE()`.
|
||||
by calling :c:macro:`K_FIFO_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -153,6 +153,7 @@ APIs
|
||||
|
||||
The following fifo APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_FIFO_DEFINE`
|
||||
* :cpp:func:`k_fifo_init()`
|
||||
* :cpp:func:`k_fifo_put()`
|
||||
* :cpp:func:`k_fifo_put_list()`
|
||||
|
||||
@@ -62,7 +62,7 @@ The following defines and initializes an empty lifo.
|
||||
k_lifo_init(&my_lifo);
|
||||
|
||||
Alternatively, an empty lifo can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_LIFO_DEFINE()`.
|
||||
by calling :c:macro:`K_LIFO_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -141,6 +141,7 @@ APIs
|
||||
|
||||
The following lifo APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_LIFO_DEFINE`
|
||||
* :cpp:func:`k_lifo_init()`
|
||||
* :cpp:func:`k_lifo_put()`
|
||||
* :cpp:func:`k_lifo_get()`
|
||||
|
||||
@@ -130,7 +130,7 @@ The following code defines and initializes an empty mailbox.
|
||||
k_mbox_init(&my_mailbox);
|
||||
|
||||
Alternatively, a mailbox can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MBOX_DEFINE()`.
|
||||
by calling :c:macro:`K_MBOX_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -484,12 +484,12 @@ The receiving thread must then respond as follows:
|
||||
the mailbox has already completed data retrieval and deleted the message.
|
||||
|
||||
* If the message descriptor size is non-zero and the receiving thread still
|
||||
wants to retrieve the data, the thread must call :c:func:`k_mbox_data_get()`
|
||||
wants to retrieve the data, the thread must call :cpp:func:`k_mbox_data_get()`
|
||||
and supply a message buffer large enough to hold the data. The mailbox copies
|
||||
the data into the message buffer and deletes the message.
|
||||
|
||||
* If the message descriptor size is non-zero and the receiving thread does *not*
|
||||
want to retrieve the data, the thread must call :c:func:`k_mbox_data_get()`.
|
||||
want to retrieve the data, the thread must call :cpp:func:`k_mbox_data_get()`.
|
||||
and specify a message buffer of :c:macro:`NULL`. The mailbox deletes
|
||||
the message without copying the data.
|
||||
|
||||
@@ -548,7 +548,7 @@ A receiving thread may choose to retrieve message data into a memory block,
|
||||
rather than a message buffer. This is done in much the same way as retrieving
|
||||
data subsequently into a message buffer --- the receiving thread first
|
||||
receives the message without its data, then retrieves the data by calling
|
||||
:c:func:`k_mbox_data_block_get()`. The mailbox fills in the block descriptor
|
||||
:cpp:func:`k_mbox_data_block_get()`. The mailbox fills in the block descriptor
|
||||
supplied by the receiving thread, allowing the thread to access the data.
|
||||
The mailbox also deletes the received message, since data retrieval
|
||||
has been completed. The receiving thread is then responsible for freeing
|
||||
@@ -634,6 +634,8 @@ APIs
|
||||
|
||||
The following APIs for a mailbox are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MBOX_DEFINE`
|
||||
* :cpp:func:`k_mbox_init()`
|
||||
* :cpp:func:`k_mbox_put()`
|
||||
* :cpp:func:`k_mbox_async_put()`
|
||||
* :cpp:func:`k_mbox_get()`
|
||||
|
||||
@@ -85,7 +85,7 @@ that is capable of holding 10 items, each of which is 12 bytes long.
|
||||
k_msgq_init(&my_msgq, my_msgq_buffer, sizeof(data_item_type), 10);
|
||||
|
||||
Alternatively, a message queue can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MSGQ_DEFINE()`.
|
||||
by calling :c:macro:`K_MSGQ_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above. Observe
|
||||
that the macro defines both the message queue and its buffer.
|
||||
@@ -176,6 +176,7 @@ APIs
|
||||
|
||||
The following message queue APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MSGQ_DEFINE`
|
||||
* :cpp:func:`k_msgq_init()`
|
||||
* :cpp:func:`k_msgq_put()`
|
||||
* :cpp:func:`k_msgq_get()`
|
||||
|
||||
@@ -54,7 +54,7 @@ Implementation
|
||||
|
||||
A pipe is defined using a variable of type :c:type:`struct k_pipe` and an
|
||||
optional character buffer of type :c:type:`unsigned char`. It must then be
|
||||
initialized by calling :c:func:`k_pipe_init()`.
|
||||
initialized by calling :cpp:func:`k_pipe_init()`.
|
||||
|
||||
The following code defines and initializes an empty pipe that has a ring
|
||||
buffer capable of holding 100 bytes and is aligned to a 4-byte boundary.
|
||||
@@ -68,7 +68,7 @@ buffer capable of holding 100 bytes and is aligned to a 4-byte boundary.
|
||||
k_pipe_init(&my_pipe, my_ring_buffer, sizeof(my_ring_buffer));
|
||||
|
||||
Alternatively, a pipe can be defined and initialized at compile time by
|
||||
calling :c:macro:`K_PIPE_DEFINE()`.
|
||||
calling :c:macro:`K_PIPE_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above. Observe
|
||||
that that macro defines both the pipe and its ring buffer.
|
||||
@@ -80,7 +80,7 @@ that that macro defines both the pipe and its ring buffer.
|
||||
Writing to a Pipe
|
||||
=================
|
||||
|
||||
Data is added to a pipe by calling :c:func:`k_pipe_put()`.
|
||||
Data is added to a pipe by calling :cpp:func:`k_pipe_put()`.
|
||||
|
||||
The following code builds on the example above, and uses the pipe to pass
|
||||
data from a producing thread to one or more consuming threads. If the pipe's
|
||||
@@ -126,7 +126,7 @@ waits for a specified amount of time.
|
||||
Reading from a Pipe
|
||||
===================
|
||||
|
||||
Data is read from the pipe by calling :c:func:`k_pipe_get()`.
|
||||
Data is read from the pipe by calling :cpp:func:`k_pipe_get()`.
|
||||
|
||||
The following code builds on the example above, and uses the pipe to
|
||||
process data items generated by one or more producing threads.
|
||||
@@ -141,7 +141,7 @@ process data items generated by one or more producing threads.
|
||||
|
||||
while (1) {
|
||||
rc = k_pipe_get(&my_pipe, buffer, sizeof(buffer), &bytes_read,
|
||||
sizeof(header), 100);
|
||||
sizeof(header), K_MSEC(100));
|
||||
|
||||
if ((rc < 0) || (bytes_read < sizeof (header))) {
|
||||
/* Incomplete message header received */
|
||||
@@ -172,14 +172,15 @@ Configuration Options
|
||||
|
||||
Related configuration options:
|
||||
|
||||
* CONFIG_NUM_PIPE_ASYNC_MSGS
|
||||
* :option:`CONFIG_NUM_PIPE_ASYNC_MSGS`
|
||||
|
||||
APIs
|
||||
****
|
||||
|
||||
The following message queue APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:func:`k_pipe_init()`
|
||||
* :c:func:`k_pipe_put()`
|
||||
* :c:func:`k_pipe_get()`
|
||||
* :c:func:`k_pipe_block_put()`
|
||||
* :c:macro:`K_PIPE_DEFINE`
|
||||
* :cpp:func:`k_pipe_init()`
|
||||
* :cpp:func:`k_pipe_put()`
|
||||
* :cpp:func:`k_pipe_get()`
|
||||
* :cpp:func:`k_pipe_block_put()`
|
||||
|
||||
@@ -69,7 +69,7 @@ up to ten 32-bit data values.
|
||||
k_stack_init(&my_stack, my_stack_array, MAX_ITEMS);
|
||||
|
||||
Alternatively, a stack can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_STACK_DEFINE()`.
|
||||
by calling :c:macro:`K_STACK_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above. Observe
|
||||
that the macro defines both the stack and its array of data values.
|
||||
@@ -136,6 +136,7 @@ APIs
|
||||
|
||||
The following stack APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_STACK_DEFINE`
|
||||
* :cpp:func:`k_stack_init()`
|
||||
* :cpp:func:`k_stack_push()`
|
||||
* :cpp:func:`k_stack_pop()`
|
||||
|
||||
@@ -122,7 +122,7 @@ However, since a memory pool also requires a number of variable-size data
|
||||
structures to represent its block sets and the status of its quad-blocks,
|
||||
the kernel does not support the run-time definition of a memory pool.
|
||||
A memory pool can only be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MEM_POOL_DEFINE()`.
|
||||
by calling :c:macro:`K_MEM_POOL_DEFINE`.
|
||||
|
||||
The following code defines and initializes a memory pool that has 3 blocks
|
||||
of 4096 bytes each, which can be partitioned into blocks as small as 64 bytes
|
||||
@@ -202,9 +202,9 @@ Configuration Options
|
||||
|
||||
Related configuration options:
|
||||
|
||||
* :option:`CONFIG_MEM_POOL_AD_BEFORE_SEARCH_FOR_BIGGERBLOCK`
|
||||
* :option:`CONFIG_MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK`
|
||||
* :option:`CONFIG_MEM_POOL_AD_NONE`
|
||||
* :option:`CONFIG_MEM_POOL_SPLIT_BEFORE_DEFRAG`
|
||||
* :option:`CONFIG_MEM_POOL_DEFRAG_BEFORE_SPLIT`
|
||||
* :option:`CONFIG_MEM_POOL_SPLIT_ONLY`
|
||||
|
||||
|
||||
APIs
|
||||
@@ -212,6 +212,7 @@ APIs
|
||||
|
||||
The following memory pool APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MEM_POOL_DEFINE`
|
||||
* :cpp:func:`k_mem_pool_alloc()`
|
||||
* :cpp:func:`k_mem_pool_free()`
|
||||
* :cpp:func:`k_mem_pool_defragment()`
|
||||
* :cpp:func:`k_mem_pool_defrag()`
|
||||
|
||||
@@ -81,7 +81,7 @@ that are 400 bytes long, each of which is aligned to a 4-byte boundary..
|
||||
k_mem_slab_init(&my_slab, my_slab_buffer, 400, 6);
|
||||
|
||||
Alternatively, a memory slab can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MEM_SLAB_DEFINE()`.
|
||||
by calling :c:macro:`K_MEM_SLAB_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above. Observe
|
||||
that the macro defines both the memory slab and its buffer.
|
||||
@@ -146,6 +146,7 @@ APIs
|
||||
|
||||
The following memory slab APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MEM_SLAB_DEFINE`
|
||||
* :cpp:func:`k_mem_slab_init()`
|
||||
* :cpp:func:`k_mem_slab_alloc()`
|
||||
* :cpp:func:`k_mem_slab_free()`
|
||||
|
||||
@@ -31,7 +31,7 @@ Defining an Atomic Variable
|
||||
An atomic variable is defined using a variable of type :c:type:`atomic_t`.
|
||||
|
||||
By default an atomic variable is initialized to zero. However, it can be given
|
||||
a different value using :c:macro:`ATOMIC_INIT()`:
|
||||
a different value using :c:macro:`ATOMIC_INIT`:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
@@ -65,6 +65,10 @@ by a higher priority context that also calls the routine.
|
||||
Manipulating an Array of Atomic Variables
|
||||
=========================================
|
||||
|
||||
An array of 32-bit atomic variables can be defined in the conventional manner.
|
||||
However, you can also define an N-bit array of atomic variables using
|
||||
:c:macro:`ATOMIC_DEFINE`.
|
||||
|
||||
A single bit in array of atomic variables can be manipulated using
|
||||
the APIs listed at the end of this section that end with :cpp:func:`_bit`.
|
||||
|
||||
@@ -111,6 +115,8 @@ APIs
|
||||
|
||||
The following atomic operation APIs are provided by :file:`atomic.h`:
|
||||
|
||||
* :c:macro:`ATOMIC_INIT`
|
||||
* :c:macro:`ATOMIC_DEFINE`
|
||||
* :cpp:func:`atomic_get()`
|
||||
* :cpp:func:`atomic_set()`
|
||||
* :cpp:func:`atomic_clear()`
|
||||
|
||||
@@ -1,282 +0,0 @@
|
||||
.. _event_logger_v2:
|
||||
|
||||
Kernel Event Logger [TBD]
|
||||
#########################
|
||||
|
||||
Definition
|
||||
**********
|
||||
|
||||
The kernel event logger is a standardized mechanism to record events within the
|
||||
Kernel while providing a single interface for the user to collect the data.
|
||||
This mechanism is currently used to log the following events:
|
||||
|
||||
* Sleep events (entering and exiting low power conditions).
|
||||
* Context switch events.
|
||||
* Interrupt events.
|
||||
|
||||
Kernel Event Logger Configuration
|
||||
*********************************
|
||||
|
||||
Kconfig provides the ability to enable and disable the collection of events and
|
||||
to configure the size of the buffer used by the event logger.
|
||||
|
||||
These options can be found in the following path :file:`kernel/Kconfig`.
|
||||
|
||||
General kernel event logger configuration:
|
||||
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE`
|
||||
|
||||
Default size: 128 words, 32-bit length.
|
||||
|
||||
Profiling points configuration:
|
||||
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC`
|
||||
|
||||
Allows modifying at runtime the events to record. At boot no event is
|
||||
recorded if enabled This flag adds functions allowing to enable/disable
|
||||
recording of kernel event logger.
|
||||
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP`
|
||||
|
||||
Enables the possibility to set the timer function to be used to populate
|
||||
kernel event logger timestamp. This has to be done at runtime by calling
|
||||
sys_k_event_logger_set_timer and providing the function callback.
|
||||
|
||||
Adding a Kernel Event Logging Point
|
||||
***********************************
|
||||
|
||||
Custom trace points can be added with the following API:
|
||||
|
||||
* :c:func:`sys_k_event_logger_put()`
|
||||
|
||||
Adds the profile of a new event with custom data.
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_put_timed()`
|
||||
|
||||
Adds timestamped profile of a new event.
|
||||
|
||||
.. important::
|
||||
|
||||
The data must be in 32-bit sized blocks.
|
||||
|
||||
Retrieving Kernel Event Data
|
||||
****************************
|
||||
|
||||
Applications are required to implement a cooperative thread for accessing the
|
||||
recorded event messages. Developers can use the provided API to retrieve the
|
||||
data, or may write their own routines using the ring buffer provided by the
|
||||
event logger.
|
||||
|
||||
The API functions provided are:
|
||||
|
||||
* :c:func:`sys_k_event_logger_get()`
|
||||
* :c:func:`sys_k_event_logger_get_wait()`
|
||||
* :c:func:`sys_k_event_logger_get_wait_timeout()`
|
||||
|
||||
The above functions specify various ways to retrieve a event message and to
|
||||
copy it to the provided buffer. When the buffer size is smaller than the
|
||||
message, the function will return an error. All three functions retrieve
|
||||
messages via a FIFO method. The :literal:`wait` and :literal:`wait_timeout`
|
||||
functions allow the caller to pend until a new message is logged, or until the
|
||||
timeout expires.
|
||||
|
||||
Enabling/disabling event recording
|
||||
**********************************
|
||||
|
||||
If KERNEL_EVENT_LOGGER_DYNAMIC is enabled, following functions must be checked
|
||||
for dynamically enabling/disabling event recording at runtime:
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_set_mask()`
|
||||
* :cpp:func:`sys_k_event_logger_get_mask()`
|
||||
|
||||
Each mask bit corresponds to the corresponding event ID (mask is starting at
|
||||
bit 1 not bit 0).
|
||||
|
||||
More details are provided in function description.
|
||||
|
||||
Timestamp
|
||||
*********
|
||||
|
||||
The timestamp used by the kernel event logger is 32-bit LSB of platform HW
|
||||
timer (for example Lakemont APIC timer for Quark SE). This timer period is very
|
||||
small and leads to timestamp wraparound happening quite often (e.g. every 134s
|
||||
for Quark SE).
|
||||
|
||||
see :option:`CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC`
|
||||
|
||||
This wraparound must be considered when analyzing kernel event logger data and
|
||||
care must be taken when tickless idle is enabled and sleep duration can exceed
|
||||
maximum HW timer value.
|
||||
|
||||
Timestamp used by the kernel event logger can be customized by enabling
|
||||
following option: :option:`CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP`
|
||||
|
||||
In case this option is enabled, a callback function returning a 32-bit
|
||||
timestamp must be provided to the kernel event logger by calling the following
|
||||
function at runtime: :cpp:func:`sys_k_event_logger_set_timer()`
|
||||
|
||||
Message Formats
|
||||
***************
|
||||
|
||||
Interrupt-driven Event Messaging
|
||||
--------------------------------
|
||||
|
||||
The data of the interrupt-driven event message comes in two block of 32 bits:
|
||||
|
||||
* The first block contains the timestamp occurrence of the interrupt event.
|
||||
* The second block contains the Id of the interrupt.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[2];
|
||||
data[0] = timestamp_event;
|
||||
data[1] = interrupt_id;
|
||||
|
||||
Context-switch Event Messaging
|
||||
------------------------------
|
||||
|
||||
The data of the context-switch event message comes in two block of 32 bits:
|
||||
|
||||
* The first block contains the timestamp occurrence of the context-switch event.
|
||||
* The second block contains the thread id of the context involved.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[2];
|
||||
data[0] = timestamp_event;
|
||||
data[1] = context_id;
|
||||
|
||||
Sleep Event Messaging
|
||||
---------------------
|
||||
|
||||
The data of the sleep event message comes in three block of 32 bits:
|
||||
|
||||
* The first block contains the timestamp when the CPU went to sleep mode.
|
||||
* The second block contains the timestamp when the CPU woke up.
|
||||
* The third block contains the interrupt Id that woke the CPU up.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[3];
|
||||
data[0] = timestamp_went_sleep;
|
||||
data[1] = timestamp woke_up.
|
||||
data[2] = interrupt_id.
|
||||
|
||||
|
||||
Example: Retrieving Profiling Messages
|
||||
======================================
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[3];
|
||||
uint8_t data_length = SIZE32_OF(data);
|
||||
uint8_t dropped_count;
|
||||
|
||||
while(1) {
|
||||
/* collect the data */
|
||||
res = sys_k_event_logger_get_wait(&event_id, &dropped_count, data,
|
||||
&data_length);
|
||||
|
||||
if (dropped_count > 0) {
|
||||
/* process the message dropped count */
|
||||
}
|
||||
|
||||
if (res > 0) {
|
||||
/* process the data */
|
||||
switch (event_id) {
|
||||
case KERNEL_EVENT_CONTEXT_SWITCH_EVENT_ID:
|
||||
/* ... Process the context switch event data ... */
|
||||
break;
|
||||
case KERNEL_EVENT_INTERRUPT_EVENT_ID:
|
||||
/* ... Process the interrupt event data ... */
|
||||
break;
|
||||
case KERNEL_EVENT_SLEEP_EVENT_ID:
|
||||
/* ... Process the data for a sleep event ... */
|
||||
break;
|
||||
default:
|
||||
printf("unrecognized event id %d\n", event_id);
|
||||
}
|
||||
} else {
|
||||
if (res == -EMSGSIZE) {
|
||||
/* ERROR - The buffer provided to collect the
|
||||
* profiling events is too small.
|
||||
*/
|
||||
} else if (ret == -EAGAIN) {
|
||||
/* There is no message available in the buffer */
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.. note::
|
||||
|
||||
To see an example that shows how to collect the kernel event data, check the
|
||||
project :file:`samples/kernel_event_logger`.
|
||||
|
||||
Example: Adding a Kernel Event Logging Point
|
||||
============================================
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[2];
|
||||
|
||||
if (sys_k_must_log_event(KERNEL_EVENT_LOGGER_CUSTOM_ID)) {
|
||||
data[0] = custom_data_1;
|
||||
data[1] = custom_data_2;
|
||||
|
||||
sys_k_event_logger_put(KERNEL_EVENT_LOGGER_CUSTOM_ID, data,
|
||||
ARRAY_SIZE(data));
|
||||
}
|
||||
|
||||
Use the following function to register only the time of an event.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
if (sys_k_must_log_event(KERNEL_EVENT_LOGGER_CUSTOM_ID)) {
|
||||
sys_k_event_logger_put_timed(KERNEL_EVENT_LOGGER_CUSTOM_ID);
|
||||
}
|
||||
|
||||
APIs
|
||||
****
|
||||
|
||||
The following APIs are provided by the :file:`k_event_logger.h` file:
|
||||
|
||||
:cpp:func:`sys_k_event_logger_register_as_collector()`
|
||||
Register the current cooperative thread as the collector thread.
|
||||
|
||||
:c:func:`sys_k_event_logger_put()`
|
||||
Enqueue a kernel event logger message with custom data.
|
||||
|
||||
:cpp:func:`sys_k_event_logger_put_timed()`
|
||||
Enqueue a kernel event logger message with the current time.
|
||||
|
||||
:c:func:`sys_k_event_logger_get()`
|
||||
De-queue a kernel event logger message.
|
||||
|
||||
:c:func:`sys_k_event_logger_get_wait()`
|
||||
De-queue a kernel event logger message. Wait if the buffer is empty.
|
||||
|
||||
:c:func:`sys_k_event_logger_get_wait_timeout()`
|
||||
De-queue a kernel event logger message. Wait if the buffer is empty until
|
||||
the timeout expires.
|
||||
|
||||
:cpp:func:`sys_k_must_log_event()`
|
||||
Check if an event type has to be logged or not
|
||||
|
||||
In case KERNEL_EVENT_LOGGER_DYNAMIC is enabled:
|
||||
|
||||
:cpp:func:`sys_k_event_logger_set_mask()`
|
||||
Set kernel event logger event mask
|
||||
|
||||
:cpp:func:`sys_k_event_logger_get_mask()`
|
||||
Get kernel event logger event mask
|
||||
|
||||
In case KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP is enabled:
|
||||
|
||||
:cpp:func:`sys_k_event_logger_set_timer()`
|
||||
Set kernel event logger timestamp function
|
||||
@@ -102,23 +102,23 @@ pre-tag a thread using one of the techniques listed below.
|
||||
|
||||
* A statically-spawned x86 thread can be pre-tagged by passing the
|
||||
:c:macro:`K_FP_REGS` or :c:macro:`K_SSE_REGS` option to
|
||||
:c:macro:`K_THREAD_DEFINE()`.
|
||||
:c:macro:`K_THREAD_DEFINE`.
|
||||
|
||||
* A dynamically-spawned x86 thread can be pre-tagged by passing the
|
||||
:c:macro:`K_FP_REGS` or :c:macro:`K_SSE_REGS` option to
|
||||
:c:func:`k_thread_spawn()`.
|
||||
:cpp:func:`k_thread_spawn()`.
|
||||
|
||||
* An already-spawned x86 thread can pre-tag itself once it has started
|
||||
by passing the :c:macro:`K_FP_REGS` or :c:macro:`K_SSE_REGS` option to
|
||||
:c:func:`k_float_enable()`.
|
||||
:cpp:func:`k_float_enable()`.
|
||||
|
||||
If an x86 thread uses the floating point registers infrequently it can call
|
||||
:c:func:`k_float_disable()` to remove its tagging as an FPU user or SSE user.
|
||||
:cpp:func:`k_float_disable()` to remove its tagging as an FPU user or SSE user.
|
||||
This eliminates the need for the kernel to take steps to preserve
|
||||
the contents of the floating point registers during context switches
|
||||
when there is no need to do so.
|
||||
When the thread again needs to use the floating point registers it can re-tag
|
||||
itself as an FPU user or SSE user by calling :c:func:`k_float_enable()`.
|
||||
itself as an FPU user or SSE user by calling :cpp:func:`k_float_enable()`.
|
||||
|
||||
Implementation
|
||||
**************
|
||||
|
||||
@@ -127,7 +127,7 @@ Implementation
|
||||
Defining an ISR
|
||||
===============
|
||||
|
||||
An ISR is defined at run-time by calling :c:macro:`IRQ_CONNECT()`. It must
|
||||
An ISR is defined at run-time by calling :c:macro:`IRQ_CONNECT`. It must
|
||||
then be enabled by calling :cpp:func:`irq_enable()`.
|
||||
|
||||
.. important::
|
||||
@@ -185,7 +185,7 @@ APIs
|
||||
|
||||
The following interrupt-related APIs are provided by :file:`irq.h`:
|
||||
|
||||
* :c:macro:`IRQ_CONNECT()`
|
||||
* :c:macro:`IRQ_CONNECT`
|
||||
* :cpp:func:`irq_lock()`
|
||||
* :cpp:func:`irq_unlock()`
|
||||
* :cpp:func:`irq_enable()`
|
||||
@@ -195,3 +195,4 @@ The following interrupt-related APIs are provided by :file:`irq.h`:
|
||||
The following interrupt-related APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :cpp:func:`k_is_in_isr()`
|
||||
* :cpp:func:`k_is_preempt_thread`
|
||||
|
||||
252
doc/kernel_v2/other/kernel_event_logger.rst
Normal file
252
doc/kernel_v2/other/kernel_event_logger.rst
Normal file
@@ -0,0 +1,252 @@
|
||||
.. _kernel_event_logger_v2:
|
||||
|
||||
Kernel Event Logger
|
||||
###################
|
||||
|
||||
The kernel event logger records the occurrence of certain types of kernel
|
||||
events, allowing them to be subsequently extracted and reviewed.
|
||||
This capability can be helpful in profiling the operation of an application,
|
||||
either for debugging purposes or for optimizing the performance of the application.
|
||||
|
||||
.. contents::
|
||||
:local:
|
||||
:depth: 2
|
||||
|
||||
Concepts
|
||||
********
|
||||
|
||||
The kernel event logger does not exist unless it is configured for an
|
||||
application. The capacity of the kernel event logger is also configurable.
|
||||
By default, it has a ring buffer that can hold up to 128 32-bit words
|
||||
of event information.
|
||||
|
||||
The kernel event logger is capable of recording the following pre-defined
|
||||
event types:
|
||||
|
||||
* Interrupts.
|
||||
* Context switching of threads.
|
||||
* Kernel sleep events (i.e. entering and exiting a low power state).
|
||||
|
||||
The kernel event logger only records the pre-defined event types it has been
|
||||
configured to record. Each event type can be enabled independently.
|
||||
|
||||
An application can also define and record custom event types.
|
||||
The information recorded for a custom event, and the times
|
||||
at which it is recorded, must be implemented by the application.
|
||||
|
||||
All events recorded by the kernel event logger remain in its ring buffer
|
||||
until they are retrieved by the application for review and analysis. The
|
||||
retrieval and analysis logic must be implemented by the application.
|
||||
|
||||
.. important::
|
||||
An application must retrieve the events recorded by the kernel event logger
|
||||
in a timely manner, otherwise new events will be dropped once the event
|
||||
logger's ring buffer becomes full. A recommended approach is to use
|
||||
a cooperative thread to retrieve the events, either on a periodic basis
|
||||
or as its sole responsibility.
|
||||
|
||||
By default, the kernel event logger records all occurrences of all event types
|
||||
that have been enabled. However, it can also be configured to allow an
|
||||
application to dynamically start or stop the recording of events at any time,
|
||||
and to control which event types are being recorded. This permits
|
||||
the application to capture only the events that occur during times
|
||||
of particular interest, thereby reducing the work needed to analyze them.
|
||||
|
||||
.. note::
|
||||
The kernel event logger can also be instructed to ignore context switches
|
||||
involving a single specified thread. This can be used to avoid recording
|
||||
context switch events involving the thread that retrieves the events
|
||||
from the kernel event logger.
|
||||
|
||||
Event Formats
|
||||
=============
|
||||
|
||||
Each event recorded by the kernel event logger consists of one or more
|
||||
32-bit words of data that describe the event.
|
||||
|
||||
An **interrupt event** has the following format:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct {
|
||||
uint32_t timestamp; /* time of interrupt */
|
||||
uint32_t interrupt_id; /* ID of interrupt */
|
||||
};
|
||||
|
||||
A **context-switch event** has the following format:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct {
|
||||
uint32_t timestamp; /* time of context switch */
|
||||
uint32_t context_id; /* ID of thread that was switched out */
|
||||
};
|
||||
|
||||
A **sleep event** has the following format:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct {
|
||||
uint32_t sleep_timestamp; /* time when CPU entered sleep mode */
|
||||
uint32_t wake_timestamp; /* time when CPU exited sleep mode */
|
||||
uint32_t interrupt_id; /* ID of interrupt that woke CPU */
|
||||
};
|
||||
|
||||
A **custom event** must have a type ID that does not conflict with
|
||||
any existing pre-defined event type ID. The format of a custom event
|
||||
is application-defined, but must contain at least one 32-bit data word.
|
||||
A custom event may utilize a variable size, to allow different events
|
||||
of a single type to record differing amounts of information.
|
||||
|
||||
Timestamps
|
||||
==========
|
||||
|
||||
By default, the timestamp recorded with each pre-defined event is obtained from
|
||||
the kernel's :ref:`hardware clock <clocks_v2>`. This 32-bit clock counts up
|
||||
extremely rapidly, which means the timestamp value wraps around frequently.
|
||||
(For example, the Lakemont APIC timer for Quark SE wraps every 134 seconds.)
|
||||
This wraparound must be accounted for when analyzing kernel event logger data.
|
||||
In addition, care must be taken when tickless idle is enabled, in case a sleep
|
||||
duration exceeds 2^32 clock cycles.
|
||||
|
||||
If desired, the kernel event logger can be configured to record
|
||||
a custom timestamp, rather than the default timestamp.
|
||||
The application registers the callback function that generates the custom 32-bit
|
||||
timestamp at run-time by calling :cpp:func:`sys_k_event_logger_set_timer()`.
|
||||
|
||||
Implementation
|
||||
**************
|
||||
|
||||
Retrieving An Event
|
||||
===================
|
||||
|
||||
An event can be retrieved from the kernel event logger in a blocking or
|
||||
non-blocking manner using the following APIs:
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_get()`
|
||||
* :cpp:func:`sys_k_event_logger_get_wait()`
|
||||
* :cpp:func:`sys_k_event_logger_get_wait_timeout()`
|
||||
|
||||
In each case, the API also returns the type and size of the event, as well
|
||||
as the event information itself. The API also indicates how many events
|
||||
were dropped between the occurrence of the previous event and the retrieved
|
||||
event.
|
||||
|
||||
The following code illustrates how a thread can retrieve the events
|
||||
recorded by the kernel event logger.
|
||||
A sample application that shows how to collect kernel event data
|
||||
can also be found at :file:`samples/kernel_event_logger`.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint16_t event_id;
|
||||
uint8_t dropped_count;
|
||||
uint32_t data[3];
|
||||
uint8_t data_size;
|
||||
|
||||
while(1) {
|
||||
/* retrieve an event */
|
||||
data_size = SIZE32_OF(data);
|
||||
res = sys_k_event_logger_get_wait(&event_id, &dropped_count, data,
|
||||
&data_size);
|
||||
|
||||
if (dropped_count > 0) {
|
||||
/* ... Process the dropped events count ... */
|
||||
}
|
||||
|
||||
if (res > 0) {
|
||||
/* process the event */
|
||||
switch (event_id) {
|
||||
case KERNEL_EVENT_CONTEXT_SWITCH_EVENT_ID:
|
||||
/* ... Process the context switch event ... */
|
||||
break;
|
||||
case KERNEL_EVENT_INTERRUPT_EVENT_ID:
|
||||
/* ... Process the interrupt event ... */
|
||||
break;
|
||||
case KERNEL_EVENT_SLEEP_EVENT_ID:
|
||||
/* ... Process the sleep event ... */
|
||||
break;
|
||||
default:
|
||||
printf("unrecognized event id %d\n", event_id);
|
||||
}
|
||||
} else if (res == -EMSGSIZE) {
|
||||
/* ... Data array is too small to hold the event! ... */
|
||||
}
|
||||
}
|
||||
|
||||
Adding a Custom Event Type
|
||||
==========================
|
||||
|
||||
A custom event type must use an integer type ID that does not duplicate
|
||||
an existing type ID. The type IDs for the pre-defined events can be found
|
||||
in :file:`include/misc/kernel_event_logger.h`. If dynamic recording of
|
||||
events is enabled, the event type ID must not exceed 32.
|
||||
|
||||
Custom events can be written to the kernel event logger using the following
|
||||
APIs:
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_put()`
|
||||
* :cpp:func:`sys_k_event_logger_put_timed()`
|
||||
|
||||
Both of these APIs record an event as long as there is room in the kernel
|
||||
event logger's ring buffer. To enable dynamic recording of a custom event type,
|
||||
the application must first call :cpp:func:`sys_k_must_log_event()` to determine
|
||||
if event recording is currently active for that event type.
|
||||
|
||||
The following code illustrates how an application can write a custom
|
||||
event consisting of two 32-bit words to the kernel event logger.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
#define MY_CUSTOM_EVENT_ID 8
|
||||
|
||||
/* record custom event only if recording is currently wanted */
|
||||
if (sys_k_must_log_event(MY_CUSTOM_EVENT_ID)) {
|
||||
uint32_t data[2];
|
||||
|
||||
data[0] = custom_data_1;
|
||||
data[1] = custom_data_2;
|
||||
|
||||
sys_k_event_logger_put(MY_CUSTOM_EVENT_ID, data, ARRAY_SIZE(data));
|
||||
}
|
||||
|
||||
The following code illustrates how an application can write a custom event
|
||||
that records just a timestamp using a single 32-bit word.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
#define MY_CUSTOM_TIME_ONLY_EVENT_ID 9
|
||||
|
||||
if (sys_k_must_log_event(MY_CUSTOM_TIME_ONLY_EVENT_ID)) {
|
||||
sys_k_event_logger_put_timed(MY_CUSTOM_TIME_ONLY_EVENT_ID);
|
||||
}
|
||||
|
||||
Configuration Options
|
||||
*********************
|
||||
|
||||
Related configuration options:
|
||||
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_SLEEP`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP`
|
||||
|
||||
APIs
|
||||
****
|
||||
|
||||
The following kernel event logger APIs are provided by
|
||||
:file:`kernel_event_logger.h`:
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_register_as_collector()`
|
||||
* :cpp:func:`sys_k_event_logger_get()`
|
||||
* :cpp:func:`sys_k_event_logger_get_wait()`
|
||||
* :cpp:func:`sys_k_event_logger_get_wait_timeout()`
|
||||
* :cpp:func:`sys_k_must_log_event()`
|
||||
* :cpp:func:`sys_k_event_logger_put()`
|
||||
* :cpp:func:`sys_k_event_logger_put_timed()`
|
||||
* :cpp:func:`sys_k_event_logger_get_mask()`
|
||||
* :cpp:func:`sys_k_event_logger_set_mask()`
|
||||
* :cpp:func:`sys_k_event_logger_set_timer()`
|
||||
@@ -12,6 +12,6 @@ This section describes other services provided by the kernel.
|
||||
atomic.rst
|
||||
float.rst
|
||||
ring_buffers.rst
|
||||
event_logger.rst
|
||||
kernel_event_logger.rst
|
||||
c_library.rst
|
||||
cxx_support.rst
|
||||
|
||||
@@ -83,7 +83,7 @@ is capable of holding 64 words of data and metadata information.
|
||||
#define MY_RING_BUF_SIZE 64
|
||||
|
||||
struct my_struct {
|
||||
struct ring_buffer rb;
|
||||
struct ring_buf rb;
|
||||
uint32_t buffer[MY_RING_BUF_SIZE];
|
||||
...
|
||||
};
|
||||
@@ -175,8 +175,8 @@ APIs
|
||||
|
||||
The following ring buffer APIs are provided by :file:`misc/ring_buffer.h`:
|
||||
|
||||
* :c:func:`SYS_RING_BUF_DECLARE_POW2()`
|
||||
* :c:func:`SYS_RING_BUF_DECLARE_SIZE()`
|
||||
* :cpp:func:`SYS_RING_BUF_DECLARE_POW2()`
|
||||
* :cpp:func:`SYS_RING_BUF_DECLARE_SIZE()`
|
||||
* :cpp:func:`sys_ring_buf_init()`
|
||||
* :cpp:func:`sys_ring_buf_is_empty()`
|
||||
* :cpp:func:`sys_ring_buf_space_get()`
|
||||
|
||||
@@ -103,7 +103,7 @@ new pending alerts.
|
||||
k_alert_init(&my_alert, my_alert_handler, 10);
|
||||
|
||||
Alternatively, an alert can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_ALERT_DEFINE()`.
|
||||
by calling :c:macro:`K_ALERT_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -234,5 +234,7 @@ APIs
|
||||
|
||||
The following alert APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_ALERT_DEFINE`
|
||||
* :cpp:func:`k_alert_init()`
|
||||
* :cpp:func:`k_alert_send()`
|
||||
* :cpp:func:`k_alert_recv()`
|
||||
|
||||
@@ -105,7 +105,7 @@ The following code defines and initializes a mutex.
|
||||
k_mutex_init(&my_mutex);
|
||||
|
||||
Alternatively, a mutex can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MUTEX_DEFINE()`.
|
||||
by calling :c:macro:`K_MUTEX_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -130,7 +130,7 @@ available, and gives a warning if the mutex does not become available.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
if (k_mutex_lock(&my_mutex, 100) == 0) {
|
||||
if (k_mutex_lock(&my_mutex, K_MSEC(100)) == 0) {
|
||||
/* mutex successfully locked */
|
||||
} else {
|
||||
printf("Cannot lock XYZ display\n");
|
||||
@@ -166,6 +166,7 @@ APIs
|
||||
|
||||
The following mutex APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MUTEX_DEFINE`
|
||||
* :cpp:func:`k_mutex_init()`
|
||||
* :cpp:func:`k_mutex_lock()`
|
||||
* :cpp:func:`k_mutex_unlock()`
|
||||
|
||||
@@ -60,7 +60,7 @@ semaphore by setting its count to 0 and its limit to 1.
|
||||
k_sem_init(&my_sem, 0, 1);
|
||||
|
||||
Alternatively, a semaphore can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_SEM_DEFINE()`.
|
||||
by calling :c:macro:`K_SEM_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -101,7 +101,7 @@ A warning is issued if the semaphore is not obtained in time.
|
||||
{
|
||||
...
|
||||
|
||||
if (k_sem_take(&my_sem, 50) != 0) {
|
||||
if (k_sem_take(&my_sem, K_MSEC(50)) != 0) {
|
||||
printk("Input data not available!");
|
||||
} else {
|
||||
/* fetch available data */
|
||||
@@ -130,6 +130,7 @@ APIs
|
||||
|
||||
The following semaphore APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_SEM_DEFINE`
|
||||
* :cpp:func:`k_sem_init()`
|
||||
* :cpp:func:`k_sem_give()`
|
||||
* :cpp:func:`k_sem_take()`
|
||||
|
||||
@@ -78,7 +78,7 @@ automatically aborts a thread if the thread triggers a fatal error condition,
|
||||
such as dereferencing a null pointer.
|
||||
|
||||
A thread can also be aborted by another thread (or by itself)
|
||||
by calling :c:func:`k_thread_abort()`. However, it is typically preferable
|
||||
by calling :cpp:func:`k_thread_abort()`. However, it is typically preferable
|
||||
to signal a thread to terminate itself gracefully, rather than aborting it.
|
||||
|
||||
As with thread termination, the kernel does not reclaim shared resources
|
||||
@@ -92,16 +92,16 @@ Thread Suspension
|
||||
=================
|
||||
|
||||
A thread can be prevented from executing for an indefinite period of time
|
||||
if it becomes **suspended**. The function :c:func:`k_thread_suspend()`
|
||||
if it becomes **suspended**. The function :cpp:func:`k_thread_suspend()`
|
||||
can be used to suspend any thread, including the calling thread.
|
||||
Suspending a thread that is already suspended has no additional effect.
|
||||
|
||||
Once suspended, a thread cannot be scheduled until another thread calls
|
||||
:c:func:`k_thread_resume()` to remove the suspension.
|
||||
:cpp:func:`k_thread_resume()` to remove the suspension.
|
||||
|
||||
.. note::
|
||||
A thread can prevent itself from executing for a specified period of time
|
||||
using :c:func:`k_sleep()`. However, this is different from suspending
|
||||
using :cpp:func:`k_sleep()`. However, this is different from suspending
|
||||
a thread since a sleeping thread becomes executable automatically when the
|
||||
time limit is reached.
|
||||
|
||||
@@ -146,7 +146,7 @@ Spawning a Thread
|
||||
|
||||
A thread is spawned by defining its stack area and then calling
|
||||
:cpp:func:`k_thread_spawn()`. The stack area is an array of bytes
|
||||
whose size must equal :c:func:`sizeof(struct k_thread)` plus the size
|
||||
whose size must equal :c:macro:`K_THREAD_SIZEOF` plus the size
|
||||
of the thread's stack. The stack area must be defined using the
|
||||
:c:macro:`__stack` attribute to ensure it is properly aligned.
|
||||
|
||||
@@ -169,7 +169,7 @@ The following code spawns a thread that starts immediately.
|
||||
MY_PRIORITY, 0, K_NO_WAIT);
|
||||
|
||||
Alternatively, a thread can be spawned at compile time by calling
|
||||
:c:macro:`K_THREAD_DEFINE()`. Observe that the macro defines
|
||||
:c:macro:`K_THREAD_DEFINE`. Observe that the macro defines
|
||||
the stack area and thread id variables automatically.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
@@ -226,8 +226,9 @@ Related configuration options:
|
||||
APIs
|
||||
****
|
||||
|
||||
The following thread APIs are are provided by :file:`kernel.h`:
|
||||
The following thread APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_THREAD_DEFINE`
|
||||
* :cpp:func:`k_thread_spawn()`
|
||||
* :cpp:func:`k_thread_cancel()`
|
||||
* :cpp:func:`k_thread_abort()`
|
||||
|
||||
@@ -135,7 +135,7 @@ are measured in system clock ticks. The time slice size is configurable,
|
||||
but this size can be changed while the application is running.
|
||||
|
||||
At the end of every time slice, the scheduler checks to see if the current
|
||||
thread is preemptible and, if so, implicitly invokes :c:func:`k_yield()`
|
||||
thread is preemptible and, if so, implicitly invokes :cpp:func:`k_yield()`
|
||||
on behalf of the thread. This gives other ready threads of the same priority
|
||||
the opportunity to execute before the current thread is scheduled again.
|
||||
If no threads of equal priority are ready, the current thread remains
|
||||
@@ -234,6 +234,8 @@ APIs
|
||||
The following thread scheduling-related APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :cpp:func:`k_current_get()`
|
||||
* :cpp:func:`k_sched_lock()`
|
||||
* :cpp:func:`k_sched_unlock()`
|
||||
* :cpp:func:`k_yield()`
|
||||
* :cpp:func:`k_sleep()`
|
||||
* :cpp:func:`k_wakeup()`
|
||||
|
||||
@@ -150,7 +150,7 @@ Defining a Workqueue
|
||||
A workqueue is defined using a variable of type :c:type:`struct k_work_q`.
|
||||
The workqueue is initialized by defining the stack area used by its thread
|
||||
and then calling :cpp:func:`k_work_q_start()`. The stack area is an array
|
||||
of bytes whose size must equal :c:func:`sizeof(struct k_thread)` plus the size
|
||||
of bytes whose size must equal :c:macro:`K_THREAD_SIZEOF` plus the size
|
||||
of the thread's stack. The stack area must be defined using the
|
||||
:c:macro:`__stack` attribute to ensure it is properly aligned.
|
||||
|
||||
@@ -158,7 +158,7 @@ The following code defines and initializes a workqueue.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
#define MY_STACK_SIZE 500
|
||||
#define MY_STACK_SIZE (K_THREAD_SIZEOF + 500)
|
||||
#define MY_PRIORITY 5
|
||||
|
||||
char __noinit __stack my_stack_area[MY_STACK_SIZE];
|
||||
|
||||
@@ -164,17 +164,10 @@ The following kernel clock APIs are provided by :file:`kernel.h`:
|
||||
* :cpp:func:`k_uptime_delta()`
|
||||
* :cpp:func:`k_uptime_delta_32()`
|
||||
* :cpp:func:`k_cycle_get_32()`
|
||||
|
||||
The following kernel clock variables are provided by :file:`kernel.h`:
|
||||
|
||||
:c:data:`sys_clock_ticks_per_sec`
|
||||
The number of system clock ticks in a single second.
|
||||
|
||||
:c:data:`sys_clock_hw_cycles_per_sec`
|
||||
The number of hardware clock cycles in a single second.
|
||||
|
||||
:c:data:`sys_clock_us_per_tick`
|
||||
The number of microseconds in a single system clock tick.
|
||||
|
||||
:c:data:`sys_clock_hw_cycles_per_tick`
|
||||
The number of hardware clock cycles in a single system clock tick.
|
||||
* :c:macro:`SYS_CLOCK_HW_CYCLES_TO_NS`
|
||||
* :c:macro:`K_NO_WAIT`
|
||||
* :c:macro:`K_MSEC`
|
||||
* :c:macro:`K_SECONDS`
|
||||
* :c:macro:`K_MINUTES`
|
||||
* :c:macro:`K_HOURS`
|
||||
* :c:macro:`K_FOREVER`
|
||||
|
||||
@@ -112,7 +112,7 @@ The following code defines and initializes a timer.
|
||||
k_timer_init(&my_timer, my_expiry_function, NULL);
|
||||
|
||||
Alternatively, a timer can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_TIMER_DEFINE()`.
|
||||
by calling :c:macro:`K_TIMER_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -125,23 +125,22 @@ Using a Timer Expiry Function
|
||||
|
||||
The following code uses a timer to perform a non-trivial action on a periodic
|
||||
basis. Since the required work cannot be done at interrupt level,
|
||||
the timer's expiry function uses a :ref:`kernel alert object <alerts_v2>`
|
||||
to do the work in the context of the system workqueue.
|
||||
the timer's expiry function submits a work item to the
|
||||
:ref:`system workqueue <workqueues_v2>`, whose thread performs the work.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int my_alert_handler(struct k_alert *dummy)
|
||||
void my_work_handler(struct k_work *work)
|
||||
{
|
||||
/* do the processing that needs to be done periodically */
|
||||
...
|
||||
return 0;
|
||||
}
|
||||
|
||||
K_ALERT_DEFINE(my_alert, my_alert_handler);
|
||||
struct k_work my_work = K_WORK_INITIALIZER(my_work_handler);
|
||||
|
||||
void my_timer_handler(struct k_timer *dummy)
|
||||
{
|
||||
k_alert_send(&my_alert);
|
||||
k_work_submit(&my_work);
|
||||
}
|
||||
|
||||
K_TIMER_DEFINE(my_timer, my_timer_handler, NULL);
|
||||
@@ -149,7 +148,7 @@ to do the work in the context of the system workqueue.
|
||||
...
|
||||
|
||||
/* start periodic timer that expires once every second */
|
||||
k_timer_start(&my_timer, 1000, 1000);
|
||||
k_timer_start(&my_timer, K_SECONDS(1), K_SECONDS(1));
|
||||
|
||||
Reading Timer Status
|
||||
====================
|
||||
@@ -164,7 +163,7 @@ if the timer has expired on not.
|
||||
...
|
||||
|
||||
/* start one shot timer that expires after 200 ms */
|
||||
k_timer_start(&my_status_timer, 200, 0);
|
||||
k_timer_start(&my_status_timer, K_MSEC(200), 0);
|
||||
|
||||
/* do work */
|
||||
...
|
||||
@@ -195,7 +194,7 @@ are separated by the specified time interval.
|
||||
...
|
||||
|
||||
/* start one shot timer that expires after 500 ms */
|
||||
k_timer_start(&my_sync_timer, 500, 0);
|
||||
k_timer_start(&my_sync_timer, K_MSEC(500), 0);
|
||||
|
||||
/* do other work */
|
||||
...
|
||||
@@ -241,6 +240,7 @@ APIs
|
||||
|
||||
The following timer APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_TIMER_DEFINE`
|
||||
* :cpp:func:`k_timer_init()`
|
||||
* :cpp:func:`k_timer_start()`
|
||||
* :cpp:func:`k_timer_stop()`
|
||||
|
||||
@@ -77,7 +77,7 @@ The stack is split up as follows in the source tree:
|
||||
functionality of the Bluetooth stack, but are not necessarily the best
|
||||
source for sample code (see ``samples/bluetooth`` instead).
|
||||
|
||||
``doc/bluetooth/``
|
||||
``doc/subsystems/bluetooth/``
|
||||
Extra documentation, such as PICS documents.
|
||||
|
||||
Further reading
|
||||
|
||||
@@ -3,175 +3,98 @@
|
||||
Power Management
|
||||
################
|
||||
|
||||
The power management infrastructure consists of interfaces exported by the
|
||||
power management subsystem. This subsystem exports interfaces that a
|
||||
:abbr:`Power Management Application (PMA)` uses to implement power management
|
||||
policies.
|
||||
Zephyr RTOS power management subsystem provides several means for a system
|
||||
integrator to implement power management support that can take full
|
||||
advantage of the power saving features of SOCs.
|
||||
|
||||
|
||||
Terminology
|
||||
***********
|
||||
|
||||
:dfn:`PMA`
|
||||
:dfn:`SOC interface`
|
||||
This is a general term for the components that have knowledge of the
|
||||
SOC and provide interfaces to the hardware features. It will abstract
|
||||
the SOC specific implementations to the applications and the OS.
|
||||
|
||||
The system integrator provides the :abbr:`PMA (Power Manager
|
||||
Application)`. The PMA maintains any power management policies and
|
||||
executes the power management actions based on those policies.
|
||||
The PMA must be integrated into the main Zephyr application.
|
||||
:dfn:`CPU LPS (Low Power State)`
|
||||
Refers to any one of the low power states supported by the CPU. The CPU is
|
||||
usually powered on while the clocks are power gated.
|
||||
|
||||
:dfn:`LPS`
|
||||
:dfn:`Active State`
|
||||
The CPU and clocks are powered on. This is the normal operating state when
|
||||
the system is running.
|
||||
|
||||
:abbr:`LPS (Low Power States)` refers to any one of the low power states supported by the CPU.
|
||||
:dfn:`Deep Sleep State`
|
||||
The CPU is power gated and loses context. Most peripherals would also be
|
||||
power gated. RAM is selectively retained.
|
||||
|
||||
:dfn:`SoC Power State`
|
||||
:dfn:`SOC Power State`
|
||||
SOC Power State describes processor and device power states implemented at
|
||||
the SOC level. Deep Sleep State is an example of SOC Power State.
|
||||
|
||||
An SoC Power State describes processor and device power statuses
|
||||
implemented at the SoC level.
|
||||
:dfn:`Idle Thread`
|
||||
A system thread that runs when there are no other threads ready to run.
|
||||
|
||||
:dfn:`Hook function`
|
||||
|
||||
A Hook function is a callback function that one component implements and
|
||||
another component calls. For example, the PMA implements functions that
|
||||
the kernel calls.
|
||||
|
||||
Architecture and SoC dependent Power States:
|
||||
============================================
|
||||
|
||||
On x86:
|
||||
-------
|
||||
|
||||
`Active`
|
||||
The CPU is active and running in the hardware defined C0 C-state.
|
||||
|
||||
`Idle`
|
||||
The CPU is not active but continues to be powered.
|
||||
The CPU may be in one of any lower C-states: C1, C2, etc.
|
||||
|
||||
`Deep Sleep`
|
||||
The Power is off to the processor and system clock. RAM is retained.
|
||||
|
||||
On ARM
|
||||
------
|
||||
|
||||
`Active`
|
||||
The CPU is active and running.
|
||||
|
||||
`Idle`
|
||||
Stops the processor clock. The ARM documentation describes
|
||||
this as *Sleep*.
|
||||
|
||||
`Deep Sleep`
|
||||
Stops the system clock and switches off the PLL and flash
|
||||
memory. RAM is retained.
|
||||
|
||||
On ARC
|
||||
------
|
||||
|
||||
`Active`
|
||||
The CPU is currently active and running in the SS0 state.
|
||||
|
||||
`Idle`
|
||||
Defined as the SS1 and SS2 states.
|
||||
|
||||
The power states described here are generic terms that map to the power
|
||||
states commonly supported by processors and SoCs based on the three
|
||||
architectures. When coding a PMA, please refer to the data sheet of the SoC
|
||||
to get details on each power state.
|
||||
:dfn:`Power gating`
|
||||
Power gating reduces power consumption by shutting off current to blocks of
|
||||
the integrated circuit that are not in use.
|
||||
|
||||
Overview
|
||||
********
|
||||
|
||||
The Zephyr power management subsystem provides interfaces that a system
|
||||
integrator can use to create a PMA. The PMA then enforces any policies
|
||||
needed. The design is based on the philosophy of not enforcing any policies
|
||||
in the kernel giving full flexibility to the PMA.
|
||||
The interfaces and APIs provided by the power management subsystem
|
||||
are designed to be architecture and SOC independent. This enables power
|
||||
management implementations to be easily adapted to different SOCs and
|
||||
architectures. The kernel does not implement any power schemes of its own, giving
|
||||
the system integrator the flexibility of implementing custom power schemes.
|
||||
|
||||
The provided infrastructure has an architecture independent interface.
|
||||
The kernel notifies the PMA when it is about to
|
||||
enter or exit a system idle state. The PMA can perform the power management
|
||||
policy operations during these notifications.
|
||||
The architecture and SOC independence is achieved by separating the core
|
||||
infrastructure and the SOC specific implementations. The SOC specific
|
||||
implementations are abstracted to the application and the OS using hardware
|
||||
abstraction layers.
|
||||
|
||||
Policies
|
||||
********
|
||||
The power management features are classified into the following categories.
|
||||
|
||||
When the power management subsystem notifies the PMA that the kernel is about
|
||||
to enter a system idle state, it specifies the period of time the system
|
||||
intends to stay idle. The PMA performs any power management operations during
|
||||
this time. The PMA can perform various operations. For example, put the
|
||||
processor or the SoC in a low power state, turn off some or all of the
|
||||
peripherals, and gate device clocks. Using combinations of these operations,
|
||||
the PMA can create fine grain custom power management policies.
|
||||
* Tickless Idle
|
||||
* System Power Management
|
||||
* Device Power Management
|
||||
|
||||
Different levels of power savings and different wake latencies characterize
|
||||
these fine grain policies. In general, operations that save more power have a
|
||||
higher wake latency. When making policy decisions, the PMA chooses the
|
||||
policy that saves the most power. At the same time, the policy's total
|
||||
execution time must fit well within the idle time allotted by the power
|
||||
management subsystem.
|
||||
Tickless Idle
|
||||
*************
|
||||
|
||||
The Zephyr power management subsystem classifies policies into categories
|
||||
based on relative power savings and the corresponding wake latencies. These
|
||||
policies also loosely map to common processor and SoC power states in the
|
||||
supported architectures. The PMA should map the fine grain custom policies to
|
||||
the policy categories of the power management subsystem. The power management
|
||||
subsystem defines three categories:
|
||||
This is the name used to identify the event-based idling mechanism of the
|
||||
Zephyr RTOS kernel scheduler. The kernel scheduler can run in two modes. During
|
||||
normal operation, when at least one thread is active, it sets up the system
|
||||
timer in periodic mode and runs in an interval-based scheduling mode. The
|
||||
interval-based mode allows it to time slice between tasks. Many times, the
|
||||
threads would be waiting on semaphores, timeouts or for events. When there
|
||||
are no threads running, it is inefficient for the kernel scheduler to run
|
||||
in interval-based mode. This is because, in this mode the timer would trigger
|
||||
an interrupt at fixed intervals causing the scheduler to be invoked at each
|
||||
interval. The scheduler checks if any thread is ready to run. If no thread
|
||||
is ready to run then it is a waste of power because of the unnecessary CPU
|
||||
processing. This is avoided by the kernel switching to event-based idling
|
||||
mode whenever there is no thread ready to run.
|
||||
|
||||
* SYS_PM_LOW_POWER_STATE
|
||||
* SYS_PM_DEEP_SLEEP
|
||||
* SYS_PM_DEVICE_SUSPEND_ONLY
|
||||
The kernel holds an ordered list of thread timeouts in the system. These are
|
||||
the amount of time each thread has requested to wait. When the last active
|
||||
thread goes to wait, the idle thread is scheduled. The idle thread programs
|
||||
the timer to one-shot mode and programs the count to the earliest timeout
|
||||
from the ordered thread timeout list. When the timer expires, a timer event
|
||||
is generated. The ISR of this event will invoke the scheduler, which would
|
||||
schedule the thread associated with the timeout. Before scheduling the
|
||||
thread, the scheduler would switch the timer again to periodic mode. This
|
||||
method saves power because the CPU is removed from the wait only when there
|
||||
is a thread ready to run or if an external event occurred.
|
||||
|
||||
SYS_PM_LOW_POWER_STATE
|
||||
======================
|
||||
System Power Management
|
||||
***********************
|
||||
|
||||
In this policy category, the PMA performs power management operations on some
|
||||
or all devices and puts the processor into a low power state. The device
|
||||
power management operations can involve turning off peripherals and gating
|
||||
device clocks. When any of those operations causes the device registers to
|
||||
lose their state, then those states must be saved and restored. The PMA
|
||||
should map fine grain policies with relatively less wake latency to this
|
||||
category. Policies with larger wake latency should be mapped to the
|
||||
`SYS_PM_DEEP_SLEEP`_ category. Policies in this category exit from an
|
||||
external interrupt, a wake up event set by the PMA, or when the idle time
|
||||
allotted by the power management subsystem expires.
|
||||
|
||||
SYS_PM_DEEP_SLEEP
|
||||
=================
|
||||
|
||||
In this policy category, the PMA puts the system into the deep sleep power
|
||||
states supported by SoCs. In this state, the system clock is turned off. The
|
||||
processor is turned off and loses its state. RAM is expected to be retained
|
||||
and can save and restore processor states. Only the devices necessary to wake
|
||||
up the system from the deep sleep power state stay on. The SoC turns off the
|
||||
power to all other devices. Since this causes device registers to lose their
|
||||
state, they must be saved and restored. The PMA should map fine grain
|
||||
policies with the highest wake latency to this policy category. Policies in
|
||||
this category exit from SoC dependent wake events.
|
||||
|
||||
SYS_PM_DEVICE_SUSPEND_ONLY
|
||||
==========================
|
||||
|
||||
In this policy category, the PMA performs power management operations on some
|
||||
devices but none that result in a processor or SoC power state transition.
|
||||
The PMA should map its fine grain policies that have the lowest wake latency
|
||||
to this policy category. Policies in this category exit from an external
|
||||
interrupt or when the idle time allotted by the power management subsystem
|
||||
expires.
|
||||
|
||||
Some policy categories names are similar to the power states of processors or
|
||||
SoCs, for example, :code:`SYS_PM_DEEP_SLEEP`. However, they must be seen
|
||||
as policy categories and do not indicate any specific processor or SoC power
|
||||
state by themselves.
|
||||
|
||||
.. _pm_hook_infra:
|
||||
|
||||
Power Management Hook Infrastructure
|
||||
************************************
|
||||
|
||||
This infrastructure consists of the hook functions that the PMA implemented.
|
||||
The power management subsystem calls these hook functions when the kernel
|
||||
enters and exits the idle state, in other words, when the kernel has nothing
|
||||
to schedule. This section provides a general overview and general concepts of
|
||||
the hook functions. Refer to :ref:`power_management_api` for the detailed
|
||||
description of the APIs.
|
||||
This consists of the hook functions that the power management subsystem calls
|
||||
when the kernel enters and exits the idle state, in other words, when the kernel
|
||||
has nothing to schedule. This section provides a general overview of the hook
|
||||
functions. Refer to :ref:`power_management_api` for the detailed description of
|
||||
the APIs.
|
||||
|
||||
Suspend Hook function
|
||||
=====================
|
||||
@@ -181,39 +104,31 @@ Suspend Hook function
|
||||
int _sys_soc_suspend(int32_t ticks);
|
||||
|
||||
When the kernel is about to go idle, the power management subsystem calls the
|
||||
:code:`_sys_soc_suspend()` function, notifying the PMA that the kernel is
|
||||
ready to enter the idle state.
|
||||
:code:`_sys_soc_suspend()` function, notifying the SOC interface that the kernel
|
||||
is ready to enter the idle state.
|
||||
|
||||
At this point, the kernel has disabled interrupts and computed the maximum
|
||||
number of ticks the system can remain idle. The function passes the time that
|
||||
the system can remain idle to the PMA along with the notification. When
|
||||
notified, the PMA selects and executes one of the fine grain power policies
|
||||
that can be executed within the allotted time.
|
||||
time the system can remain idle. The function passes the time that
|
||||
the system can remain idle. The SOC interface performs power operations that
|
||||
can be done in the available time. The power management operation must halt
|
||||
execution in a CPU or SOC low power state. Before entering the low power state,
|
||||
the SOC interface must setup a wake event.
|
||||
|
||||
The power management subsystem expects the :code:`_sys_soc_suspend()` to
|
||||
return one of the following values based on the power management operations
|
||||
the PMA executed:
|
||||
the SOC interface executed:
|
||||
|
||||
:code:`SYS_PM_NOT_HANDLED`
|
||||
|
||||
No power management operations. Indicates that the PMA could not
|
||||
accomplish any actions in the time allotted by the kernel.
|
||||
|
||||
:code:`SYS_PM_DEVICE_SUSPEND_ONLY`
|
||||
|
||||
Only devices are suspended. Indicates that the PMA could accomplish any
|
||||
device suspend operations. These operations do not include any processor
|
||||
or SOC power operations.
|
||||
Indicates that no power management operations were performed.
|
||||
|
||||
:code:`SYS_PM_LOW_POWER_STATE`
|
||||
|
||||
Entered a LPS. Indicates that the PMA could put the processor into a low
|
||||
power state.
|
||||
Indicates that the CPU was put in a low power state.
|
||||
|
||||
:code:`SYS_PM_DEEP_SLEEP`
|
||||
|
||||
Entered deep sleep. Indicates that the PMA could put the SoC in a deep
|
||||
sleep state.
|
||||
Indicates that the SOC was put in a deep sleep state.
|
||||
|
||||
Resume Hook function
|
||||
====================
|
||||
@@ -222,29 +137,126 @@ Resume Hook function
|
||||
|
||||
void _sys_soc_resume(void);
|
||||
|
||||
The kernel calls this hook function when exiting from an idle state or a low
|
||||
power state. Based on which policy the PMA executed in the
|
||||
:code:`_sys_soc_suspend()` function, the PMA performs the necessary recovery
|
||||
operations in this hook function.
|
||||
The power management subsystem optionally calls this hook function when exiting
|
||||
kernel idling if power management operations were performed in
|
||||
:code:`_sys_soc_suspend()`. Any necessary recovery operations can be performed
|
||||
in this function before the kernel scheduler schedules another thread. Some
|
||||
power states may not need this notification. It can be disabled by calling
|
||||
:code:`_sys_soc_pm_idle_exit_notification_disable()` from
|
||||
:code:`_sys_soc_suspend()`.
|
||||
|
||||
Since the hook functions are called with the interrupts disabled, the PMA
|
||||
should ensure that its operations are completed quickly. Thus, the PMA
|
||||
ensures that the kernel's scheduling performance is not disrupted.
|
||||
Resume From Deep Sleep Hook function
|
||||
====================================
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
void _sys_soc_resume_from_deep_sleep(void);
|
||||
|
||||
This function is optionally called when exiting from deep sleep if the SOC
|
||||
interface does not have bootloader support to handle resume from deep sleep.
|
||||
This function should restore context to the point where system entered
|
||||
the deep sleep state.
|
||||
|
||||
.. note::
|
||||
|
||||
Since the hook functions are called with the interrupts disabled, the SOC
|
||||
interface should ensure that its operations are completed quickly. Thus, the
|
||||
SOC interface ensures that the kernel's scheduling performance is not
|
||||
disrupted.
|
||||
|
||||
Power Schemes
|
||||
*************
|
||||
|
||||
When the power management subsystem notifies the SOC interface that the kernel
|
||||
is about to enter a system idle state, it specifies the period of time the
|
||||
system intends to stay idle. The SOC interface can perform various power
|
||||
management operations during this time. For example, put the processor or the
|
||||
SOC in a low power state, turn off some or all of the peripherals or power gate
|
||||
device clocks.
|
||||
|
||||
Different levels of power savings and different wake latencies characterize
|
||||
these power schemes. In general, operations that save more power have a
|
||||
higher wake latency. When making decisions, the SOC interface chooses the
|
||||
scheme that saves the most power. At the same time, the scheme's total
|
||||
execution time must fit within the idle time allotted by the power management
|
||||
subsystem.
|
||||
|
||||
The power management subsystem classifies power management schemes
|
||||
into two categories based on whether the CPU loses execution context during the
|
||||
power state transition.
|
||||
|
||||
* SYS_PM_LOW_POWER_STATE
|
||||
* SYS_PM_DEEP_SLEEP
|
||||
|
||||
SYS_PM_LOW_POWER_STATE
|
||||
======================
|
||||
|
||||
CPU does not lose execution context. Devices also do not lose power while
|
||||
entering power states in this category. The wake latencies of power states
|
||||
in this category are relatively low.
|
||||
|
||||
SYS_PM_DEEP_SLEEP
|
||||
=================
|
||||
|
||||
CPU is power gated and loses execution context. Execution will resume at
|
||||
OS startup code or at a resume point determined by a bootloader that supports
|
||||
deep sleep resume. Depending on the SOC's implementation of the power saving
|
||||
feature, it may turn off power to most devices. RAM may be retained by some
|
||||
implementations, while others may remove power from RAM saving considerable
|
||||
power. Power states in this category save more power than
|
||||
`SYS_PM_LOW_POWER_STATE`_ and would have higher wake latencies.
|
||||
|
||||
Device Power Management Infrastructure
|
||||
**************************************
|
||||
|
||||
The device power management infrastructure consists of interfaces to the Zephyr
|
||||
device model. These APIs send control commands to the device driver
|
||||
The device power management infrastructure consists of interfaces to the
|
||||
Zephyr RTOS device model. These APIs send control commands to the device driver
|
||||
to update its power state or to get its current power state.
|
||||
Refer to :ref:`power_management_api` for detailed descriptions of the APIs.
|
||||
|
||||
Zephyr RTOS supports two methods of doing device power management.
|
||||
|
||||
* Distributed method
|
||||
* Central method
|
||||
|
||||
Distributed method
|
||||
==================
|
||||
|
||||
In this method, the application or any component that deals with devices directly
|
||||
and has the best knowledge of their use does the device power management. This
|
||||
saves power if some devices that are not in use can be turned off or put
|
||||
in power saving mode. This method allows saving power even when the CPU is
|
||||
active. The components that use the devices need to be power aware and should
|
||||
be able to make decisions related to managing device power. In this method, the
|
||||
SOC interface can enter CPU or SOC low power states quickly when
|
||||
:code:`_sys_soc_suspend()` gets called. This is because it does not need to
|
||||
spend time doing device power management if the devices are already put in
|
||||
the appropriate low power state by the application or component managing the
|
||||
devices.
|
||||
|
||||
Central method
|
||||
==============
|
||||
|
||||
In this method device power management is mostly done inside
|
||||
:code:`_sys_soc_suspend()` along with entering a CPU or SOC low power state.
|
||||
|
||||
If a decision to enter deep sleep is made, the implementation would enter it
|
||||
only after checking if the devices are not in the middle of a hardware
|
||||
transaction that cannot be interrupted. This method can be used in
|
||||
implementations where the applications and components using devices are not
|
||||
expected to be power aware and do not implement device power management.
|
||||
|
||||
This method can also be used to emulate a hardware feature supported by some
|
||||
SOCs which causes automatic entry to deep sleep when all devices are idle.
|
||||
Refer to `Busy Status Indication`_ to see how to indicate whether a device is busy
|
||||
or idle.
|
||||
|
||||
Device Power Management States
|
||||
==============================
|
||||
The Zephyr OS power management subsystem defines four device states.
|
||||
These states are classified based on the degree of context that gets lost in
|
||||
those states, kind of operations done to save power and the impact on the device
|
||||
behavior due to the state transition. Device context include device hardware
|
||||
The Zephyr RTOS power management subsystem defines four device states.
|
||||
These states are classified based on the degree of device context that gets lost
|
||||
in those states, kind of operations done to save power, and the impact on the
|
||||
device behavior due to the state transition. Device context includes device
|
||||
registers, clocks, memory etc.
|
||||
|
||||
The four device power states:
|
||||
@@ -271,15 +283,13 @@ The four device power states:
|
||||
Device Power Management Operations
|
||||
==================================
|
||||
|
||||
Zephyr OS provides a generic API function to send control commands to the driver.
|
||||
Currently the supported control commands are:
|
||||
Zephyr RTOS power management subsystem provides a control function interface
|
||||
to device drivers to indicate power management operations to perform.
|
||||
The supported PM control commands are:
|
||||
|
||||
* DEVICE_PM_SET_POWER_STATE
|
||||
* DEVICE_PM_GET_POWER_STATE
|
||||
|
||||
In the future Zephyr OS may support additional control commands.
|
||||
Drivers can implement the control command handler to support the device driver's
|
||||
power management functionality.
|
||||
Each device driver defines:
|
||||
|
||||
* The device's supported power states.
|
||||
@@ -299,20 +309,20 @@ Device Model with Power Management Support
|
||||
|
||||
Drivers initialize the devices using macros. See :ref:`device_drivers` for
|
||||
details on how these macros are used. Use the DEVICE_DEFINE macro to initialize
|
||||
drivers providing power management support via the control function.
|
||||
One of the macro parameters is the pointer to the device_control handler function.
|
||||
drivers providing power management support via the PM control function.
|
||||
One of the macro parameters is the pointer to the device_pm_control handler function.
|
||||
|
||||
Default Initializer Function
|
||||
----------------------------
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int device_control_nop(struct device *unused_device, uint32_t unused_ctrl_command, void *unused_context);
|
||||
int device_pm_control_nop(struct device *unused_device, uint32_t unused_ctrl_command, void *unused_context);
|
||||
|
||||
|
||||
If the driver doesn't implement any power control operations, the driver can
|
||||
initialize the corresponding pointer with this default nop function. This
|
||||
default initializer function does nothing and should be used instead of
|
||||
default nop function does nothing and should be used instead of
|
||||
implementing a dummy function to avoid wasting code memory in the driver.
|
||||
|
||||
|
||||
@@ -329,18 +339,14 @@ Get Device List
|
||||
|
||||
void device_list_get(struct device **device_list, int *device_count);
|
||||
|
||||
The Zephyr kernel internally maintains a list of all devices in the system.
|
||||
The PMA uses this API to get the device list. The PMA can use the list to
|
||||
The Zephyr RTOS kernel internally maintains a list of all devices in the system.
|
||||
The SOC interface uses this API to get the device list. The SOC interface can use the list to
|
||||
identify the devices on which to execute power management operations.
|
||||
|
||||
The PMA can use this list to create a sorted order list based on device
|
||||
dependencies. The PMA creates device groups to execute different policies
|
||||
on each device group.
|
||||
|
||||
.. note::
|
||||
|
||||
Ensure that the PMA does not alter the original list. Since the kernel
|
||||
uses the original list, it should remain unchanged.
|
||||
Ensure that the SOC interface does not alter the original list. Since the kernel
|
||||
uses the original list, it must remain unchanged.
|
||||
|
||||
Device Set Power State
|
||||
----------------------
|
||||
@@ -349,7 +355,7 @@ Device Set Power State
|
||||
|
||||
int device_set_power_state(struct device *device, uint32_t device_power_state);
|
||||
|
||||
Calls the :c:func:`device_control()` handler function implemented by the
|
||||
Calls the :c:func:`device_pm_control()` handler function implemented by the
|
||||
device driver with DEVICE_PM_SET_POWER_STATE command.
|
||||
|
||||
Device Get Power State
|
||||
@@ -359,28 +365,37 @@ Device Get Power State
|
||||
|
||||
int device_get_power_state(struct device *device, uint32_t * device_power_state);
|
||||
|
||||
Calls the :c:func:`device_control()` handler function implemented by the
|
||||
Calls the :c:func:`device_pm_control()` handler function implemented by the
|
||||
device driver with DEVICE_PM_GET_POWER_STATE command.
|
||||
|
||||
Busy Status Indication
|
||||
======================
|
||||
|
||||
The PMA executes some power policies that can turn off power to devices,
|
||||
The SOC interface executes some power policies that can turn off power to devices,
|
||||
causing them to lose their state. If the devices are in the middle of some
|
||||
hardware transaction, like writing to flash memory when the power is turned
|
||||
off, then such transactions would be left in an inconsistent state. This
|
||||
infrastructure guards such transactions by indicating to the PMA that
|
||||
infrastructure guards such transactions by indicating to the SOC interface that
|
||||
the device is in the middle of a hardware transaction.
|
||||
|
||||
When the :code:`_sys_soc_suspend()` is called, the PMA checks if any device
|
||||
is busy. The PMA can then decide to execute a policy other than deep sleep or
|
||||
When the :code:`_sys_soc_suspend()` is called, the SOC interface checks if any device
|
||||
is busy. The SOC interface can then decide to execute a power management scheme other than deep sleep or
|
||||
to defer power management operations until the next call of
|
||||
:code:`_sys_soc_suspend()`.
|
||||
|
||||
If other recovery or retrieval methods are in place, the driver can avoid
|
||||
guarding the transactions. Not all hardware transactions must be guarded. The
|
||||
Zephyr kernel provides the following APIs for the device drivers and the PMA
|
||||
to decide whether a particular transaction must be guarded.
|
||||
An alternative to using the busy status mechanism is to use the
|
||||
`distributed method`_ of device power management. In such a method where the
|
||||
device power management is handled in a distributed manner rather than centrally in
|
||||
:code:`_sys_soc_suspend()`, the decision to enter deep sleep can be made based
|
||||
on whether all devices are already turned off.
|
||||
|
||||
This feature can be also used to emulate a hardware feature found in some SOCs
|
||||
that causes the system to automatically enter deep sleep when all devices are idle.
|
||||
In such a usage, the busy status can be set by default and cleared as each
|
||||
device becomes idle. When :code:`_sys_soc_suspend()` is called, deep sleep can
|
||||
be entered if no device is found to be busy.
|
||||
|
||||
Here are the APIs used to set, clear, and check the busy status of devices.
|
||||
|
||||
Indicate Busy Status API
|
||||
------------------------
|
||||
@@ -422,8 +437,6 @@ Check Busy Status of All Devices API
|
||||
|
||||
Checks if any device is busy. The API returns 0 if no device in the system is busy.
|
||||
|
||||
.. _pm_config_flags:
|
||||
|
||||
Power Management Configuration Flags
|
||||
************************************
|
||||
|
||||
@@ -434,9 +447,13 @@ the following configuration flags.
|
||||
|
||||
This flag enables the power management subsystem.
|
||||
|
||||
:code:`CONFIG_TICKLESS_IDLE`
|
||||
|
||||
This flag enables the tickless idle power saving feature.
|
||||
|
||||
:code:`CONFIG_SYS_POWER_LOW_POWER_STATE`
|
||||
|
||||
The PMA enables this flag to use the :code:`SYS_PM_LOW_POWER_STATE` policy.
|
||||
The SOC interface enables this flag to use the :code:`SYS_PM_LOW_POWER_STATE` policy.
|
||||
|
||||
:code:`CONFIG_SYS_POWER_DEEP_SLEEP`
|
||||
|
||||
@@ -444,155 +461,6 @@ the following configuration flags.
|
||||
|
||||
:code:`CONFIG_DEVICE_POWER_MANAGEMENT`
|
||||
|
||||
This flag is enabled if the PMA and the devices support device power
|
||||
This flag is enabled if the SOC interface and the devices support device power
|
||||
management.
|
||||
|
||||
Writing a Power Management Application
|
||||
**************************************
|
||||
|
||||
A typical PMA executes policies through power management APIS. This section
|
||||
details various scenarios that can be used to help developers write their own
|
||||
custom PMAs.
|
||||
|
||||
The PMA is part of a larger application doing more than just power
|
||||
management. This section focuses on the power management aspects of the
|
||||
application.
|
||||
|
||||
Initial Setup
|
||||
=============
|
||||
|
||||
To enable the power management support, the application must do the following:
|
||||
|
||||
#. Enable the :code:`CONFIG_SYS_POWER_MANAGEMENT` flag
|
||||
|
||||
#. Enable other required config flags described in :ref:`pm_config_flags`.
|
||||
|
||||
#. Implement the hook functions described in :ref:`pm_hook_infra`.
|
||||
|
||||
Device List and Policies
|
||||
========================
|
||||
|
||||
The PMA retrieves the list of enabled devices in the system using the
|
||||
:c:func:`device_list_get()` function. Since the PMA is part of the
|
||||
application, the PMA starts after all devices in the system have been
|
||||
initialized. Thus, the list of devices will not change once the application
|
||||
has begun.
|
||||
|
||||
Once the device list has been retrieved and stored, the PMA can form device
|
||||
groups and sorted lists based on device dependencies. The PMA uses the device
|
||||
lists and the known aggregate wake latency of the combination of power
|
||||
operations to create the fine grain custom power policies. Finally, the PMA
|
||||
maps these custom policies to the policy categories defined by the power
|
||||
management subsystem as described in `Policies`_.
|
||||
|
||||
Scenarios During Suspend
|
||||
========================
|
||||
|
||||
When the power management subsystem calls the :code:`_sys_soc_suspend()`
|
||||
function, the PMA can select between multiple scenarios.
|
||||
|
||||
Scenario 1
|
||||
----------
|
||||
|
||||
The time allotted is too short for any power management.
|
||||
|
||||
In this case, the PMA leaves the interrupts disabled, and returns the code
|
||||
:code:`SYS_PM_NOT_HANDLED`. This actions allow the Zephyr kernel to continue
|
||||
with its normal idling process.
|
||||
|
||||
Scenario 2
|
||||
----------
|
||||
|
||||
The time allotted allows the suspension of some devices.
|
||||
|
||||
The PMA scans through the devices that meet the criteria and calls the
|
||||
:c:func:`device_set_power_state()` function with DEVICE_PM_SUSPEND_STATE state
|
||||
for each device.
|
||||
|
||||
After all devices are suspended properly, the PMA executes the following
|
||||
operations:
|
||||
|
||||
* If the time allotted is enough for the :code:`SYS_PM_LOW_POWER_STATE`
|
||||
policy:
|
||||
|
||||
#. The PMA sets up the wake event, puts the CPU in a LPS, and re- enables
|
||||
the interrupts at the same time.
|
||||
|
||||
#. The PMA returns the :code:`SYS_PM_LOW_POWER_STATE` code.
|
||||
|
||||
* If the time allotted is not enough for the :code:`SYS_PM_LOW_POWER_STATE`
|
||||
policy, the PMA returns the :code:`SYS_PM_DEVICE_SUSPEND_ONLY` code.
|
||||
|
||||
When a device fails to suspend, the PMA executes the following operations:
|
||||
|
||||
* If the system integrator determined that the device is not essential to the
|
||||
suspend process, the PMA can ignore the failure.
|
||||
|
||||
* If the system integrator determined that the device is essential to the
|
||||
suspend process, the PMA takes any necessary recovery actions and
|
||||
returns the :code:`SYS_PM_NOT_HANDLED` code.
|
||||
|
||||
Scenario 3
|
||||
----------
|
||||
|
||||
The time allotted is enough for all devices to be suspended.
|
||||
|
||||
The PMA calls the :c:func:`device_set_power_stated()` function with
|
||||
DEVICE_PM_SUSPEND_STATE state for each device.
|
||||
|
||||
After all devices are suspended properly and the time allotted is enough for
|
||||
the :code:`SYS_PM_DEEP_SLEEP` policy, the PMA executes the following
|
||||
operations:
|
||||
|
||||
#. Calls the :c:func:`device_any_busy_check()` function to get device busy
|
||||
status. If any device is busy, the PMA must choose a policy other than
|
||||
:code:`SYS_PM_DEEP_SLEEP`.
|
||||
#. Sets up wake event.
|
||||
#. Puts the SOC in the deep sleep state.
|
||||
#. Re-enables interrupts.
|
||||
#. Returns the :code:`SYS_PM_DEEP_SLEEP` code.
|
||||
|
||||
If, on the other hand, the time allotted is only enough for the
|
||||
:code:`SYS_PM_LOW_POWER_STATE` policy, The PMA executes the following
|
||||
operations:
|
||||
|
||||
#. Sets up wake event.
|
||||
#. Puts the CPU in a LPS re-enabling interrupts at the same time.
|
||||
#. Returns the :code:`SYS_PM_LOW_POWER_STATE` code.
|
||||
|
||||
If the time allotted is not enough for any CPU or SOC power management
|
||||
operations, the PMA returns the :code:`SYS_PM_DEVICE_SUSPEND_ONLY` code.
|
||||
|
||||
When a device fails to suspend, the PMA executes the following operations:
|
||||
|
||||
* If the system integrator determined that the device is not essential to the
|
||||
suspend process the PMA can ignore the failure.
|
||||
|
||||
* If the system integrator determined that the device is essential to the
|
||||
suspend process, the PMA takes any necessary recovery actions and
|
||||
returns the :code:`SYS_PM_NOT_HANDLED` code.
|
||||
|
||||
Policy Decision Summary
|
||||
=======================
|
||||
|
||||
+---------------------------------+---------------------------------------+
|
||||
| PM operations | Policy and Return Code |
|
||||
+=================================+=======================================+
|
||||
| Suspend some devices and | :code:`SYS_PM_LOW_POWER_STATE` |
|
||||
| | |
|
||||
| Enter Low Power State | |
|
||||
+---------------------------------+---------------------------------------+
|
||||
| Suspend all devices and | :code:`SYS_PM_LOW_POWER_STATE` |
|
||||
| | |
|
||||
| Enter Low Power State | |
|
||||
+---------------------------------+---------------------------------------+
|
||||
| Suspend all devices and | :code:`SYS_PM_DEEP_SLEEP` |
|
||||
| | |
|
||||
| Enter Deep Sleep | |
|
||||
+---------------------------------+---------------------------------------+
|
||||
| Suspend some or all devices and | :code:`SYS_PM_DEVICE_SUSPEND_ONLY` |
|
||||
| | |
|
||||
| No CPU/SoC PM Operation | |
|
||||
+---------------------------------+---------------------------------------+
|
||||
| No PM operation | :code:`SYS_PM_NOT_HANDLED` |
|
||||
+---------------------------------+---------------------------------------+
|
||||
|
||||
@@ -62,6 +62,8 @@ static int aio_qmsi_cmp_disable(struct device *dev, uint8_t index)
|
||||
/* Disable comparator according to index */
|
||||
config.int_en &= ~(1 << index);
|
||||
config.power &= ~(1 << index);
|
||||
config.reference &= ~(1 << index);
|
||||
config.polarity &= ~(1 << index);
|
||||
|
||||
if (qm_ac_set_config(&config) != 0) {
|
||||
return -EINVAL;
|
||||
|
||||
@@ -68,8 +68,8 @@ static bool reliable_packet(uint8_t type)
|
||||
}
|
||||
|
||||
/* FIXME: Correct timeout */
|
||||
#define H5_RX_ACK_TIMEOUT 250
|
||||
#define H5_TX_ACK_TIMEOUT 250
|
||||
#define H5_RX_ACK_TIMEOUT K_MSEC(250)
|
||||
#define H5_TX_ACK_TIMEOUT K_MSEC(250)
|
||||
|
||||
#define SLIP_DELIMITER 0xc0
|
||||
#define SLIP_ESC 0xdb
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
#endif
|
||||
|
||||
/* Peripheral timeout to initialize Connection Parameter Update procedure */
|
||||
#define CONN_UPDATE_TIMEOUT (5 * MSEC_PER_SEC)
|
||||
#define CONN_UPDATE_TIMEOUT K_SECONDS(5)
|
||||
|
||||
static struct bt_conn conns[CONFIG_BLUETOOTH_MAX_CONN];
|
||||
static struct bt_conn_cb *callback_list;
|
||||
|
||||
@@ -47,6 +47,7 @@ config CLOCK_CONTROL_NRF5_K32SRC_DRV_NAME
|
||||
choice
|
||||
prompt "32KHz clock source"
|
||||
default CLOCK_CONTROL_NRF5_K32SRC_XTAL
|
||||
depends on CLOCK_CONTROL_NRF5
|
||||
|
||||
config CLOCK_CONTROL_NRF5_K32SRC_RC
|
||||
bool
|
||||
@@ -61,6 +62,7 @@ endchoice
|
||||
choice
|
||||
prompt "32KHz clock accuracy"
|
||||
default CLOCK_CONTROL_NRF5_K32SRC_20PPM
|
||||
depends on CLOCK_CONTROL_NRF5
|
||||
|
||||
config CLOCK_CONTROL_NRF5_K32SRC_500PPM
|
||||
bool
|
||||
|
||||
@@ -65,8 +65,7 @@ void uart_console_out_debug_hook_install(uart_console_out_debug_hook_t *hook)
|
||||
}
|
||||
#define HANDLE_DEBUG_HOOK_OUT(c) \
|
||||
(debug_hook_out(c) == UART_CONSOLE_DEBUG_HOOK_HANDLED)
|
||||
#else
|
||||
#define HANDLE_DEBUG_HOOK_OUT(c) 0
|
||||
|
||||
#endif /* CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS */
|
||||
|
||||
#if 0 /* NOTUSED */
|
||||
@@ -102,12 +101,16 @@ static int console_in(void)
|
||||
|
||||
static int console_out(int c)
|
||||
{
|
||||
#ifdef CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS
|
||||
|
||||
int handled_by_debug_server = HANDLE_DEBUG_HOOK_OUT(c);
|
||||
|
||||
if (handled_by_debug_server) {
|
||||
return c;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS */
|
||||
|
||||
if ('\n' == c) {
|
||||
uart_poll_out(uart_console_dev, '\r');
|
||||
}
|
||||
|
||||
@@ -132,7 +132,7 @@ static void eth_enc28j60_clear_eth_reg(struct device *dev, uint16_t reg_addr,
|
||||
}
|
||||
|
||||
static void eth_enc28j60_write_mem(struct device *dev, uint8_t *data_buffer,
|
||||
uint8_t buf_len)
|
||||
uint16_t buf_len)
|
||||
{
|
||||
struct eth_enc28j60_runtime *context = dev->driver_data;
|
||||
uint8_t tx_buf[MAX_BUFFER_LENGTH + 1];
|
||||
@@ -149,7 +149,7 @@ static void eth_enc28j60_write_mem(struct device *dev, uint8_t *data_buffer,
|
||||
tx_buf[0] = ENC28J60_SPI_WBM;
|
||||
|
||||
for (int i = 0; i < num_segments;
|
||||
++i, index_buf += i * MAX_BUFFER_LENGTH) {
|
||||
++i, index_buf += MAX_BUFFER_LENGTH) {
|
||||
|
||||
memcpy(tx_buf + 1, index_buf, MAX_BUFFER_LENGTH);
|
||||
|
||||
@@ -164,7 +164,7 @@ static void eth_enc28j60_write_mem(struct device *dev, uint8_t *data_buffer,
|
||||
}
|
||||
|
||||
static void eth_enc28j60_read_mem(struct device *dev, uint8_t *data_buffer,
|
||||
uint8_t buf_len)
|
||||
uint16_t buf_len)
|
||||
{
|
||||
struct eth_enc28j60_runtime *context = dev->driver_data;
|
||||
uint8_t *index_buf;
|
||||
@@ -181,7 +181,7 @@ static void eth_enc28j60_read_mem(struct device *dev, uint8_t *data_buffer,
|
||||
tx_buf[0] = ENC28J60_SPI_RBM;
|
||||
|
||||
for (int i = 0; i < num_segments;
|
||||
++i, index_buf += i * MAX_BUFFER_LENGTH) {
|
||||
++i, index_buf += MAX_BUFFER_LENGTH) {
|
||||
|
||||
spi_transceive(context->spi, tx_buf, MAX_BUFFER_LENGTH + 1,
|
||||
tx_buf, MAX_BUFFER_LENGTH + 1);
|
||||
|
||||
@@ -74,10 +74,12 @@ static void _config(struct device *dev, uint32_t mask, int flags)
|
||||
cfg->port->lsr = mask;
|
||||
}
|
||||
|
||||
if (flags & GPIO_INT_ACTIVE_LOW) {
|
||||
cfg->port->fellsr = mask;
|
||||
} else if (flags & GPIO_INT_ACTIVE_HIGH) {
|
||||
if (flags & GPIO_INT_ACTIVE_HIGH) {
|
||||
/* Trigger in high level or rising edge */
|
||||
cfg->port->rehlsr = mask;
|
||||
} else {
|
||||
/* Trigger in low level or falling edge */
|
||||
cfg->port->fellsr = mask;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -230,8 +230,7 @@ static inline void dw_port_config(struct device *port, int flags)
|
||||
static inline int gpio_dw_config(struct device *port, int access_op,
|
||||
uint32_t pin, int flags)
|
||||
{
|
||||
if (((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) ||
|
||||
((flags & GPIO_DIR_IN) && (flags & GPIO_DIR_OUT))) {
|
||||
if ((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
@@ -40,8 +40,7 @@ static int gpio_k64_config(struct device *dev,
|
||||
uint8_t i;
|
||||
|
||||
/* check for an invalid pin configuration */
|
||||
if (((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) ||
|
||||
((flags & GPIO_DIR_IN) && (flags & GPIO_DIR_OUT))) {
|
||||
if ((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
|
||||
@@ -272,8 +272,9 @@ static inline void qmsi_port_config(struct device *port, int flags)
|
||||
static inline int gpio_qmsi_config(struct device *port,
|
||||
int access_op, uint32_t pin, int flags)
|
||||
{
|
||||
if (((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) ||
|
||||
((flags & GPIO_DIR_IN) && (flags & GPIO_DIR_OUT))) {
|
||||
/* If the pin/port is set to receive interrupts, make sure the pin
|
||||
is an input */
|
||||
if ((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
@@ -262,8 +262,8 @@ static inline void ss_qmsi_port_config(struct device *port, int flags)
|
||||
static inline int ss_gpio_qmsi_config(struct device *port, int access_op,
|
||||
uint32_t pin, int flags)
|
||||
{
|
||||
if (((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) ||
|
||||
((flags & GPIO_DIR_IN) && (flags & GPIO_DIR_OUT))) {
|
||||
/* check for an invalid pin configuration */
|
||||
if ((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
@@ -666,7 +666,7 @@ static void cc2520_rx(int arg, int unused2)
|
||||
}
|
||||
|
||||
net_analyze_stack("CC2520 Rx Fiber stack",
|
||||
cc2520->cc2520_rx_stack,
|
||||
(unsigned char *)cc2520->cc2520_rx_stack,
|
||||
CONFIG_CC2520_RX_STACK_SIZE);
|
||||
goto flush;
|
||||
error:
|
||||
|
||||
@@ -37,12 +37,17 @@ config PINMUX_NAME
|
||||
config PINMUX_INIT_PRIORITY
|
||||
int
|
||||
prompt "Init priority"
|
||||
default 60
|
||||
default 45
|
||||
depends on PINMUX
|
||||
help
|
||||
Device driver initialization priority.
|
||||
The device needs to be initialized after all the devices it
|
||||
uses.
|
||||
Pinmux driver initialization priority.
|
||||
Pinmux driver almost certainly should be initialized before the
|
||||
rest of hardware devices (which may need specific pins already
|
||||
configured for them), and usually after generic GPIO drivers.
|
||||
Thus, its priority should be between KERNEL_INIT_PRIORITY_DEFAULT
|
||||
and KERNEL_INIT_PRIORITY_DEVICE. There are exceptions to this
|
||||
rule for particular boards. Don't change this value unless you
|
||||
know what you are doing.
|
||||
|
||||
config PINMUX_K64
|
||||
bool "Freescale K64-based Pin multiplexer driver"
|
||||
|
||||
@@ -53,5 +53,5 @@ int pinmux_fsl_k64_initialize(struct device *port)
|
||||
/* must be initialized after GPIO */
|
||||
DEVICE_AND_API_INIT(pmux, CONFIG_PINMUX_DEV_NAME, &pinmux_fsl_k64_initialize,
|
||||
NULL, NULL,
|
||||
POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
|
||||
POST_KERNEL, CONFIG_PINMUX_INIT_PRIORITY,
|
||||
&api_funcs);
|
||||
|
||||
@@ -114,4 +114,4 @@ static int fsl_frdm_k64f_pin_init(struct device *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
SYS_INIT(fsl_frdm_k64f_pin_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
|
||||
SYS_INIT(fsl_frdm_k64f_pin_init, POST_KERNEL, CONFIG_PINMUX_INIT_PRIORITY);
|
||||
|
||||
@@ -66,4 +66,4 @@ static int hexiwear_pin_init(struct device *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
SYS_INIT(hexiwear_pin_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
|
||||
SYS_INIT(hexiwear_pin_init, POST_KERNEL, CONFIG_PINMUX_INIT_PRIORITY);
|
||||
|
||||
@@ -137,6 +137,7 @@ static int __set_one_port(struct device *dev, qm_pwm_t id, uint32_t pwm,
|
||||
/* No interrupts */
|
||||
cfg.mask_interrupt = true;
|
||||
cfg.callback = NULL;
|
||||
cfg.callback_data = NULL;
|
||||
|
||||
/* Data for the timer to stay high and low */
|
||||
cfg.hi_count = on;
|
||||
|
||||
@@ -132,7 +132,7 @@ static int rtc_qmsi_set_config(struct device *dev, struct rtc_config *cfg)
|
||||
* values defined by clk_rtc_div and by QMSI's clk_rtc_div_t match for
|
||||
* both D2000 and SE.
|
||||
*/
|
||||
qm_cfg.prescaler = RTC_DIVIDER;
|
||||
qm_cfg.prescaler = (clk_rtc_div_t)RTC_DIVIDER;
|
||||
|
||||
rtc_critical_region_start(dev);
|
||||
|
||||
|
||||
@@ -82,29 +82,37 @@ static void bma280_thread_cb(void *arg)
|
||||
struct device *dev = arg;
|
||||
struct bma280_data *drv_data = dev->driver_data;
|
||||
uint8_t status = 0;
|
||||
int err = 0;
|
||||
|
||||
/* check for data ready */
|
||||
i2c_reg_read_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_STATUS_1, &status);
|
||||
err = i2c_reg_read_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_STATUS_1, &status);
|
||||
if (status & BMA280_BIT_DATA_INT_STATUS &&
|
||||
drv_data->data_ready_handler != NULL) {
|
||||
drv_data->data_ready_handler != NULL &&
|
||||
err == 0) {
|
||||
drv_data->data_ready_handler(dev,
|
||||
&drv_data->data_ready_trigger);
|
||||
}
|
||||
|
||||
/* check for any motion */
|
||||
i2c_reg_read_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_STATUS_0, &status);
|
||||
err = i2c_reg_read_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_STATUS_0, &status);
|
||||
if (status & BMA280_BIT_SLOPE_INT_STATUS &&
|
||||
drv_data->any_motion_handler != NULL) {
|
||||
drv_data->any_motion_handler != NULL &&
|
||||
err == 0) {
|
||||
drv_data->any_motion_handler(dev,
|
||||
&drv_data->data_ready_trigger);
|
||||
|
||||
/* clear latched interrupt */
|
||||
i2c_reg_update_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_RST_LATCH,
|
||||
BMA280_BIT_INT_LATCH_RESET,
|
||||
BMA280_BIT_INT_LATCH_RESET);
|
||||
err = i2c_reg_update_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_RST_LATCH,
|
||||
BMA280_BIT_INT_LATCH_RESET,
|
||||
BMA280_BIT_INT_LATCH_RESET);
|
||||
|
||||
if (err < 0) {
|
||||
SYS_LOG_DBG("Could not update clear the interrupt");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
gpio_pin_enable_callback(drv_data->gpio, CONFIG_BMA280_GPIO_PIN_NUM);
|
||||
|
||||
@@ -172,13 +172,19 @@ static const struct sensor_driver_api bme280_api_funcs = {
|
||||
.channel_get = bme280_channel_get,
|
||||
};
|
||||
|
||||
static void bme280_read_compensation(struct bme280_data *data)
|
||||
static int bme280_read_compensation(struct bme280_data *data)
|
||||
{
|
||||
uint16_t buf[12];
|
||||
uint8_t hbuf[7];
|
||||
int err = 0;
|
||||
|
||||
i2c_burst_read(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_COMP_START, (uint8_t *)buf, sizeof(buf));
|
||||
err = i2c_burst_read(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_COMP_START,
|
||||
(uint8_t *)buf, sizeof(buf));
|
||||
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
data->dig_t1 = sys_le16_to_cpu(buf[0]);
|
||||
data->dig_t2 = sys_le16_to_cpu(buf[1]);
|
||||
@@ -186,8 +192,8 @@ static void bme280_read_compensation(struct bme280_data *data)
|
||||
|
||||
data->dig_p1 = sys_le16_to_cpu(buf[3]);
|
||||
data->dig_p2 = sys_le16_to_cpu(buf[4]);
|
||||
data->dig_p4 = sys_le16_to_cpu(buf[5]);
|
||||
data->dig_p3 = sys_le16_to_cpu(buf[6]);
|
||||
data->dig_p3 = sys_le16_to_cpu(buf[5]);
|
||||
data->dig_p4 = sys_le16_to_cpu(buf[6]);
|
||||
data->dig_p5 = sys_le16_to_cpu(buf[7]);
|
||||
data->dig_p6 = sys_le16_to_cpu(buf[8]);
|
||||
data->dig_p7 = sys_le16_to_cpu(buf[9]);
|
||||
@@ -195,11 +201,20 @@ static void bme280_read_compensation(struct bme280_data *data)
|
||||
data->dig_p9 = sys_le16_to_cpu(buf[11]);
|
||||
|
||||
if (data->chip_id == BME280_CHIP_ID) {
|
||||
i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_HUM_COMP_PART1, &data->dig_h1);
|
||||
err = i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_HUM_COMP_PART1,
|
||||
&data->dig_h1);
|
||||
|
||||
i2c_burst_read(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_HUM_COMP_PART2, hbuf, 7);
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
err = i2c_burst_read(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_HUM_COMP_PART2, hbuf, 7);
|
||||
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
data->dig_h2 = (hbuf[1] << 8) | hbuf[0];
|
||||
data->dig_h3 = hbuf[2];
|
||||
@@ -207,14 +222,20 @@ static void bme280_read_compensation(struct bme280_data *data)
|
||||
data->dig_h5 = ((hbuf[4] >> 4) & 0x0F) | (hbuf[5] << 4);
|
||||
data->dig_h6 = hbuf[6];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bme280_chip_init(struct device *dev)
|
||||
{
|
||||
struct bme280_data *data = (struct bme280_data *) dev->driver_data;
|
||||
|
||||
i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_ID, &data->chip_id);
|
||||
int err = i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_ID, &data->chip_id);
|
||||
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
if (data->chip_id == BME280_CHIP_ID) {
|
||||
SYS_LOG_DBG("BME280 chip detected");
|
||||
@@ -226,7 +247,11 @@ static int bme280_chip_init(struct device *dev)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
bme280_read_compensation(data);
|
||||
err = bme280_read_compensation(data);
|
||||
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
if (data->chip_id == BME280_CHIP_ID) {
|
||||
i2c_reg_write_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
|
||||
@@ -225,7 +225,7 @@ static int bmi160_acc_odr_set(struct device *dev, uint16_t freq_int,
|
||||
uint16_t freq_milli)
|
||||
{
|
||||
struct bmi160_device_data *bmi160 = dev->driver_data;
|
||||
uint8_t odr = bmi160_freq_to_odr_val(freq_int, freq_milli);
|
||||
int odr = bmi160_freq_to_odr_val(freq_int, freq_milli);
|
||||
|
||||
if (odr < 0) {
|
||||
return odr;
|
||||
@@ -242,7 +242,7 @@ static int bmi160_acc_odr_set(struct device *dev, uint16_t freq_int,
|
||||
return bmi160_reg_field_update(dev, BMI160_REG_ACC_CONF,
|
||||
BMI160_ACC_CONF_ODR_POS,
|
||||
BMI160_ACC_CONF_ODR_MASK,
|
||||
odr);
|
||||
(uint8_t) odr);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -482,7 +482,7 @@ static int bmi160_acc_config(struct device *dev, enum sensor_channel chan,
|
||||
static int bmi160_gyr_odr_set(struct device *dev, uint16_t freq_int,
|
||||
uint16_t freq_milli)
|
||||
{
|
||||
uint8_t odr = bmi160_freq_to_odr_val(freq_int, freq_milli);
|
||||
int odr = bmi160_freq_to_odr_val(freq_int, freq_milli);
|
||||
|
||||
if (odr < 0) {
|
||||
return odr;
|
||||
@@ -495,7 +495,7 @@ static int bmi160_gyr_odr_set(struct device *dev, uint16_t freq_int,
|
||||
return bmi160_reg_field_update(dev, BMI160_REG_GYR_CONF,
|
||||
BMI160_GYR_CONF_ODR_POS,
|
||||
BMI160_GYR_CONF_ODR_MASK,
|
||||
odr);
|
||||
(uint8_t) odr);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -769,7 +769,7 @@ static inline void bmi160_acc_channel_get(struct device *dev,
|
||||
|
||||
static int bmi160_temp_channel_get(struct device *dev, struct sensor_value *val)
|
||||
{
|
||||
int16_t temp_raw = 0;
|
||||
uint16_t temp_raw = 0;
|
||||
int32_t temp_micro = 0;
|
||||
struct bmi160_device_data *bmi160 = dev->driver_data;
|
||||
|
||||
|
||||
@@ -354,7 +354,7 @@ static struct ss_spi_qmsi_runtime spi_qmsi_mst_1_runtime;
|
||||
|
||||
DEVICE_DEFINE(ss_spi_master_1, CONFIG_SPI_1_NAME, ss_spi_qmsi_init,
|
||||
ss_spi_master_qmsi_device_ctrl, &spi_qmsi_mst_1_runtime,
|
||||
&spi_qmsi_mst_0_config, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,
|
||||
&spi_qmsi_mst_1_config, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,
|
||||
NULL);
|
||||
#endif /* CONFIG_SPI_1 */
|
||||
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
ifdef CONFIG_QMSI
|
||||
|
||||
KBUILD_CPPFLAGS +=-DENABLE_EXTERNAL_ISR_HANDLING
|
||||
|
||||
ifdef CONFIG_QMSI_LIBRARY
|
||||
ZEPHYRINCLUDE += -I$(CONFIG_QMSI_INSTALL_PATH)/include
|
||||
LIB_INCLUDE_DIR += -L$(CONFIG_QMSI_INSTALL_PATH:"%"=%)/lib
|
||||
@@ -21,4 +24,6 @@ SOC_WATCH_ENABLE ?= 0
|
||||
ifeq ($(CONFIG_SOC_WATCH),y)
|
||||
SOC_WATCH_ENABLE := 1
|
||||
CFLAGS += -DSOC_WATCH_ENABLE
|
||||
endif
|
||||
endif
|
||||
|
||||
endif
|
||||
|
||||
@@ -96,8 +96,7 @@ int32_t tc_hmac_set_key(TCHmacState_t ctx,
|
||||
int32_t tc_hmac_init(TCHmacState_t ctx)
|
||||
{
|
||||
/* input sanity check: */
|
||||
if (ctx == (TCHmacState_t) 0 ||
|
||||
ctx->key == (uint8_t *) 0) {
|
||||
if (ctx == (TCHmacState_t) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
}
|
||||
|
||||
@@ -114,7 +113,7 @@ int32_t tc_hmac_update(TCHmacState_t ctx,
|
||||
uint32_t data_length)
|
||||
{
|
||||
/* input sanity check: */
|
||||
if (ctx == (TCHmacState_t) 0 || ctx->key == (uint8_t *) 0) {
|
||||
if (ctx == (TCHmacState_t) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
}
|
||||
|
||||
@@ -128,8 +127,7 @@ int32_t tc_hmac_final(uint8_t *tag, uint32_t taglen, TCHmacState_t ctx)
|
||||
/* input sanity check: */
|
||||
if (tag == (uint8_t *) 0 ||
|
||||
taglen != TC_SHA256_DIGEST_SIZE ||
|
||||
ctx == (TCHmacState_t) 0 ||
|
||||
ctx->key == (uint8_t *) 0) {
|
||||
ctx == (TCHmacState_t) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
}
|
||||
|
||||
|
||||
@@ -66,7 +66,6 @@ int32_t tc_sha256_update(TCSha256State_t s, const uint8_t *data, size_t datalen)
|
||||
{
|
||||
/* input sanity check: */
|
||||
if (s == (TCSha256State_t) 0 ||
|
||||
s->iv == (uint32_t *) 0 ||
|
||||
data == (void *) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
} else if (datalen == 0) {
|
||||
@@ -91,8 +90,7 @@ int32_t tc_sha256_final(uint8_t *digest, TCSha256State_t s)
|
||||
|
||||
/* input sanity check: */
|
||||
if (digest == (uint8_t *) 0 ||
|
||||
s == (TCSha256State_t) 0 ||
|
||||
s->iv == (uint32_t *) 0) {
|
||||
s == (TCSha256State_t) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
}
|
||||
|
||||
|
||||
@@ -23,11 +23,13 @@
|
||||
#ifndef __CORTEX_M_NMI_H
|
||||
#define __CORTEX_M_NMI_H
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
#ifdef CONFIG_RUNTIME_NMI
|
||||
extern void _NmiInit(void);
|
||||
#define NMI_INIT() _NmiInit()
|
||||
#else
|
||||
#define NMI_INIT()
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif /* __CORTEX_M_NMI_H */
|
||||
|
||||
@@ -381,7 +381,7 @@ static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
|
||||
|
||||
/**
|
||||
* The NANO_SOFT_IRQ macro must be used as the value for the @a irq parameter
|
||||
* to NANO_CPU_INT_REGSITER when connecting to an interrupt that does not
|
||||
* to NANO_CPU_INT_REGISTER when connecting to an interrupt that does not
|
||||
* correspond to any IRQ line (such as spurious vector or SW IRQ)
|
||||
*/
|
||||
#define NANO_SOFT_IRQ ((unsigned int) (-1))
|
||||
@@ -397,10 +397,62 @@ extern void _arch_irq_enable(unsigned int irq);
|
||||
*/
|
||||
extern void _arch_irq_disable(unsigned int irq);
|
||||
|
||||
#ifdef CONFIG_FP_SHARING
|
||||
extern void k_float_enable(k_tid_t thread_id, unsigned int options);
|
||||
extern void k_float_disable(k_tid_t thread_id);
|
||||
#endif /* CONFIG_FP_SHARING */
|
||||
/**
|
||||
* @defgroup float_apis Floating Point APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Enable preservation of floating point context information.
|
||||
*
|
||||
* This routine informs the kernel that the specified thread (which may be
|
||||
* the current thread) will be using the floating point registers.
|
||||
* The @a options parameter indicates which floating point register sets
|
||||
* will be used by the specified thread:
|
||||
*
|
||||
* a) K_FP_REGS indicates x87 FPU and MMX registers only
|
||||
* b) K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
|
||||
*
|
||||
* Invoking this routine initializes the thread's floating point context info
|
||||
* to that of an FPU that has been reset. The next time the thread is scheduled
|
||||
* by _Swap() it will either inherit an FPU that is guaranteed to be in a "sane"
|
||||
* state (if the most recent user of the FPU was cooperatively swapped out)
|
||||
* or the thread's own floating point context will be loaded (if the most
|
||||
* recent user of the FPU was pre-empted, or if this thread is the first user
|
||||
* of the FPU). Thereafter, the kernel will protect the thread's FP context
|
||||
* so that it is not altered during a preemptive context switch.
|
||||
*
|
||||
* @warning
|
||||
* This routine should only be used to enable floating point support for a
|
||||
* thread that does not currently have such support enabled already.
|
||||
*
|
||||
* @param thread ID of thread.
|
||||
* @param options Registers to be preserved (K_FP_REGS or K_SSE_REGS).
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
extern void k_float_enable(k_tid_t thread, unsigned int options);
|
||||
|
||||
/**
|
||||
* @brief Disable preservation of floating point context information.
|
||||
*
|
||||
* This routine informs the kernel that the specified thread (which may be
|
||||
* the current thread) will no longer be using the floating point registers.
|
||||
*
|
||||
* @warning
|
||||
* This routine should only be used to disable floating point support for
|
||||
* a thread that currently has such support enabled.
|
||||
*
|
||||
* @param thread ID of thread.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
extern void k_float_disable(k_tid_t thread);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#include <stddef.h> /* for size_t */
|
||||
|
||||
|
||||
354
include/atomic.h
354
include/atomic.h
@@ -26,28 +26,26 @@ extern "C" {
|
||||
typedef int atomic_t;
|
||||
typedef atomic_t atomic_val_t;
|
||||
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
/**
|
||||
* @defgroup atomic_apis Atomic Services APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Atomic compare-and-set.
|
||||
*
|
||||
* @brief Atomic compare-and-set primitive
|
||||
* This routine performs an atomic compare-and-set on @a target. If the current
|
||||
* value of @a target equals @a old_value, @a target is set to @a new_value.
|
||||
* If the current value of @a target does not equal @a old_value, @a target
|
||||
* is left unchanged.
|
||||
*
|
||||
* This routine provides the compare-and-set operator. If the original value at
|
||||
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
|
||||
* function returns 1.
|
||||
*
|
||||
* If the original value at <target> does not equal <oldValue>, then the store
|
||||
* is not done and the function returns 0.
|
||||
*
|
||||
* The reading of the original value at <target>, the comparison,
|
||||
* and the write of the new value (if it occurs) all happen atomically with
|
||||
* respect to both interrupts and accesses of other processors to <target>.
|
||||
*
|
||||
* @param target address to be tested
|
||||
* @param old_value value to compare against
|
||||
* @param new_value value to compare against
|
||||
* @return Returns 1 if <new_value> is written, 0 otherwise.
|
||||
* @param target Address of atomic variable.
|
||||
* @param old_value Original value to compare against.
|
||||
* @param new_value New value to store.
|
||||
* @return 1 if @a new_value is written, 0 otherwise.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline int atomic_cas(atomic_t *target, atomic_val_t old_value,
|
||||
atomic_val_t new_value)
|
||||
{
|
||||
@@ -55,104 +53,121 @@ static inline int atomic_cas(atomic_t *target, atomic_val_t old_value,
|
||||
0, __ATOMIC_SEQ_CST,
|
||||
__ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern int atomic_cas(atomic_t *target, atomic_val_t old_value,
|
||||
atomic_val_t new_value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic addition primitive
|
||||
* @brief Atomic addition.
|
||||
*
|
||||
* This routine provides the atomic addition operator. The <value> is
|
||||
* atomically added to the value at <target>, placing the result at <target>,
|
||||
* and the old value from <target> is returned.
|
||||
* This routine performs an atomic addition on @a target.
|
||||
*
|
||||
* @param target memory location to add to
|
||||
* @param value the value to add
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to add.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic subtraction primitive
|
||||
* @brief Atomic subtraction.
|
||||
*
|
||||
* This routine provides the atomic subtraction operator. The <value> is
|
||||
* atomically subtracted from the value at <target>, placing the result at
|
||||
* <target>, and the old value from <target> is returned.
|
||||
* This routine performs an atomic subtraction on @a target.
|
||||
*
|
||||
* @param target the memory location to subtract from
|
||||
* @param value the value to subtract
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to subtract.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic increment primitive
|
||||
* @brief Atomic increment.
|
||||
*
|
||||
* @param target memory location to increment
|
||||
* This routine performs an atomic increment by 1 on @a target.
|
||||
*
|
||||
* This routine provides the atomic increment operator. The value at <target>
|
||||
* is atomically incremented by 1, and the old value from <target> is returned.
|
||||
* @param target Address of atomic variable.
|
||||
*
|
||||
* @return The value from <target> before the increment
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_inc(atomic_t *target)
|
||||
{
|
||||
return atomic_add(target, 1);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_inc(atomic_t *target);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic decrement primitive
|
||||
* @brief Atomic decrement.
|
||||
*
|
||||
* @param target memory location to decrement
|
||||
* This routine performs an atomic decrement by 1 on @a target.
|
||||
*
|
||||
* This routine provides the atomic decrement operator. The value at <target>
|
||||
* is atomically decremented by 1, and the old value from <target> is returned.
|
||||
* @param target Address of atomic variable.
|
||||
*
|
||||
* @return The value from <target> prior to the decrement
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_dec(atomic_t *target)
|
||||
{
|
||||
return atomic_sub(target, 1);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_dec(atomic_t *target);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic get primitive
|
||||
* @brief Atomic get.
|
||||
*
|
||||
* @param target memory location to read from
|
||||
* This routine performs an atomic read on @a target.
|
||||
*
|
||||
* This routine provides the atomic get primitive to atomically read
|
||||
* a value from <target>. It simply does an ordinary load. Note that <target>
|
||||
* is expected to be aligned to a 4-byte boundary.
|
||||
* @param target Address of atomic variable.
|
||||
*
|
||||
* @return The value read from <target>
|
||||
* @return Value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_get(const atomic_t *target)
|
||||
{
|
||||
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_get(const atomic_t *target);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic get-and-set primitive
|
||||
* @brief Atomic get-and-set.
|
||||
*
|
||||
* This routine provides the atomic set operator. The <value> is atomically
|
||||
* written at <target> and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to @a value and returns
|
||||
* the previous value of @a target.
|
||||
*
|
||||
* @param target the memory location to write to
|
||||
* @param value the value to write
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to write to @a target.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
/* This builtin, as described by Intel, is not a traditional
|
||||
@@ -161,236 +176,253 @@ static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
|
||||
*/
|
||||
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic clear primitive
|
||||
* @brief Atomic clear.
|
||||
*
|
||||
* This routine provides the atomic clear operator. The value of 0 is atomically
|
||||
* written at <target> and the previous value at <target> is returned. (Hence,
|
||||
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
|
||||
* This routine atomically sets @a target to zero and returns its previous
|
||||
* value. (Hence, it is equivalent to atomic_set(target, 0).)
|
||||
*
|
||||
* @param target the memory location to write
|
||||
* @param target Address of atomic variable.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_clear(atomic_t *target)
|
||||
{
|
||||
return atomic_set(target, 0);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_clear(atomic_t *target);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic bitwise inclusive OR primitive
|
||||
* @brief Atomic bitwise inclusive OR.
|
||||
*
|
||||
* This routine provides the atomic bitwise inclusive OR operator. The <value>
|
||||
* is atomically bitwise OR'ed with the value at <target>, placing the result
|
||||
* at <target>, and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to the bitwise inclusive OR of
|
||||
* @a target and @a value.
|
||||
*
|
||||
* @param target the memory location to be modified
|
||||
* @param value the value to OR
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to OR.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic bitwise exclusive OR (XOR) primitive
|
||||
* @brief Atomic bitwise exclusive OR (XOR).
|
||||
*
|
||||
* This routine provides the atomic bitwise exclusive OR operator. The <value>
|
||||
* is atomically bitwise XOR'ed with the value at <target>, placing the result
|
||||
* at <target>, and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to the bitwise exclusive OR (XOR) of
|
||||
* @a target and @a value.
|
||||
*
|
||||
* @param target the memory location to be modified
|
||||
* @param value the value to XOR
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to XOR
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic bitwise AND primitive
|
||||
* @brief Atomic bitwise AND.
|
||||
*
|
||||
* This routine provides the atomic bitwise AND operator. The <value> is
|
||||
* atomically bitwise AND'ed with the value at <target>, placing the result
|
||||
* at <target>, and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to the bitwise AND of @a target
|
||||
* and @a value.
|
||||
*
|
||||
* @param target the memory location to be modified
|
||||
* @param value the value to AND
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to AND.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic bitwise NAND primitive
|
||||
* @brief Atomic bitwise NAND.
|
||||
*
|
||||
* This routine provides the atomic bitwise NAND operator. The <value> is
|
||||
* atomically bitwise NAND'ed with the value at <target>, placing the result
|
||||
* at <target>, and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to the bitwise NAND of @a target
|
||||
* and @a value. (This operation is equivalent to target = ~(target & value).)
|
||||
*
|
||||
* The operation here is equivalent to *target = ~(tmp & value)
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to NAND.
|
||||
*
|
||||
* @param target the memory location to be modified
|
||||
* @param value the value to NAND
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_dec(atomic_t *target);
|
||||
extern atomic_val_t atomic_inc(atomic_t *target);
|
||||
extern atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_clear(atomic_t *target);
|
||||
extern atomic_val_t atomic_get(const atomic_t *target);
|
||||
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
|
||||
extern int atomic_cas(atomic_t *target, atomic_val_t oldValue,
|
||||
atomic_val_t newValue);
|
||||
#endif /* CONFIG_ATOMIC_OPERATIONS_BUILTIN */
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* @brief Initialize an atomic variable.
|
||||
*
|
||||
* This macro can be used to initialize an atomic variable. For example,
|
||||
* @code atomic_t my_var = ATOMIC_INIT(75); @endcode
|
||||
*
|
||||
* @param i Value to assign to atomic variable.
|
||||
*/
|
||||
#define ATOMIC_INIT(i) (i)
|
||||
|
||||
/**
|
||||
* @cond INTERNAL_HIDDEN
|
||||
*/
|
||||
|
||||
#define ATOMIC_BITS (sizeof(atomic_val_t) * 8)
|
||||
#define ATOMIC_MASK(bit) (1 << ((bit) & (ATOMIC_BITS - 1)))
|
||||
#define ATOMIC_ELEM(addr, bit) ((addr) + ((bit) / ATOMIC_BITS))
|
||||
|
||||
/** @def ATOMIC_DEFINE
|
||||
* @brief Helper to declare an atomic_t array.
|
||||
/**
|
||||
* INTERNAL_HIDDEN @endcond
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Define an array of atomic variables.
|
||||
*
|
||||
* A helper to define an atomic_t array based on the number of needed
|
||||
* bits, e.g. any bit count of 32 or less will produce a single-element
|
||||
* array.
|
||||
* This macro defines an array of atomic variables containing at least
|
||||
* @a num_bits bits.
|
||||
*
|
||||
* @param name Name of atomic_t array.
|
||||
* @param num_bits Maximum number of bits needed.
|
||||
* @note
|
||||
* If used from file scope, the bits of the array are initialized to zero;
|
||||
* if used from within a function, the bits are left uninitialized.
|
||||
*
|
||||
* @return n/a
|
||||
* @param name Name of array of atomic variables.
|
||||
* @param num_bits Number of bits needed.
|
||||
*/
|
||||
#define ATOMIC_DEFINE(name, num_bits) \
|
||||
atomic_t name[1 + ((num_bits) - 1) / ATOMIC_BITS]
|
||||
|
||||
/** @brief Test whether a bit is set
|
||||
/**
|
||||
* @brief Atomically test a bit.
|
||||
*
|
||||
* Test whether bit number bit is set or not.
|
||||
* This routine tests whether bit number @a bit of @a target is set or not.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
*
|
||||
* @return 1 if the bit was set, 0 if it wasn't
|
||||
* @return 1 if the bit was set, 0 if it wasn't.
|
||||
*/
|
||||
static inline int atomic_test_bit(const atomic_t *addr, int bit)
|
||||
static inline int atomic_test_bit(const atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t val = atomic_get(ATOMIC_ELEM(addr, bit));
|
||||
atomic_val_t val = atomic_get(ATOMIC_ELEM(target, bit));
|
||||
|
||||
return (1 & (val >> (bit & (ATOMIC_BITS - 1))));
|
||||
}
|
||||
|
||||
/** @brief Clear a bit and return its old value
|
||||
/**
|
||||
* @brief Atomically test and clear a bit.
|
||||
*
|
||||
* Atomically clear a bit and return its old value.
|
||||
* Atomically clear bit number @a bit of @a target and return its old value.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
*
|
||||
* @return 1 if the bit was set, 0 if it wasn't
|
||||
* @return 1 if the bit was set, 0 if it wasn't.
|
||||
*/
|
||||
static inline int atomic_test_and_clear_bit(atomic_t *addr, int bit)
|
||||
static inline int atomic_test_and_clear_bit(atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t mask = ATOMIC_MASK(bit);
|
||||
atomic_val_t old;
|
||||
|
||||
old = atomic_and(ATOMIC_ELEM(addr, bit), ~mask);
|
||||
old = atomic_and(ATOMIC_ELEM(target, bit), ~mask);
|
||||
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/** @brief Set a bit and return its old value
|
||||
/**
|
||||
* @brief Atomically set a bit.
|
||||
*
|
||||
* Atomically set a bit and return its old value.
|
||||
* Atomically set bit number @a bit of @a target and return its old value.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
*
|
||||
* @return 1 if the bit was set, 0 if it wasn't
|
||||
* @return 1 if the bit was set, 0 if it wasn't.
|
||||
*/
|
||||
static inline int atomic_test_and_set_bit(atomic_t *addr, int bit)
|
||||
static inline int atomic_test_and_set_bit(atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t mask = ATOMIC_MASK(bit);
|
||||
atomic_val_t old;
|
||||
|
||||
old = atomic_or(ATOMIC_ELEM(addr, bit), mask);
|
||||
old = atomic_or(ATOMIC_ELEM(target, bit), mask);
|
||||
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/** @brief Clear a bit
|
||||
/**
|
||||
* @brief Atomically clear a bit.
|
||||
*
|
||||
* Atomically clear a bit.
|
||||
* Atomically clear bit number @a bit of @a target.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
* @return N/A
|
||||
*/
|
||||
static inline void atomic_clear_bit(atomic_t *addr, int bit)
|
||||
static inline void atomic_clear_bit(atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t mask = ATOMIC_MASK(bit);
|
||||
|
||||
atomic_and(ATOMIC_ELEM(addr, bit), ~mask);
|
||||
atomic_and(ATOMIC_ELEM(target, bit), ~mask);
|
||||
}
|
||||
|
||||
/** @brief Set a bit
|
||||
/**
|
||||
* @brief Atomically set a bit.
|
||||
*
|
||||
* Atomically set a bit.
|
||||
* Atomically set bit number @a bit of @a target.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
* @return N/A
|
||||
*/
|
||||
static inline void atomic_set_bit(atomic_t *addr, int bit)
|
||||
static inline void atomic_set_bit(atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t mask = ATOMIC_MASK(bit);
|
||||
|
||||
atomic_or(ATOMIC_ELEM(addr, bit), mask);
|
||||
atomic_or(ATOMIC_ELEM(target, bit), mask);
|
||||
}
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -772,7 +772,8 @@ struct bt_gatt_discover_params;
|
||||
*
|
||||
* If discovery procedure has completed this callback will be called with
|
||||
* attr set to NULL. This will not happen if procedure was stopped by returning
|
||||
* BT_GATT_ITER_STOP.
|
||||
* BT_GATT_ITER_STOP. The attribute is read-only and cannot be cached without
|
||||
* copying its contents.
|
||||
*
|
||||
* @return BT_GATT_ITER_CONTINUE if should continue attribute discovery
|
||||
* or BT_GATT_ITER_STOP to stop discovery procedure.
|
||||
|
||||
@@ -50,8 +50,9 @@ extern "C" {
|
||||
/** GPIO pin to be output. */
|
||||
#define GPIO_DIR_OUT (1 << 0)
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_DIR_MASK 0x1
|
||||
/** @endcond */
|
||||
|
||||
/** GPIO pin to trigger interrupt. */
|
||||
#define GPIO_INT (1 << 1)
|
||||
@@ -81,8 +82,9 @@ extern "C" {
|
||||
* GPIO_POL_* define the polarity of the GPIO (1 bit).
|
||||
*/
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_POL_POS 7
|
||||
/** @endcond */
|
||||
|
||||
/** GPIO pin polarity is normal. */
|
||||
#define GPIO_POL_NORMAL (0 << GPIO_POL_POS)
|
||||
@@ -90,15 +92,17 @@ extern "C" {
|
||||
/** GPIO pin polarity is inverted. */
|
||||
#define GPIO_POL_INV (1 << GPIO_POL_POS)
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_POL_MASK (1 << GPIO_POL_POS)
|
||||
/** @endcond */
|
||||
|
||||
/*
|
||||
* GPIO_PUD_* are related to pull-up/pull-down.
|
||||
*/
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_PUD_POS 8
|
||||
/** @endcond */
|
||||
|
||||
/** GPIO pin to have no pull-up or pull-down. */
|
||||
#define GPIO_PUD_NORMAL (0 << GPIO_PUD_POS)
|
||||
@@ -109,8 +113,9 @@ extern "C" {
|
||||
/** Enable GPIO pin pull-down. */
|
||||
#define GPIO_PUD_PULL_DOWN (2 << GPIO_PUD_POS)
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_PUD_MASK (3 << GPIO_PUD_POS)
|
||||
/** @endcond */
|
||||
|
||||
/*
|
||||
* GPIO_PIN_(EN-/DIS-)ABLE are for pin enable / disable.
|
||||
@@ -214,6 +219,7 @@ struct gpio_driver_api {
|
||||
* @param port Pointer to device structure for the driver instance.
|
||||
* @param pin Pin number to configure.
|
||||
* @param flags Flags for pin configuration. IN/OUT, interrupt ...
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_pin_configure(struct device *port, uint8_t pin,
|
||||
int flags)
|
||||
@@ -228,6 +234,7 @@ static inline int gpio_pin_configure(struct device *port, uint8_t pin,
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param pin Pin number where the data is written.
|
||||
* @param value Value set on the pin.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_pin_write(struct device *port, uint32_t pin,
|
||||
uint32_t value)
|
||||
@@ -242,6 +249,7 @@ static inline int gpio_pin_write(struct device *port, uint32_t pin,
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param pin Pin number where data is read.
|
||||
* @param value Integer pointer to receive the data values from the pin.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_pin_read(struct device *port, uint32_t pin,
|
||||
uint32_t *value)
|
||||
@@ -272,6 +280,7 @@ static inline void gpio_init_callback(struct gpio_callback *callback,
|
||||
* @brief Add an application callback.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param callback A valid Application's callback structure pointer.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*
|
||||
* Note: enables to add as many callback as needed on the same port.
|
||||
*/
|
||||
@@ -289,6 +298,7 @@ static inline int gpio_add_callback(struct device *port,
|
||||
* @brief Remove an application callback.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param callback A valid application's callback structure pointer.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*
|
||||
* Note: enables to remove as many callbacks as added through
|
||||
* gpio_add_callback().
|
||||
@@ -307,6 +317,7 @@ static inline int gpio_remove_callback(struct device *port,
|
||||
* @brief Enable callback(s) for a single pin.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param pin Pin number where the callback function is enabled.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*
|
||||
* Note: Depending on the driver implementation, this function will enable
|
||||
* the pin to trigger an interruption. So as a semantic detail, if no
|
||||
@@ -323,6 +334,7 @@ static inline int gpio_pin_enable_callback(struct device *port, uint32_t pin)
|
||||
* @brief Disable callback(s) for a single pin.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param pin Pin number where the callback function is disabled.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_pin_disable_callback(struct device *port, uint32_t pin)
|
||||
{
|
||||
@@ -337,6 +349,7 @@ static inline int gpio_pin_disable_callback(struct device *port, uint32_t pin)
|
||||
*
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param flags Flags for the port configuration. IN/OUT, interrupt ...
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_port_configure(struct device *port, int flags)
|
||||
{
|
||||
@@ -349,6 +362,7 @@ static inline int gpio_port_configure(struct device *port, int flags)
|
||||
* @brief Write a data value to the port.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param value Value to set on the port.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_port_write(struct device *port, uint32_t value)
|
||||
{
|
||||
@@ -361,6 +375,7 @@ static inline int gpio_port_write(struct device *port, uint32_t value)
|
||||
* @brief Read data value from the port.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param value Integer pointer to receive the data value from the port.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_port_read(struct device *port, uint32_t *value)
|
||||
{
|
||||
@@ -372,6 +387,7 @@ static inline int gpio_port_read(struct device *port, uint32_t *value)
|
||||
/**
|
||||
* @brief Enable callback(s) for the port.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*
|
||||
* Note: Depending on the driver implementation, this function will enable
|
||||
* the port to trigger an interruption on all pins, as long as these
|
||||
@@ -388,6 +404,7 @@ static inline int gpio_port_enable_callback(struct device *port)
|
||||
/**
|
||||
* @brief Disable callback(s) for the port.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_port_disable_callback(struct device *port)
|
||||
{
|
||||
|
||||
116
include/irq.h
116
include/irq.h
@@ -30,95 +30,121 @@
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Configure a static interrupt.
|
||||
* @defgroup isr_apis Interrupt Service Routine APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Initialize an interrupt handler.
|
||||
*
|
||||
* All arguments must be computable by the compiler at build time.
|
||||
* This routine initializes an interrupt handler for an IRQ. The IRQ must be
|
||||
* subsequently enabled before the interrupt handler begins servicing
|
||||
* interrupts.
|
||||
*
|
||||
* @param irq_p IRQ line number
|
||||
* @param priority_p Interrupt priority
|
||||
* @param isr_p Interrupt service routine
|
||||
* @param isr_param_p ISR parameter
|
||||
* @param flags_p Arch-specific IRQ configuration flags
|
||||
* @warning
|
||||
* Although this routine is invoked at run-time, all of its arguments must be
|
||||
* computable by the compiler at build time.
|
||||
*
|
||||
* @return The vector assigned to this interrupt
|
||||
* @param irq_p IRQ line number.
|
||||
* @param priority_p Interrupt priority.
|
||||
* @param isr_p Address of interrupt service routine.
|
||||
* @param isr_param_p Parameter passed to interrupt service routine.
|
||||
* @param flags_p Architecture-specific IRQ configuration flags..
|
||||
*
|
||||
* @return Interrupt vector assigned to this interrupt.
|
||||
*/
|
||||
#define IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
|
||||
_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)
|
||||
|
||||
/**
|
||||
* @brief Disable all interrupts on the CPU (inline)
|
||||
* @brief Lock interrupts.
|
||||
*
|
||||
* This routine disables interrupts. It can be called from either interrupt,
|
||||
* task or fiber level. This routine returns an architecture-dependent
|
||||
* lock-out key representing the "interrupt disable state" prior to the call;
|
||||
* this key can be passed to irq_unlock() to re-enable interrupts.
|
||||
* This routine disables all interrupts on the CPU. It returns an unsigned
|
||||
* integer "lock-out key", which is an architecture-dependent indicator of
|
||||
* whether interrupts were locked prior to the call. The lock-out key must be
|
||||
* passed to irq_unlock() to re-enable interrupts.
|
||||
*
|
||||
* The lock-out key should only be used as the argument to the irq_unlock()
|
||||
* API. It should never be used to manually re-enable interrupts or to inspect
|
||||
* or manipulate the contents of the source register.
|
||||
* This routine can be called recursively, as long as the caller keeps track
|
||||
* of each lock-out key that is generated. Interrupts are re-enabled by
|
||||
* passing each of the keys to irq_unlock() in the reverse order they were
|
||||
* acquired. (That is, each call to irq_lock() must be balanced by
|
||||
* a corresponding call to irq_unlock().)
|
||||
*
|
||||
* This function can be called recursively: it will return a key to return the
|
||||
* state of interrupt locking to the previous level.
|
||||
* @note
|
||||
* This routine can be called by ISRs or by threads. If it is called by a
|
||||
* thread, the interrupt lock is thread-specific; this means that interrupts
|
||||
* remain disabled only while the thread is running. If the thread performs an
|
||||
* operation that allows another thread to run (for example, giving a semaphore
|
||||
* or sleeping for N milliseconds), the interrupt lock no longer applies and
|
||||
* interrupts may be re-enabled while other processing occurs. When the thread
|
||||
* once again becomes the current thread, the kernel re-establishes its
|
||||
* interrupt lock; this ensures the thread won't be interrupted until it has
|
||||
* explicitly released the interrupt lock it established.
|
||||
*
|
||||
* WARNINGS
|
||||
* Invoking a kernel routine with interrupts locked may result in
|
||||
* interrupts being re-enabled for an unspecified period of time. If the
|
||||
* called routine blocks, interrupts will be re-enabled while another
|
||||
* thread executes, or while the system is idle.
|
||||
*
|
||||
* The "interrupt disable state" is an attribute of a thread. Thus, if a
|
||||
* fiber or task disables interrupts and subsequently invokes a kernel
|
||||
* routine that causes the calling thread to block, the interrupt
|
||||
* disable state will be restored when the thread is later rescheduled
|
||||
* for execution.
|
||||
*
|
||||
* @return An architecture-dependent unsigned int lock-out key representing the
|
||||
* "interrupt disable state" prior to the call.
|
||||
* @warning
|
||||
* The lock-out key should never be used to manually re-enable interrupts
|
||||
* or to inspect or manipulate the contents of the CPU's interrupt bits.
|
||||
*
|
||||
* @return Lock-out key.
|
||||
*/
|
||||
#define irq_lock() _arch_irq_lock()
|
||||
|
||||
/**
|
||||
* @brief Unlock interrupts.
|
||||
*
|
||||
* @brief Enable all interrupts on the CPU (inline)
|
||||
* This routine reverses the effect of a previous call to irq_lock() using
|
||||
* the associated lock-out key. The caller must call the routine once for
|
||||
* each time it called irq_lock(), supplying the keys in the reverse order
|
||||
* they were acquired, before interrupts are enabled.
|
||||
*
|
||||
* This routine re-enables interrupts on the CPU. The @a key parameter
|
||||
* is an architecture-dependent lock-out key that is returned by a previous
|
||||
* invocation of irq_lock().
|
||||
* @note Can be called by ISRs.
|
||||
*
|
||||
* This routine can be called from either interrupt, task or fiber level
|
||||
*
|
||||
* @param key architecture-dependent lock-out key
|
||||
* @param key Lock-out key generated by irq_lock().
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
#define irq_unlock(key) _arch_irq_unlock(key)
|
||||
|
||||
/**
|
||||
* @brief Enable a specific IRQ
|
||||
* @brief Enable an IRQ.
|
||||
*
|
||||
* This routine enables interrupts from source @a irq.
|
||||
*
|
||||
* @param irq IRQ line.
|
||||
*
|
||||
* @param irq IRQ line
|
||||
* @return N/A
|
||||
*/
|
||||
#define irq_enable(irq) _arch_irq_enable(irq)
|
||||
|
||||
/**
|
||||
* @brief Disable a specific IRQ
|
||||
* @brief Disable an IRQ.
|
||||
*
|
||||
* This routine disables interrupts from source @a irq.
|
||||
*
|
||||
* @param irq IRQ line.
|
||||
*
|
||||
* @param irq IRQ line
|
||||
* @return N/A
|
||||
*/
|
||||
#define irq_disable(irq) _arch_irq_disable(irq)
|
||||
|
||||
/**
|
||||
* @brief Return IRQ enable state
|
||||
* @brief Get IRQ enable state.
|
||||
*
|
||||
* This routine indicates if interrupts from source @a irq are enabled.
|
||||
*
|
||||
* @param irq IRQ line.
|
||||
*
|
||||
* @param irq IRQ line
|
||||
* @return interrupt enable state, true or false
|
||||
*/
|
||||
#define irq_is_enabled(irq) _arch_irq_is_enabled(irq)
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
951
include/kernel.h
951
include/kernel.h
File diff suppressed because it is too large
Load Diff
@@ -1060,8 +1060,9 @@ static inline __deprecated void nano_sem_give(struct nano_sem *sem)
|
||||
static inline __deprecated int nano_sem_take(struct nano_sem *sem,
|
||||
int32_t timeout_in_ticks)
|
||||
{
|
||||
return k_sem_take((struct k_sem *)sem, _ticks_to_ms(timeout_in_ticks))
|
||||
== 0 ? 1 : 0;
|
||||
int32_t ms = _ticks_to_ms(timeout_in_ticks);
|
||||
|
||||
return k_sem_take((struct k_sem *)sem, ms) == 0 ? 1 : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -177,12 +177,13 @@ static inline sys_dnode_t *sys_dlist_peek_head_not_empty(sys_dlist_t *list)
|
||||
* @param node the node from which to get the next element in the list
|
||||
*
|
||||
* @return a pointer to the next element from a node, NULL if node is the tail
|
||||
* or NULL (when node comes from reading the head of an empty list).
|
||||
*/
|
||||
|
||||
static inline sys_dnode_t *sys_dlist_peek_next(sys_dlist_t *list,
|
||||
sys_dnode_t *node)
|
||||
{
|
||||
return node == list->tail ? NULL : node->next;
|
||||
return (!node || node == list->tail) ? NULL : node->next;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -19,202 +19,146 @@
|
||||
* @brief Kernel event logger support.
|
||||
*/
|
||||
|
||||
|
||||
#include <misc/event_logger.h>
|
||||
|
||||
#ifndef __KERNEL_EVENT_LOGGER_H__
|
||||
#define __KERNEL_EVENT_LOGGER_H__
|
||||
|
||||
/**
|
||||
* @brief Kernel Event Logger
|
||||
* @defgroup nanokernel_event_logger Kernel Event Logger
|
||||
* @{
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
/* pre-defined event types */
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
|
||||
#define KERNEL_EVENT_LOGGER_CONTEXT_SWITCH_EVENT_ID 0x0001
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
|
||||
#define KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID 0x0002
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
|
||||
#define KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID 0x0003
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_TASK_MONITOR
|
||||
#define KERNEL_EVENT_LOGGER_TASK_MON_TASK_STATE_CHANGE_EVENT_ID 0x0004
|
||||
#define KERNEL_EVENT_LOGGER_TASK_MON_CMD_PACKET_EVENT_ID 0x0005
|
||||
#define KERNEL_EVENT_LOGGER_TASK_MON_KEVENT_EVENT_ID 0x0006
|
||||
#endif
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
|
||||
/**
|
||||
* Global variable of the ring buffer that allows user to implement
|
||||
* their own reading routine.
|
||||
*/
|
||||
struct event_logger sys_k_event_logger;
|
||||
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
|
||||
|
||||
/**
|
||||
* Callback used to set event timestamp
|
||||
*/
|
||||
typedef uint32_t (*sys_k_timer_func)(void);
|
||||
extern sys_k_timer_func timer_func;
|
||||
|
||||
static inline uint32_t _sys_k_get_time(void)
|
||||
{
|
||||
if (timer_func)
|
||||
return timer_func();
|
||||
else
|
||||
return sys_cycle_get_32();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Set kernel event logger timestamp function
|
||||
*
|
||||
* @details Calling this function permits to set the function
|
||||
* to be called by kernel event logger for setting the event
|
||||
* timestamp. By default, kernel event logger is using the
|
||||
* system timer. But on some boards where the timer driver
|
||||
* maintains the system timer cycle accumulator in software,
|
||||
* such as ones using the LOAPIC timer, the system timer behavior
|
||||
* leads to timestamp errors. For example, the timer interrupt is
|
||||
* logged with a wrong timestamp since the HW timer value has been
|
||||
* reset (periodic mode) but accumulated value not updated yet
|
||||
* (done later in the ISR).
|
||||
*
|
||||
* @param func Pointer to a function returning a 32-bit timer
|
||||
* Prototype: uint32_t (*func)(void)
|
||||
*/
|
||||
void sys_k_event_logger_set_timer(sys_k_timer_func func);
|
||||
#else
|
||||
static inline uint32_t _sys_k_get_time(void)
|
||||
{
|
||||
return sys_cycle_get_32();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
|
||||
extern struct event_logger sys_k_event_logger;
|
||||
extern int _sys_k_event_logger_mask;
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
|
||||
extern void _sys_k_event_logger_enter_sleep(void);
|
||||
#else
|
||||
static inline void _sys_k_event_logger_enter_sleep(void) {};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
|
||||
extern void _sys_k_event_logger_interrupt(void);
|
||||
#else
|
||||
static inline void _sys_k_event_logger_interrupt(void) {};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Set kernel event logger filtering mask
|
||||
*
|
||||
* @details Calling this macro sets the mask used to select which events
|
||||
* to store in the kernel event logger ring buffer. This flag can be set
|
||||
* at runtime and at any moment.
|
||||
* This capability is only available when CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
* is set. If enabled, no event is enabled for logging at initialization.
|
||||
* The mask bits shall be set according to events ID defined in
|
||||
* kernel_event_logger.h
|
||||
* For example, to enable interrupt logging the following shall be done:
|
||||
* sys_k_event_logger_set_mask(sys_k_event_logger_get_mask |
|
||||
* (1 << (KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID - 1)))
|
||||
* To disable it:
|
||||
* sys_k_event_logger_set_mask(sys_k_event_logger_get_mask &
|
||||
* ~(1 << (KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID - 1)))
|
||||
*
|
||||
* WARNING: task monitor events are not covered by this API. Please refer
|
||||
* to sys_k_event_logger_set_monitor_mask / sys_k_event_logger_get_monitor_mask
|
||||
* @brief Kernel Event Logger
|
||||
* @defgroup kernel_event_logger Kernel Event Logger
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @typedef sys_k_timer_func_t
|
||||
* @brief Event timestamp generator function type.
|
||||
*
|
||||
* A timestamp generator function is executed when the kernel event logger
|
||||
* generates an event containing a timestamp.
|
||||
*
|
||||
* @return Timestamp value (application-defined).
|
||||
*/
|
||||
typedef uint32_t (*sys_k_timer_func_t)(void);
|
||||
|
||||
/**
|
||||
* @cond INTERNAL_HIDDEN
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
|
||||
extern sys_k_timer_func_t _sys_k_get_time;
|
||||
#else
|
||||
#define _sys_k_get_time sys_cycle_get_32
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP */
|
||||
|
||||
/**
|
||||
* INTERNAL_HIDDEN @endcond
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Set kernel event logger timestamp function.
|
||||
*
|
||||
* This routine instructs the kernel event logger to call @a func
|
||||
* whenever it needs to generate an event timestamp. By default,
|
||||
* the kernel's hardware timer is used.
|
||||
*
|
||||
* @note
|
||||
* On some boards the hardware timer is not a pure hardware up counter,
|
||||
* which can lead to timestamp errors. For example, boards using the LOAPIC
|
||||
* timer can run it in periodic mode, which requires software to update
|
||||
* a count of accumulated cycles each time the timer hardware resets itself
|
||||
* to zero. This can result in an incorrect timestamp being generated
|
||||
* if it occurs after the timer hardware has reset but before the timer ISR
|
||||
* has updated accumulated cycle count.
|
||||
*
|
||||
* @param func Address of timestamp function to be used.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
|
||||
static inline void sys_k_event_logger_set_timer(sys_k_timer_func_t func)
|
||||
{
|
||||
_sys_k_get_time = func;
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP */
|
||||
|
||||
/**
|
||||
* @brief Set kernel event logger filtering mask.
|
||||
*
|
||||
* This routine specifies which events are recorded by the kernel event logger.
|
||||
* It can only be used when dynamic event logging has been configured.
|
||||
*
|
||||
* Each mask bit corresponds to a kernel event type. The least significant
|
||||
* mask bit corresponds to event type 1, the next bit to event type 2,
|
||||
* and so on.
|
||||
*
|
||||
* @param value Bitmask indicating events to be recorded.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
static inline void sys_k_event_logger_set_mask(int value)
|
||||
{
|
||||
_sys_k_event_logger_mask = value;
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC */
|
||||
|
||||
/**
|
||||
* @brief Get kernel event logger filtering mask
|
||||
* @brief Get kernel event logger filtering mask.
|
||||
*
|
||||
* @details Calling this macro permits to read the mask used to select which
|
||||
* events are stored in the kernel event logger ring buffer. This macro can be
|
||||
* used at runtime and at any moment.
|
||||
* This capability is only available when CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
* is set. If enabled, no event is enabled for logging at initialization.
|
||||
* This routine indicates which events are currently being recorded by
|
||||
* the kernel event logger. It can only be used when dynamic event logging
|
||||
* has been configured. By default, no events are recorded.
|
||||
*
|
||||
* @see sys_k_event_logger_set_mask(value) for details
|
||||
*
|
||||
* WARNING: task monitor events are not covered by this API. Please refer
|
||||
* to sys_k_event_logger_set_monitor_mask / sys_k_event_logger_get_monitor_mask
|
||||
* @return Bitmask indicating events that are being recorded.
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
static inline int sys_k_event_logger_get_mask(void)
|
||||
{
|
||||
return _sys_k_event_logger_mask;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TASK_MONITOR
|
||||
|
||||
extern int _k_monitor_mask;
|
||||
|
||||
/**
|
||||
* @brief Set task monitor filtering mask
|
||||
*
|
||||
* @details Calling this function sets the mask used to select which task monitor
|
||||
* events to store in the kernel event logger ring buffer. This flag can be set
|
||||
* at runtime and at any moment.
|
||||
* This capability is only available when CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
* is set. If enabled, no event is enabled for logging at initialization
|
||||
* so CONFIG_TASK_MONITOR_MASK is ignored
|
||||
*
|
||||
* The mask bits shall be set according to monitor events defined in
|
||||
* micro_private.h
|
||||
*
|
||||
* For example, to enable k_swapper cmd logging the following shall be done:
|
||||
* sys_k_event_logger_set_monitor_mask(sys_k_event_logger_get_monitor_mask |
|
||||
* (1 << (MON_KSERV - 1)))
|
||||
* To disable it:
|
||||
* sys_k_event_logger_set_mask(sys_k_event_logger_get_mask &
|
||||
* ~(1 << (MON_KSERV - 1)))
|
||||
*
|
||||
*/
|
||||
static inline void sys_k_event_logger_set_monitor_mask(int value)
|
||||
{
|
||||
_k_monitor_mask = value;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get task monitor filtering mask
|
||||
*
|
||||
* @details Calling this function permits to read the mask used to select which
|
||||
* task monitor events to store in the kernel event logger ring buffer. This
|
||||
* function can be used at runtime and at any moment.
|
||||
* This capability is only available when CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
* is set. If enabled, no event is enabled for logging at initialization
|
||||
* so CONFIG_TASK_MONITOR_MASK is ignored
|
||||
*
|
||||
* @see sys_k_event_logger_set_monitor_mask() for details
|
||||
*
|
||||
*/
|
||||
static inline int sys_k_event_logger_get_monitor_mask(void)
|
||||
{
|
||||
return _k_monitor_mask;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_TASK_MONITOR */
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC */
|
||||
|
||||
/**
|
||||
* @brief Check if an event type has to be logged or not
|
||||
* @brief Indicate if an event type is currently being recorded.
|
||||
*
|
||||
* @details This function must be used before calling any sys_k_event_logger_put*
|
||||
* function. In case CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC is enabled, that function
|
||||
* permits to enable or disable the logging of each individual event at runtime
|
||||
* This routine indicates if event type @a event_type should be recorded
|
||||
* by the kernel event logger when the event occurs. The routine should be
|
||||
* used by code that writes an event to the kernel event logger to ensure
|
||||
* that only events of interest to the application are recorded.
|
||||
*
|
||||
* @param event_type The identification of the event.
|
||||
* @param event_type Event ID.
|
||||
*
|
||||
* @return 1 if event should be recorded, or 0 if not.
|
||||
*
|
||||
*/
|
||||
|
||||
static inline int sys_k_must_log_event(int event_type)
|
||||
{
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
@@ -225,159 +169,150 @@ static inline int sys_k_must_log_event(int event_type)
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Sends a event message to the kernel event logger.
|
||||
* @brief Write an event to the kernel event logger.
|
||||
*
|
||||
* @details Sends a event message to the kernel event logger
|
||||
* and informs that there are messages available.
|
||||
* This routine writes an event message to the kernel event logger.
|
||||
*
|
||||
* @param event_id The identification of the event.
|
||||
* @param data Pointer to the data of the message.
|
||||
* @param data_size Size of the data in 32-bit words.
|
||||
* @param event_id Event ID.
|
||||
* @param event_data Address of event data.
|
||||
* @param data_size Size of event data (number of 32-bit words).
|
||||
*
|
||||
* @return No return value.
|
||||
* @return N/A
|
||||
*/
|
||||
#define sys_k_event_logger_put(event_id, data, data_size) \
|
||||
sys_event_logger_put(&sys_k_event_logger, event_id, data, data_size)
|
||||
|
||||
static inline void sys_k_event_logger_put(uint16_t event_id,
|
||||
uint32_t *event_data,
|
||||
uint8_t data_size)
|
||||
{
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
sys_event_logger_put(&sys_k_event_logger, event_id,
|
||||
event_data, data_size);
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER */
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Sends a event message to the kernel event logger with the current
|
||||
* timestamp.
|
||||
* @brief Write an event to the kernel event logger (with timestamp only).
|
||||
*
|
||||
* @details Sends a event message to the kernel event logger and informs that
|
||||
* there messages available. The timestamp when the event occurred is stored
|
||||
* as part of the event message.
|
||||
* This routine writes an event message to the kernel event logger.
|
||||
* The event records a single 32-bit word containing a timestamp.
|
||||
*
|
||||
* @param event_id The identification of the event.
|
||||
* @param event_id Event ID.
|
||||
*
|
||||
* @return No return value.
|
||||
* @return N/A
|
||||
*/
|
||||
void sys_k_event_logger_put_timed(uint16_t event_id);
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
extern void sys_k_event_logger_put_timed(uint16_t event_id);
|
||||
#else
|
||||
static inline void sys_k_event_logger_put_timed(uint16_t event_id) {};
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER */
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message, or returns without waiting.
|
||||
*
|
||||
* This routine retrieves the next recorded event from the kernel event logger,
|
||||
* or returns immediately if no such event exists.
|
||||
*
|
||||
* @param event_id Area to store event type ID.
|
||||
* @param dropped Area to store number of events that were dropped between
|
||||
* the previous event and the retrieved event.
|
||||
* @param event_data Buffer to store event data.
|
||||
* @param data_size Size of event data buffer (number of 32-bit words).
|
||||
*
|
||||
* @retval positive_integer Number of event data words retrieved;
|
||||
* @a event_id, @a dropped, and @a buffer have been updated.
|
||||
* @retval 0 Returned without waiting; no event was retrieved.
|
||||
* @retval -EMSGSIZE Buffer too small; @a data_size now indicates
|
||||
* the size of the event to be retrieved.
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
static inline int sys_k_event_logger_get(uint16_t *event_id, uint8_t *dropped,
|
||||
uint32_t *event_data, uint8_t *data_size)
|
||||
{
|
||||
return sys_event_logger_get(&sys_k_event_logger, event_id, dropped,
|
||||
event_data, data_size);
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER */
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message.
|
||||
*
|
||||
* @details Retrieves a kernel event message copying it to the provided
|
||||
* buffer. If the buffer is smaller than the message size the function returns
|
||||
* an error. The function retrieves messages in FIFO order.
|
||||
* This routine retrieves the next recorded event from the kernel event logger.
|
||||
* If there is no such event the caller pends until it is available.
|
||||
*
|
||||
* @param event_id Pointer to the id of the event fetched
|
||||
* @param dropped Pointer to how many events were dropped
|
||||
* @param buffer Pointer to the buffer where the message will be copied.
|
||||
* @param buffer_size Size of the buffer in 32-bit words.
|
||||
* @param event_id Area to store event type ID.
|
||||
* @param dropped Area to store number of events that were dropped between
|
||||
* the previous event and the retrieved event.
|
||||
* @param event_data Buffer to store event data.
|
||||
* @param data_size Size of event data buffer (number of 32-bit words).
|
||||
*
|
||||
* @return -EMSGSIZE if the buffer size is smaller than the message size,
|
||||
* the amount of 32-bit words copied or zero if there are no kernel event
|
||||
* messages available.
|
||||
* @retval positive_integer Number of event data words retrieved;
|
||||
* @a event_id, @a dropped, and @a buffer have been updated.
|
||||
* @retval -EMSGSIZE Buffer too small; @a data_size now indicates
|
||||
* the size of the event to be retrieved.
|
||||
*/
|
||||
#define sys_k_event_logger_get(event_id, dropped, buffer, buffer_size) \
|
||||
sys_event_logger_get(&sys_k_event_logger, event_id, dropped, buffer, \
|
||||
buffer_size)
|
||||
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message, wait if there is no message
|
||||
* available.
|
||||
*
|
||||
* @details Retrieves a kernel event message copying it to the provided
|
||||
* buffer. If the buffer is smaller than the message size the function returns
|
||||
* an error. The function retrieves messages in FIFO order. If there is no
|
||||
* kernel event message available the caller pends until a new message is
|
||||
* logged.
|
||||
*
|
||||
* @param event_id Pointer to the id of the event fetched
|
||||
* @param dropped Pointer to how many events were dropped
|
||||
* @param buffer Pointer to the buffer where the message will be copied.
|
||||
* @param buffer_size Size of the buffer in 32-bit words.
|
||||
*
|
||||
* @return -EMSGSIZE if the buffer size is smaller than the message size, or
|
||||
* the amount of 32-bit words copied.
|
||||
*/
|
||||
#define sys_k_event_logger_get_wait(event_id, dropped, buffer, buffer_size) \
|
||||
sys_event_logger_get_wait(&sys_k_event_logger, event_id, dropped, \
|
||||
buffer, buffer_size)
|
||||
|
||||
|
||||
#ifdef CONFIG_NANO_TIMEOUTS
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message, wait with a timeout if there is
|
||||
* no profiling event messages available.
|
||||
*
|
||||
* @details Retrieves a kernel event message copying it to the provided
|
||||
* buffer. If the buffer is smaller than the message size the function returns
|
||||
* an error. The function retrieves messages in FIFO order. If there are no
|
||||
* kernel event messages available the caller pends until a new message is
|
||||
* logged or the timeout expires.
|
||||
*
|
||||
* @param event_id Pointer to the id of the event fetched
|
||||
* @param dropped Pointer to how many events were dropped
|
||||
* @param buffer Pointer to the buffer where the message will be copied.
|
||||
* @param buffer_size Size of the buffer in 32-bit words.
|
||||
* @param timeout Timeout in ticks.
|
||||
*
|
||||
* @return -EMSGSIZE if the buffer size is smaller than the message size, the
|
||||
* amount of 32-bit words copied or zero if the timeout expires and the was no
|
||||
* message available.
|
||||
*/
|
||||
#define sys_k_event_logger_get_wait_timeout(event_id, dropped, buffer, buffer_size, \
|
||||
timeout) \
|
||||
sys_event_logger_get_wait_timeout(&sys_k_event_logger, event_id, \
|
||||
dropped, buffer, \
|
||||
buffer_size, timeout)
|
||||
#endif /* CONFIG_NANO_TIMEOUTS */
|
||||
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
|
||||
|
||||
/**
|
||||
* @brief Register the fiber that calls the function as collector
|
||||
*
|
||||
* @details Initialize internal profiling data. This avoid registering
|
||||
* the context switch of the collector fiber when
|
||||
* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH is enable.
|
||||
*
|
||||
* @return No return value.
|
||||
*/
|
||||
void sys_k_event_logger_register_as_collector(void);
|
||||
#else /* !CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
|
||||
static inline void sys_k_event_logger_register_as_collector(void) {};
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
|
||||
void _sys_k_event_logger_enter_sleep(void);
|
||||
#else
|
||||
static inline void _sys_k_event_logger_enter_sleep(void) {};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
|
||||
void _sys_k_event_logger_interrupt(void);
|
||||
#else
|
||||
static inline void _sys_k_event_logger_interrupt(void) {};
|
||||
#endif
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#else /* !CONFIG_KERNEL_EVENT_LOGGER */
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
|
||||
static inline void sys_k_event_logger_put(uint16_t event_id, uint32_t *event_data,
|
||||
uint8_t data_size) {};
|
||||
static inline void sys_k_event_logger_put_timed(uint16_t event_id) {};
|
||||
static inline void _sys_k_event_logger_enter_sleep(void) {};
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
static inline int sys_k_event_logger_get_wait(uint16_t *event_id,
|
||||
uint8_t *dropped, uint32_t *event_data, uint8_t *data_size)
|
||||
{
|
||||
return sys_event_logger_get_wait(&sys_k_event_logger, event_id, dropped,
|
||||
event_data, data_size);
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER */
|
||||
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message, or waits for a specified time.
|
||||
*
|
||||
* This routine retrieves the next recorded event from the kernel event logger.
|
||||
* If there is no such event the caller pends until it is available or until
|
||||
* the specified timeout expires.
|
||||
*
|
||||
* @param event_id Area to store event type ID.
|
||||
* @param dropped Area to store number of events that were dropped between
|
||||
* the previous event and the retrieved event.
|
||||
* @param event_data Buffer to store event data.
|
||||
* @param data_size Size of event data buffer (number of 32-bit words).
|
||||
* @param timeout Timeout in system clock ticks.
|
||||
*
|
||||
* @retval positive_integer Number of event data words retrieved;
|
||||
* @a event_id, @a dropped, and @a buffer have been updated.
|
||||
* @retval 0 Waiting period timed out; no event was retrieved.
|
||||
* @retval -EMSGSIZE Buffer too small; @a data_size now indicates
|
||||
* the size of the event to be retrieved.
|
||||
*/
|
||||
#if defined(CONFIG_KERNEL_EVENT_LOGGER) && defined(CONFIG_NANO_TIMEOUTS)
|
||||
static inline int sys_k_event_logger_get_wait_timeout(uint16_t *event_id,
|
||||
uint8_t *dropped, uint32_t *event_data,
|
||||
uint8_t *data_size, uint32_t timeout)
|
||||
{
|
||||
return sys_event_logger_get_wait_timeout(&sys_k_event_logger, event_id,
|
||||
dropped, event_data,
|
||||
data_size, timeout);
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER && CONFIG_NANO_TIMEOUTS */
|
||||
|
||||
/**
|
||||
* @brief Register thread that retrieves kernel events.
|
||||
*
|
||||
* This routine instructs the kernel event logger not to record context
|
||||
* switch events for the calling thread. It is typically called by the thread
|
||||
* that retrieves events from the kernel event logger.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
|
||||
void sys_k_event_logger_register_as_collector(void);
|
||||
#else
|
||||
static inline void sys_k_event_logger_register_as_collector(void) {};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @} end defgroup kernel_event_logger
|
||||
*/
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#endif /* __KERNEL_EVENT_LOGGER_H__ */
|
||||
|
||||
@@ -29,13 +29,6 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Ring Buffer APIs
|
||||
* @defgroup nanokernel_ringbuffer Ring Bufer
|
||||
* @ingroup nanokernel_services
|
||||
* @{
|
||||
*/
|
||||
|
||||
#define SIZE32_OF(x) (sizeof((x))/sizeof(uint32_t))
|
||||
|
||||
/**
|
||||
@@ -56,13 +49,26 @@ struct ring_buf {
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Declare a power-of-two sized ring buffer
|
||||
* @defgroup ring_buffer_apis Ring Buffer APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Statically define and initialize a high performance ring buffer.
|
||||
*
|
||||
* Use of this macro is preferred over SYS_RING_BUF_DECLARE_SIZE() as it
|
||||
* will not need to use expensive modulo operations.
|
||||
* This macro establishes a ring buffer whose size must be a power of 2;
|
||||
* that is, the ring buffer contains 2^pow 32-bit words, where @a pow is
|
||||
* the specified ring buffer size exponent. A high performance ring buffer
|
||||
* doesn't require the use of modulo arithmetic operations to maintain itself.
|
||||
*
|
||||
* @param name File-scoped name of the ring buffer to declare
|
||||
* @param pow Create a buffer of 2^pow 32-bit elements
|
||||
* The ring buffer can be accessed outside the module where it is defined
|
||||
* using:
|
||||
*
|
||||
* @code extern struct ring_buf <name>; @endcode
|
||||
*
|
||||
* @param name Name of the ring buffer.
|
||||
* @param pow Ring buffer size exponent.
|
||||
*/
|
||||
#define SYS_RING_BUF_DECLARE_POW2(name, pow) \
|
||||
static uint32_t _ring_buffer_data_##name[1 << (pow)]; \
|
||||
@@ -73,13 +79,18 @@ struct ring_buf {
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Declare an arbitrary sized ring buffer
|
||||
* @brief Statically define and initialize a standard ring buffer.
|
||||
*
|
||||
* A ring buffer declared in this way has more flexibility on buffer size
|
||||
* but will use more expensive modulo operations to maintain itself.
|
||||
* This macro establishes a ring buffer of an arbitrary size. A standard
|
||||
* ring buffer uses modulo arithmetic operations to maintain itself.
|
||||
*
|
||||
* @param name File-scoped name of the ring buffer to declare
|
||||
* @param size32 Size of buffer in 32-bit elements
|
||||
* The ring buffer can be accessed outside the module where it is defined
|
||||
* using:
|
||||
*
|
||||
* @code extern struct ring_buf <name>; @endcode
|
||||
*
|
||||
* @param name Name of the ring buffer.
|
||||
* @param size32 Size of ring buffer (in 32-bit words).
|
||||
*/
|
||||
#define SYS_RING_BUF_DECLARE_SIZE(name, size32) \
|
||||
static uint32_t _ring_buffer_data_##name[size32]; \
|
||||
@@ -89,16 +100,19 @@ struct ring_buf {
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Initialize a ring buffer, in cases where DECLARE_RING_BUF_STATIC
|
||||
* isn't used.
|
||||
* @brief Initialize a ring buffer.
|
||||
*
|
||||
* For optimal performance, use size values that are a power of 2 as they
|
||||
* don't require expensive modulo operations when maintaining the buffer.
|
||||
* This routine initializes a ring buffer, prior to its first use. It is only
|
||||
* used for ring buffers not defined using SYS_RING_BUF_DECLARE_POW2 or
|
||||
* SYS_RING_BUF_DECLARE_SIZE.
|
||||
*
|
||||
* @param buf Ring buffer to initialize
|
||||
* @param size Size of the provided buffer in 32-bit chunks
|
||||
* @param data Data area for the ring buffer, typically
|
||||
* uint32_t data[size]
|
||||
* Setting @a size to a power of 2 establishes a high performance ring buffer
|
||||
* that doesn't require the use of modulo arithmetic operations to maintain
|
||||
* itself.
|
||||
*
|
||||
* @param buf Address of ring buffer.
|
||||
* @param size Ring buffer size (in 32-bit words).
|
||||
* @param data Ring buffer data area (typically uint32_t data[size]).
|
||||
*/
|
||||
static inline void sys_ring_buf_init(struct ring_buf *buf, uint32_t size,
|
||||
uint32_t *data)
|
||||
@@ -118,9 +132,11 @@ static inline void sys_ring_buf_init(struct ring_buf *buf, uint32_t size,
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Determine if a ring buffer is empty
|
||||
* @brief Determine if a ring buffer is empty.
|
||||
*
|
||||
* @return nonzero if the buffer is empty
|
||||
* @param buf Address of ring buffer.
|
||||
*
|
||||
* @return 1 if the ring buffer is empty, or 0 if not.
|
||||
*/
|
||||
static inline int sys_ring_buf_is_empty(struct ring_buf *buf)
|
||||
{
|
||||
@@ -128,10 +144,11 @@ static inline int sys_ring_buf_is_empty(struct ring_buf *buf)
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Obtain available space in a ring buffer
|
||||
* @brief Determine free space in a ring buffer.
|
||||
*
|
||||
* @param buf Ring buffer to examine
|
||||
* @return Available space in the buffer in 32-bit chunks
|
||||
* @param buf Address of ring buffer.
|
||||
*
|
||||
* @return Ring buffer free space (in 32-bit words).
|
||||
*/
|
||||
static inline int sys_ring_buf_space_get(struct ring_buf *buf)
|
||||
{
|
||||
@@ -148,36 +165,52 @@ static inline int sys_ring_buf_space_get(struct ring_buf *buf)
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Place an entry into the ring buffer
|
||||
* @brief Write a data item to a ring buffer.
|
||||
*
|
||||
* Concurrency control is not implemented, however no synchronization is needed
|
||||
* between put() and get() operations as they independently work on the
|
||||
* tail and head values, respectively.
|
||||
* Any use-cases involving multiple producers will need to synchronize use
|
||||
* of this function, by either disabling preemption or using a mutex.
|
||||
* This routine writes a data item to ring buffer @a buf. The data item
|
||||
* is an array of 32-bit words (from zero to 1020 bytes in length),
|
||||
* coupled with a 16-bit type identifier and an 8-bit integer value.
|
||||
*
|
||||
* @param buf Ring buffer to insert data to
|
||||
* @param type Application-specific type identifier
|
||||
* @param value Integral data to include, application specific
|
||||
* @param data Pointer to a buffer containing data to enqueue
|
||||
* @param size32 Size of data buffer, in 32-bit chunks (not bytes)
|
||||
* @return 0 on success, -EMSGSIZE if there isn't sufficient space
|
||||
* @warning
|
||||
* Use cases involving multiple writers to the ring buffer must prevent
|
||||
* concurrent write operations, either by preventing all writers from
|
||||
* being preempted or by using a mutex to govern writes to the ring buffer.
|
||||
*
|
||||
* @param buf Address of ring buffer.
|
||||
* @param type Data item's type identifier (application specific).
|
||||
* @param value Data item's integer value (application specific).
|
||||
* @param data Address of data item.
|
||||
* @param size32 Data item size (number of 32-bit words).
|
||||
*
|
||||
* @retval 0 Data item was written.
|
||||
* @retval -EMSGSIZE Ring buffer has insufficient free space.
|
||||
*/
|
||||
int sys_ring_buf_put(struct ring_buf *buf, uint16_t type, uint8_t value,
|
||||
uint32_t *data, uint8_t size32);
|
||||
|
||||
/**
|
||||
* @brief Fetch data from the ring buffer
|
||||
* @brief Read a data item from a ring buffer.
|
||||
*
|
||||
* @param buf Ring buffer to extract data from
|
||||
* @param type Return storage of the retrieved event type
|
||||
* @param value Return storage of the data value
|
||||
* @param data Buffer to copy data into
|
||||
* @param size32 Indicates the size of the data buffer. On return,
|
||||
* updated with the actual amount of 32-bit chunks written to the buffer
|
||||
* @return 0 on success, -EAGAIN if the ring buffer is empty, -EMSGSIZE
|
||||
* if the supplied buffer is too small (size32 will be updated with
|
||||
* the actual size needed)
|
||||
* This routine reads a data item from ring buffer @a buf. The data item
|
||||
* is an array of 32-bit words (up to 1020 bytes in length),
|
||||
* coupled with a 16-bit type identifier and an 8-bit integer value.
|
||||
*
|
||||
* @warning
|
||||
* Use cases involving multiple reads of the ring buffer must prevent
|
||||
* concurrent read operations, either by preventing all readers from
|
||||
* being preempted or by using a mutex to govern reads to the ring buffer.
|
||||
*
|
||||
* @param buf Address of ring buffer.
|
||||
* @param type Area to store the data item's type identifier.
|
||||
* @param value Area to store the data item's integer value.
|
||||
* @param data Area to store the data item.
|
||||
* @param size32 Size of the data item storage area (number of 32-bit chunks).
|
||||
*
|
||||
* @retval 0 Data item was fetched; @a size32 now contains the number of
|
||||
* 32-bit words read into data area @a data.
|
||||
* @retval -EAGAIN Ring buffer is empty.
|
||||
* @retval -EMSGSIZE Data area @a data is too small; @a size32 now contains
|
||||
* the number of 32-bit words needed.
|
||||
*/
|
||||
int sys_ring_buf_get(struct ring_buf *buf, uint16_t *type, uint8_t *value,
|
||||
uint32_t *data, uint8_t *size32);
|
||||
|
||||
@@ -88,8 +88,28 @@ extern int sys_clock_hw_cycles_per_tick;
|
||||
#define SYS_CLOCK_HW_CYCLES_TO_NS_AVG(X, NCYCLES) \
|
||||
(uint32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X) / NCYCLES)
|
||||
|
||||
/**
|
||||
* @defgroup clock_apis Kernel Clock APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Compute nanoseconds from hardware clock cycles.
|
||||
*
|
||||
* This macro converts a time duration expressed in hardware clock cycles
|
||||
* to the equivalent duration expressed in nanoseconds.
|
||||
*
|
||||
* @param X Duration in hardware clock cycles.
|
||||
*
|
||||
* @return Duration in nanoseconds.
|
||||
*/
|
||||
#define SYS_CLOCK_HW_CYCLES_TO_NS(X) (uint32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X))
|
||||
|
||||
/**
|
||||
* @} end defgroup clock_apis
|
||||
*/
|
||||
|
||||
extern int64_t _sys_clock_tick_count;
|
||||
|
||||
/*
|
||||
|
||||
@@ -18,15 +18,19 @@
|
||||
|
||||
menu "Unified Kernel Options"
|
||||
|
||||
config KERNEL_V2_DEBUG
|
||||
config KERNEL_DEBUG
|
||||
bool
|
||||
prompt "Kernel V2 debug help"
|
||||
prompt "Kernel debugging"
|
||||
default n
|
||||
select INIT_STACKS
|
||||
help
|
||||
Enable kernel debugging.
|
||||
|
||||
Note that debugging the kernel internals can be very verbose.
|
||||
|
||||
config NUM_COOP_PRIORITIES
|
||||
int
|
||||
prompt "Kernel V2: number of coop priorities"
|
||||
prompt "Number of coop priorities"
|
||||
default 16
|
||||
help
|
||||
Number of cooperative priorities configured in the system. Gives access
|
||||
@@ -41,12 +45,20 @@ config NUM_COOP_PRIORITIES
|
||||
This can be set to zero to disable cooperative scheduling. Cooperative
|
||||
threads always preempt preemptible threads.
|
||||
|
||||
Each priority requires an extra 8 bytes of RAM. If there are more than
|
||||
32 total priorities, an extra 4 bytes is required.
|
||||
Each priority requires an extra 8 bytes of RAM. Each set of 32 extra
|
||||
total priorities require an extra 4 bytes and add one possible
|
||||
iteration to loops that search for the next thread to run.
|
||||
|
||||
The total number of priorities is
|
||||
|
||||
NUM_COOP_PRIORITIES + NUM_PREEMPT_PRIORITIES + 1
|
||||
|
||||
The extra one is for the idle thread, which must run at the lowest
|
||||
priority, and be the only thread at that priority.
|
||||
|
||||
config NUM_PREEMPT_PRIORITIES
|
||||
int
|
||||
prompt "Kernel V2: number of preemptible priorities"
|
||||
prompt "Number of preemptible priorities"
|
||||
default 15
|
||||
help
|
||||
Number of preemptible priorities available in the system. Gives access
|
||||
@@ -54,15 +66,20 @@ config NUM_PREEMPT_PRIORITIES
|
||||
|
||||
This can be set to 0 to disable preemptible scheduling.
|
||||
|
||||
The idle thread is always installed as a preemptible thread of the
|
||||
lowest priority.
|
||||
Each priority requires an extra 8 bytes of RAM. Each set of 32 extra
|
||||
total priorities require an extra 4 bytes and add one possible
|
||||
iteration to loops that search for the next thread to run.
|
||||
|
||||
Each priority requires an extra 8 bytes of RAM. If there are more than
|
||||
32 total priorities, an extra 4 bytes is required.
|
||||
The total number of priorities is
|
||||
|
||||
NUM_COOP_PRIORITIES + NUM_PREEMPT_PRIORITIES + 1
|
||||
|
||||
The extra one is for the idle thread, which must run at the lowest
|
||||
priority, and be the only thread at that priority.
|
||||
|
||||
config PRIORITY_CEILING
|
||||
int
|
||||
prompt "Kernel V2: priority inheritance ceiling"
|
||||
prompt "Priority inheritance ceiling"
|
||||
default 0
|
||||
|
||||
config BOOT_BANNER
|
||||
@@ -285,29 +302,44 @@ config SEMAPHORE_GROUPS
|
||||
the k_sem_give() routine.
|
||||
|
||||
choice
|
||||
prompt "Memory pools auto-defragmentation policy"
|
||||
default MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK
|
||||
prompt "Memory pool block allocation policy"
|
||||
default MEM_POOL_SPLIT_BEFORE_DEFRAG
|
||||
help
|
||||
Memory pool auto-defragmentation is performed if a memory
|
||||
block of the requested size can not be found. Defragmentation
|
||||
can be done:
|
||||
Before trying to find a block in the next largest block set.
|
||||
This is an attempt to preserve the memory pool's larger blocks
|
||||
by fragmenting them only when necessary (i.e. at the cost of
|
||||
doing more frequent auto-defragmentations).
|
||||
After trying to find a block in the next largest block set.
|
||||
This is an attempt to limit the cost of doing auto-defragmentations
|
||||
by doing them only when necessary (i.e. at the cost of fragmenting
|
||||
the memory pool's larger blocks).
|
||||
This option specifies how a memory pool reacts if an unused memory
|
||||
block of the required size is not available.
|
||||
|
||||
config MEM_POOL_AD_NONE
|
||||
bool "No auto-defragmentation"
|
||||
config MEM_POOL_SPLIT_BEFORE_DEFRAG
|
||||
bool "Split a larger block before merging smaller blocks"
|
||||
help
|
||||
This option instructs a memory pool to try splitting a larger unused
|
||||
block if an unused block of the required size is not available; only
|
||||
if no such blocks exist will the memory pool try merging smaller unused
|
||||
blocks. This policy attempts to limit the cost of performing automatic
|
||||
partial defragmention of the memory pool, at the cost of fragmenting
|
||||
the memory pool's larger blocks.
|
||||
|
||||
config MEM_POOL_AD_BEFORE_SEARCH_FOR_BIGGERBLOCK
|
||||
bool "Before trying to find a block in the next largest block set"
|
||||
config MEM_POOL_DEFRAG_BEFORE_SPLIT
|
||||
bool "Merge smaller blocks before splitting a larger block"
|
||||
help
|
||||
This option instructs a memory pool to try merging smaller unused
|
||||
blocks if an unused block of the required size is not available; only
|
||||
if this does not generate a sufficiently large block will the memory
|
||||
pool try splitting a larger unused block. This policy attempts to
|
||||
preserve the memory pool's larger blocks, at the cost of performing
|
||||
automatic partial defragmentations more frequently.
|
||||
|
||||
config MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK
|
||||
bool "After trying to find a block in the next largest block set"
|
||||
config MEM_POOL_SPLIT_ONLY
|
||||
bool "Split a larger block, but never merge smaller blocks"
|
||||
help
|
||||
This option instructs a memory pool to try splitting a larger unused
|
||||
block if an unused block of the required size is not available; if no
|
||||
such blocks exist the block allocation operation fails. This policy
|
||||
attempts to limit the cost of defragmenting the memory pool by avoiding
|
||||
automatic partial defragmentation, at the cost of requiring the
|
||||
application to explicitly request a full defragmentation of the memory
|
||||
pool when an allocation fails. Depending on how a memory pool is used,
|
||||
it may be more efficient for a memory pool to perform an occasional
|
||||
full defragmentation than to perform frequent partial defragmentations.
|
||||
|
||||
endchoice
|
||||
|
||||
@@ -322,3 +354,12 @@ config HEAP_MEM_POOL_SIZE
|
||||
heap memory pool is defined.
|
||||
|
||||
endmenu
|
||||
|
||||
config ARCH_HAS_CUSTOM_SWAP_TO_MAIN
|
||||
bool
|
||||
# hidden
|
||||
default n
|
||||
help
|
||||
It's possible that an architecture port cannot use _Swap() to swap to
|
||||
the _main() thread, but instead must do something custom. It must
|
||||
enable this option in that case.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user