Compare commits
107 Commits
collab-rus
...
v1.6.0-rc2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8f0b4d7f4d | ||
|
|
796a6bb4d8 | ||
|
|
385e02ba52 | ||
|
|
5a5d878252 | ||
|
|
cf6e5cf730 | ||
|
|
7f1d5a47e4 | ||
|
|
aca2baa43a | ||
|
|
bf77e2616d | ||
|
|
cfa7ad5c4a | ||
|
|
30414fd866 | ||
|
|
7d21b5402c | ||
|
|
3ed3f29223 | ||
|
|
e9a4431362 | ||
|
|
56816bdbbf | ||
|
|
420594122a | ||
|
|
0efe69ec31 | ||
|
|
8173b881ba | ||
|
|
e7c091bba2 | ||
|
|
4c11ae8e60 | ||
|
|
af941d5832 | ||
|
|
32de7848c4 | ||
|
|
afe1621713 | ||
|
|
42b956050a | ||
|
|
04af679197 | ||
|
|
b082e12265 | ||
|
|
3cbabecfcc | ||
|
|
4d2ad79207 | ||
|
|
0cddc4b665 | ||
|
|
4cdbfbf9e2 | ||
|
|
c38888bccb | ||
|
|
be0db01093 | ||
|
|
a9cd7c0498 | ||
|
|
6af438c440 | ||
|
|
0ae966be58 | ||
|
|
33d0716c21 | ||
|
|
a176dd3274 | ||
|
|
ed8c6e2d1f | ||
|
|
c280ce6bf9 | ||
|
|
67d49c2344 | ||
|
|
f3c2664e53 | ||
|
|
22bbdc2f85 | ||
|
|
3b626c5e15 | ||
|
|
794a47dedf | ||
|
|
e203a64000 | ||
|
|
25a40b19ea | ||
|
|
9366bc161b | ||
|
|
602295a88f | ||
|
|
829c3ceb12 | ||
|
|
9f36bbc07a | ||
|
|
145a4c93fa | ||
|
|
0ce96e850d | ||
|
|
27d7a9e29f | ||
|
|
391ec95f38 | ||
|
|
cb409a67fd | ||
|
|
cf4ce62590 | ||
|
|
3188ed4e64 | ||
|
|
f0523be409 | ||
|
|
db089efe48 | ||
|
|
628ecfa4d7 | ||
|
|
f8d3ac0130 | ||
|
|
853c11885c | ||
|
|
f1c880aa8a | ||
|
|
653328c6e8 | ||
|
|
d88617db9b | ||
|
|
385770cf21 | ||
|
|
cf00c1c184 | ||
|
|
6a7b6679b1 | ||
|
|
3893503f49 | ||
|
|
b42719243d | ||
|
|
fd45cb4567 | ||
|
|
9f0045a30a | ||
|
|
662c8bee81 | ||
|
|
6d0fa01492 | ||
|
|
afe118fa1d | ||
|
|
247a2a0671 | ||
|
|
cb9033d10f | ||
|
|
bc7e0455c8 | ||
|
|
71e85e390b | ||
|
|
eee3a430dc | ||
|
|
32ef1480e9 | ||
|
|
39410d79fe | ||
|
|
40c944f0b6 | ||
|
|
efcdfce517 | ||
|
|
e50f05df3e | ||
|
|
3f0fcedf00 | ||
|
|
baab38500c | ||
|
|
06869d4499 | ||
|
|
de55b9f73a | ||
|
|
eb7910b9d1 | ||
|
|
95f8f6f3e0 | ||
|
|
b65e208171 | ||
|
|
2ccf0ad045 | ||
|
|
efc7ffde75 | ||
|
|
bee0fd0601 | ||
|
|
c6c73c1b2e | ||
|
|
ae495b7618 | ||
|
|
3b22494192 | ||
|
|
c4a5b9c74e | ||
|
|
ca23390e84 | ||
|
|
2644d370c8 | ||
|
|
bef829cfc0 | ||
|
|
c625aa2636 | ||
|
|
4329e5e24a | ||
|
|
4e4ac94f90 | ||
|
|
8d10dc63fc | ||
|
|
ce596d3c54 | ||
|
|
61b596b0e5 |
10
Makefile
10
Makefile
@@ -1,8 +1,8 @@
|
||||
VERSION_MAJOR = 1
|
||||
VERSION_MINOR = 5
|
||||
PATCHLEVEL = 99
|
||||
VERSION_MINOR = 6
|
||||
PATCHLEVEL = 0
|
||||
VERSION_RESERVED = 0
|
||||
EXTRAVERSION =
|
||||
EXTRAVERSION = -rc2
|
||||
NAME = Zephyr Kernel
|
||||
|
||||
export SOURCE_DIR PROJECT MDEF_FILE
|
||||
@@ -796,11 +796,11 @@ libs-y := $(libs-y1) $(libs-y2)
|
||||
export KBUILD_ZEPHYR_MAIN := $(drivers-y) $(libs-y) $(core-y)
|
||||
export LDFLAGS_zephyr
|
||||
|
||||
zephyr-deps := $(KBUILD_LDS) $(KBUILD_ZEPHYR_MAIN) $(app-y)
|
||||
|
||||
ALL_LIBS += $(TOOLCHAIN_LIBS)
|
||||
export ALL_LIBS
|
||||
|
||||
zephyr-deps := $(KBUILD_LDS) $(KBUILD_ZEPHYR_MAIN) $(app-y) $(ALL_LIBS)
|
||||
|
||||
LINK_LIBS := $(foreach l,$(ALL_LIBS), -l$(l))
|
||||
|
||||
OUTPUT_FORMAT ?= elf32-i386
|
||||
|
||||
@@ -84,7 +84,7 @@ static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread)
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
|
||||
@@ -103,7 +103,11 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
|
||||
bgt _EXIT_EXC
|
||||
|
||||
push {lr}
|
||||
|
||||
/* _is_next_thread_current must be called with interrupts locked */
|
||||
cpsid i
|
||||
blx _is_next_thread_current
|
||||
cpsie i
|
||||
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
|
||||
pop {r1}
|
||||
mov lr, r1
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
#include <kernel_offsets.h>
|
||||
|
||||
GEN_OFFSET_SYM(_thread_arch_t, basepri);
|
||||
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
|
||||
|
||||
#ifdef CONFIG_FLOAT
|
||||
GEN_OFFSET_SYM(_thread_arch_t, preempt_float);
|
||||
|
||||
@@ -234,16 +234,6 @@ SECTION_FUNC(TEXT, __svc)
|
||||
_context_switch:
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set _Swap()'s default return code to -EAGAIN. This eliminates the
|
||||
* need for the timeout code to invoke fiberRtnValueSet().
|
||||
*/
|
||||
|
||||
mrs r2, PSP /* thread mode, stack frame is on PSP */
|
||||
ldr r3, =_k_neg_eagain
|
||||
ldr r3, [r3, #0]
|
||||
str r3, [r2, #___esf_t_a1_OFFSET]
|
||||
|
||||
/*
|
||||
* Unlock interrupts:
|
||||
* - in a SVC call, so protected against context switches
|
||||
@@ -305,17 +295,21 @@ SECTION_FUNC(TEXT, _Swap)
|
||||
ldr r2, [r1, #_kernel_offset_to_current]
|
||||
str r0, [r2, #_thread_offset_to_basepri]
|
||||
|
||||
/*
|
||||
* Set _Swap()'s default return code to -EAGAIN. This eliminates the need
|
||||
* for the timeout code to set it itself.
|
||||
*/
|
||||
ldr r1, =_k_neg_eagain
|
||||
ldr r1, [r1]
|
||||
str r1, [r2, #_thread_offset_to_swap_return_value]
|
||||
|
||||
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
|
||||
/* No priority-based interrupt masking on M0/M0+,
|
||||
* pending PendSV is used instead of svc
|
||||
*/
|
||||
ldr r1, =_SCS_ICSR
|
||||
ldr r2, =_SCS_ICSR_PENDSV
|
||||
str r2, [r1, #0]
|
||||
|
||||
/* load -EAGAIN as the default return value */
|
||||
ldr r0, =_k_neg_eagain
|
||||
ldr r0, [r0]
|
||||
ldr r3, =_SCS_ICSR_PENDSV
|
||||
str r3, [r1, #0]
|
||||
|
||||
/* Unlock interrupts to allow PendSV, since it's running at prio 0xff
|
||||
*
|
||||
@@ -323,12 +317,10 @@ SECTION_FUNC(TEXT, _Swap)
|
||||
* of a higher priority pending.
|
||||
*/
|
||||
cpsie i
|
||||
|
||||
/* PC stored in stack frame by the hw */
|
||||
bx lr
|
||||
#else /* CONFIG_CPU_CORTEX_M3_M4 */
|
||||
svc #0
|
||||
|
||||
/* r0 contains the return value if needed */
|
||||
bx lr
|
||||
#endif
|
||||
|
||||
/* coming back from exception, r2 still holds the pointer to _current */
|
||||
ldr r0, [r2, #_thread_offset_to_swap_return_value]
|
||||
bx lr
|
||||
|
||||
@@ -80,7 +80,7 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
|
||||
* @return N/A
|
||||
*/
|
||||
|
||||
void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
@@ -140,6 +140,8 @@ void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
tcs->callee_saved.psp = (uint32_t)pInitCtx;
|
||||
tcs->arch.basepri = 0;
|
||||
|
||||
/* swap_return_value can contain garbage */
|
||||
|
||||
_nano_timeout_thread_init(tcs);
|
||||
|
||||
/* initial values in all other registers/TCS entries are irrelevant */
|
||||
|
||||
@@ -142,6 +142,9 @@ struct _thread_arch {
|
||||
/* interrupt locking key */
|
||||
uint32_t basepri;
|
||||
|
||||
/* r0 in stack frame cannot be written to reliably */
|
||||
uint32_t swap_return_value;
|
||||
|
||||
#ifdef CONFIG_FLOAT
|
||||
/*
|
||||
* No cooperative floating point register set structure exists for
|
||||
|
||||
@@ -47,25 +47,10 @@ static ALWAYS_INLINE void nanoArchInit(void)
|
||||
_CpuIdleInit();
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Set the return value for the specified fiber (inline)
|
||||
*
|
||||
* The register used to store the return value from a function call invocation
|
||||
* to <value>. It is assumed that the specified <fiber> is pending, and thus
|
||||
* the fiber's thread is stored in its struct tcs structure.
|
||||
*
|
||||
* @param fiber pointer to the fiber
|
||||
* @param value is the value to set as a return value
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
static ALWAYS_INLINE void
|
||||
_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
{
|
||||
struct __esf *esf = (struct __esf *)thread->callee_saved.psp;
|
||||
|
||||
esf->a1 = value;
|
||||
thread->arch.swap_return_value = value;
|
||||
}
|
||||
|
||||
extern void nano_cpu_atomic_idle(unsigned int);
|
||||
|
||||
@@ -30,6 +30,9 @@
|
||||
#define _thread_offset_to_basepri \
|
||||
(___thread_t_arch_OFFSET + ___thread_arch_t_basepri_OFFSET)
|
||||
|
||||
#define _thread_offset_to_swap_return_value \
|
||||
(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
|
||||
|
||||
#define _thread_offset_to_preempt_float \
|
||||
(___thread_t_arch_OFFSET + ___thread_arch_t_preempt_float_OFFSET)
|
||||
|
||||
|
||||
@@ -87,8 +87,10 @@ int stm32_gpio_flags_to_conf(int flags, int *pincfg)
|
||||
}
|
||||
|
||||
if (direction == GPIO_DIR_OUT) {
|
||||
/* Pin is configured as an output */
|
||||
*pincfg = STM32F10X_PIN_CONFIG_DRIVE_PUSH_PULL;
|
||||
} else if (direction == GPIO_DIR_IN) {
|
||||
} else {
|
||||
/* Pin is configured as an input */
|
||||
int pud = flags & GPIO_PUD_MASK;
|
||||
|
||||
/* pull-{up,down} maybe? */
|
||||
@@ -100,8 +102,6 @@ int stm32_gpio_flags_to_conf(int flags, int *pincfg)
|
||||
/* floating */
|
||||
*pincfg = STM32F10X_PIN_CONFIG_BIAS_HIGH_IMPEDANCE;
|
||||
}
|
||||
} else {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -7,7 +7,6 @@ depends on SOC_SERIES_CC32XX
|
||||
|
||||
config SOC_CC3200
|
||||
bool "CC3200"
|
||||
select CPU_HAS_FPU
|
||||
select HAS_CC3200SDK
|
||||
|
||||
endchoice
|
||||
|
||||
@@ -60,7 +60,7 @@ struct init_stack_frame {
|
||||
};
|
||||
|
||||
|
||||
void _new_thread(char *stack_memory, unsigned stack_size,
|
||||
void _new_thread(char *stack_memory, size_t stack_size,
|
||||
void *uk_task_ptr, _thread_entry_t thread_func,
|
||||
void *arg1, void *arg2, void *arg3,
|
||||
int priority, unsigned options)
|
||||
|
||||
@@ -60,17 +60,12 @@
|
||||
/* SSE control/status register default value (used by assembler code) */
|
||||
extern uint32_t _sse_mxcsr_default_value;
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Save a thread's floating point context information.
|
||||
/*
|
||||
* Save a thread's floating point context information.
|
||||
*
|
||||
* This routine saves the system's "live" floating point context into the
|
||||
* specified thread control block. The SSE registers are saved only if the
|
||||
* thread is actually using them.
|
||||
*
|
||||
* @param tcs Pointer to thread control block.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
static void _FpCtxSave(struct tcs *tcs)
|
||||
{
|
||||
@@ -83,16 +78,11 @@ static void _FpCtxSave(struct tcs *tcs)
|
||||
_do_fp_regs_save(&tcs->arch.preempFloatReg);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Initialize a thread's floating point context information.
|
||||
/*
|
||||
* Initialize a thread's floating point context information.
|
||||
*
|
||||
* This routine initializes the system's "live" floating point context.
|
||||
* The SSE registers are initialized only if the thread is actually using them.
|
||||
*
|
||||
* @param tcs Pointer to thread control block.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
static inline void _FpCtxInit(struct tcs *tcs)
|
||||
{
|
||||
@@ -104,37 +94,9 @@ static inline void _FpCtxInit(struct tcs *tcs)
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Enable preservation of floating point context information.
|
||||
*
|
||||
* @brief Enable preservation of floating point context information.
|
||||
*
|
||||
* This routine informs the kernel that the specified thread (which may be
|
||||
* the current thread) will be using the floating point registers.
|
||||
* The @a options parameter indicates which floating point register sets
|
||||
* will be used by the specified thread:
|
||||
*
|
||||
* a) K_FP_REGS indicates x87 FPU and MMX registers only
|
||||
* b) K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
|
||||
*
|
||||
* Invoking this routine initializes the thread's floating point context info
|
||||
* to that of an FPU that has been reset. The next time the thread is scheduled
|
||||
* by _Swap() it will either inherit an FPU that is guaranteed to be in a "sane"
|
||||
* state (if the most recent user of the FPU was cooperatively swapped out)
|
||||
* or the thread's own floating point context will be loaded (if the most
|
||||
* recent user of the FPU was pre-empted, or if this thread is the first user
|
||||
* of the FPU). Thereafter, the kernel will protect the thread's FP context
|
||||
* so that it is not altered during a preemptive context switch.
|
||||
*
|
||||
* @warning
|
||||
* This routine should only be used to enable floating point support for a
|
||||
* thread that does not currently have such support enabled already.
|
||||
*
|
||||
* @param tcs Pointer to thread control block.
|
||||
* @param options Registers to be preserved (K_FP_REGS or K_SSE_REGS).
|
||||
*
|
||||
* @return N/A
|
||||
*
|
||||
* @internal
|
||||
* The transition from "non-FP supporting" to "FP supporting" must be done
|
||||
* atomically to avoid confusing the floating point logic used by _Swap(), so
|
||||
* this routine locks interrupts to ensure that a context switch does not occur.
|
||||
@@ -232,21 +194,8 @@ void k_float_enable(struct tcs *tcs, unsigned int options)
|
||||
}
|
||||
|
||||
/**
|
||||
* Disable preservation of floating point context information.
|
||||
*
|
||||
* @brief Disable preservation of floating point context information.
|
||||
*
|
||||
* This routine informs the kernel that the specified thread (which may be
|
||||
* the current thread) will no longer be using the floating point registers.
|
||||
*
|
||||
* @warning
|
||||
* This routine should only be used to disable floating point support for
|
||||
* a thread that currently has such support enabled.
|
||||
*
|
||||
* @param tcs Pointer to thread control block.
|
||||
*
|
||||
* @return N/A
|
||||
*
|
||||
* @internal
|
||||
* The transition from "FP supporting" to "non-FP supporting" must be done
|
||||
* atomically to avoid confusing the floating point logic used by _Swap(), so
|
||||
* this routine locks interrupts to ensure that a context switch does not occur.
|
||||
@@ -276,9 +225,8 @@ void k_float_disable(struct tcs *tcs)
|
||||
irq_unlock(imask);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Handler for "device not available" exception.
|
||||
/*
|
||||
* Handler for "device not available" exception.
|
||||
*
|
||||
* This routine is registered to handle the "device not available" exception
|
||||
* (vector = 7).
|
||||
@@ -286,10 +234,6 @@ void k_float_disable(struct tcs *tcs)
|
||||
* The processor will generate this exception if any x87 FPU, MMX, or SSEx
|
||||
* instruction is executed while CR0[TS]=1. The handler then enables the
|
||||
* current thread to use all supported floating point registers.
|
||||
*
|
||||
* @param pEsf This value is not used.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _FpNotAvailableExcHandler(NANO_ESF *pEsf)
|
||||
{
|
||||
|
||||
@@ -245,7 +245,7 @@ __asm__("\t.globl _thread_entry\n"
|
||||
*
|
||||
* @return opaque pointer to initialized k_thread structure
|
||||
*/
|
||||
void _new_thread(char *pStackMem, unsigned stackSize,
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
|
||||
@@ -7,16 +7,16 @@ Welcome to the Zephyr Project's :abbr:`API (Application Programing Interface)`
|
||||
documentation.
|
||||
|
||||
This section contains the API documentation automatically extracted from the
|
||||
code. To ease navigation, we have split the APIs in nanokernel APIs and
|
||||
microkernel APIs. If you are looking for a specific API, enter it on the
|
||||
search box. The search results display all sections containing information
|
||||
code. If you are looking for a specific API, enter it on the search box.
|
||||
The search results display all sections containing information
|
||||
about that API.
|
||||
|
||||
The use of the Zephyr APIs is the same for all SoCs and boards.
|
||||
The Zephyr APIs are used the same way on all SoCs and boards.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
kernel_api.rst
|
||||
device.rst
|
||||
bluetooth.rst
|
||||
io_interfaces.rst
|
||||
@@ -25,4 +25,3 @@ The use of the Zephyr APIs is the same for all SoCs and boards.
|
||||
power_management_api
|
||||
file_system
|
||||
testing
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
.. _event_logger:
|
||||
|
||||
Event Logger APIs
|
||||
#################
|
||||
Event Logging APIs
|
||||
##################
|
||||
|
||||
.. contents::
|
||||
:depth: 1
|
||||
@@ -11,6 +11,20 @@ Event Logger APIs
|
||||
Event Logger
|
||||
************
|
||||
|
||||
An event logger is an object that can record the occurrence of significant
|
||||
events, which can be subsequently extracted and reviewed.
|
||||
|
||||
.. doxygengroup:: event_logger
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
:content-only:
|
||||
|
||||
Kernel Event Logger
|
||||
*******************
|
||||
|
||||
The kernel event logger records the occurrence of significant kernel events,
|
||||
which can be subsequently extracted and reviewed.
|
||||
(See :ref:`kernel_event_logger_v2`.)
|
||||
|
||||
.. doxygengroup:: kernel_event_logger
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
239
doc/api/kernel_api.rst
Normal file
239
doc/api/kernel_api.rst
Normal file
@@ -0,0 +1,239 @@
|
||||
.. _kernel_apis:
|
||||
|
||||
Kernel APIs
|
||||
###########
|
||||
|
||||
This section contains APIs for the kernel's core services,
|
||||
as described in the :ref:`kernel_v2`.
|
||||
|
||||
.. important::
|
||||
Unless otherwise noted these APIs can be used by threads, but not by ISRs.
|
||||
|
||||
.. contents::
|
||||
:depth: 1
|
||||
:local:
|
||||
:backlinks: top
|
||||
|
||||
Threads
|
||||
*******
|
||||
|
||||
A thread is an independently scheduled series of instructions that implements
|
||||
a portion of an application's processing. Threads are used to perform processing
|
||||
that is too lengthy or too complex to be performed by an ISR.
|
||||
(See :ref:`threads_v2`.)
|
||||
|
||||
.. doxygengroup:: thread_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Workqueues
|
||||
**********
|
||||
|
||||
A workqueue processes a series of work items by executing the associated
|
||||
functions in a dedicated thread. Workqueues are typically used by an ISR
|
||||
or high-priority thread to offload non-urgent processing.
|
||||
(See :ref:`workqueues_v2`.)
|
||||
|
||||
.. doxygengroup:: workqueue_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Clocks
|
||||
******
|
||||
|
||||
Kernel clocks enable threads and ISRs to measure the passage of time
|
||||
with either normal and high precision.
|
||||
(See :ref:`clocks_v2`.)
|
||||
|
||||
.. doxygengroup:: clock_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Timers
|
||||
******
|
||||
|
||||
Timers enable threads to measure the passage of time, and to optionally execute
|
||||
an action when the timer expires.
|
||||
(See :ref:`timers_v2`.)
|
||||
|
||||
.. doxygengroup:: timer_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Memory Slabs
|
||||
************
|
||||
|
||||
Memory slabs enable the dynamic allocation and release of fixed-size
|
||||
memory blocks.
|
||||
(See :ref:`memory_slabs_v2`.)
|
||||
|
||||
.. doxygengroup:: mem_slab_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Memory Pools
|
||||
************
|
||||
|
||||
Memory pools enable the dynamic allocation and release of variable-size
|
||||
memory blocks.
|
||||
(See :ref:`memory_pools_v2`.)
|
||||
|
||||
.. doxygengroup:: mem_pool_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Heap Memory Pool
|
||||
****************
|
||||
|
||||
The heap memory pools enable the dynamic allocation and release of memory
|
||||
in a :cpp:func:`malloc()`-like manner.
|
||||
(See :ref:`heap_v2`.)
|
||||
|
||||
.. doxygengroup:: heap_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Semaphores
|
||||
**********
|
||||
|
||||
Semaphores provide traditional counting semaphore capabilities.
|
||||
(See :ref:`semaphores_v2`.)
|
||||
|
||||
.. doxygengroup:: semaphore_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Mutexes
|
||||
*******
|
||||
|
||||
Mutexes provide traditional reentrant mutex capabilities
|
||||
with basic priority inheritance.
|
||||
(See :ref:`mutexes_v2`.)
|
||||
|
||||
.. doxygengroup:: mutex_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Alerts
|
||||
******
|
||||
|
||||
Alerts enable an application to perform asynchronous signalling,
|
||||
somewhat akin to Unix-style signals.
|
||||
(See :ref:`alerts_v2`.)
|
||||
|
||||
.. doxygengroup:: alert_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Fifos
|
||||
*****
|
||||
|
||||
Fifos provide traditional first in, first out (FIFO) queuing of data items
|
||||
of any size.
|
||||
(See :ref:`fifos_v2`.)
|
||||
|
||||
.. doxygengroup:: fifo_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Lifos
|
||||
*****
|
||||
|
||||
Lifos provide traditional last in, first out (LIFO) queuing of data items
|
||||
of any size.
|
||||
(See :ref:`lifos_v2`.)
|
||||
|
||||
.. doxygengroup:: lifo_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Stacks
|
||||
******
|
||||
|
||||
Stacks provide traditional last in, first out (LIFO) queuing of 32-bit
|
||||
data items.
|
||||
(See :ref:`stacks_v2`.)
|
||||
|
||||
.. doxygengroup:: stack_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Message Queues
|
||||
**************
|
||||
|
||||
Message queues provide a simple message queuing mechanism
|
||||
for fixed-size data items.
|
||||
(See :ref:`message_queues_v2`.)
|
||||
|
||||
.. doxygengroup:: msgq_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Mailboxes
|
||||
*********
|
||||
|
||||
Mailboxes provide an enhanced message queuing mechanism
|
||||
for variable-size messages.
|
||||
(See :ref:`mailboxes_v2`.)
|
||||
|
||||
.. doxygengroup:: mailbox_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Pipes
|
||||
*****
|
||||
|
||||
Pipes provide a traditional anonymous pipe mechanism for sending
|
||||
variable-size chunks of data, in whole or in part.
|
||||
(See :ref:`pipes_v2`.)
|
||||
|
||||
.. doxygengroup:: pipe_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Interrupt Service Routines (ISRs)
|
||||
*********************************
|
||||
|
||||
An interrupt service routine is a series of instructions that is
|
||||
executed asynchronously in response to a hardware or software interrupt.
|
||||
(See :ref:`interrupts_v2`.)
|
||||
|
||||
.. doxygengroup:: isr_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Atomic Services
|
||||
***************
|
||||
|
||||
The atomic services enable multiple threads and ISRs to read and modify
|
||||
32-bit variables in an uninterruptible manner.
|
||||
(See :ref:`atomic_v2`.)
|
||||
|
||||
.. important::
|
||||
All atomic services APIs can be used by both threads and ISRs.
|
||||
|
||||
.. doxygengroup:: atomic_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Floating Point Services
|
||||
***********************
|
||||
|
||||
The floating point services enable threads to use a board's floating point
|
||||
registers.
|
||||
(See :ref:`float_v2`.)
|
||||
|
||||
.. doxygengroup:: float_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
|
||||
Ring Buffers
|
||||
************
|
||||
|
||||
Ring buffers enable simple first in, first out (FIFO) queuing
|
||||
of variable-size data items.
|
||||
(See :ref:`ring_buffers_v2`.)
|
||||
|
||||
.. doxygengroup:: ring_buffer_apis
|
||||
:project: Zephyr
|
||||
:content-only:
|
||||
@@ -71,7 +71,7 @@ The following code defines and initializes an empty fifo.
|
||||
k_fifo_init(&my_fifo);
|
||||
|
||||
Alternatively, an empty fifo can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_FIFO_DEFINE()`.
|
||||
by calling :c:macro:`K_FIFO_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -153,6 +153,7 @@ APIs
|
||||
|
||||
The following fifo APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_FIFO_DEFINE`
|
||||
* :cpp:func:`k_fifo_init()`
|
||||
* :cpp:func:`k_fifo_put()`
|
||||
* :cpp:func:`k_fifo_put_list()`
|
||||
|
||||
@@ -62,7 +62,7 @@ The following defines and initializes an empty lifo.
|
||||
k_lifo_init(&my_lifo);
|
||||
|
||||
Alternatively, an empty lifo can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_LIFO_DEFINE()`.
|
||||
by calling :c:macro:`K_LIFO_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -141,6 +141,7 @@ APIs
|
||||
|
||||
The following lifo APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_LIFO_DEFINE`
|
||||
* :cpp:func:`k_lifo_init()`
|
||||
* :cpp:func:`k_lifo_put()`
|
||||
* :cpp:func:`k_lifo_get()`
|
||||
|
||||
@@ -130,7 +130,7 @@ The following code defines and initializes an empty mailbox.
|
||||
k_mbox_init(&my_mailbox);
|
||||
|
||||
Alternatively, a mailbox can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MBOX_DEFINE()`.
|
||||
by calling :c:macro:`K_MBOX_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -484,12 +484,12 @@ The receiving thread must then respond as follows:
|
||||
the mailbox has already completed data retrieval and deleted the message.
|
||||
|
||||
* If the message descriptor size is non-zero and the receiving thread still
|
||||
wants to retrieve the data, the thread must call :c:func:`k_mbox_data_get()`
|
||||
wants to retrieve the data, the thread must call :cpp:func:`k_mbox_data_get()`
|
||||
and supply a message buffer large enough to hold the data. The mailbox copies
|
||||
the data into the message buffer and deletes the message.
|
||||
|
||||
* If the message descriptor size is non-zero and the receiving thread does *not*
|
||||
want to retrieve the data, the thread must call :c:func:`k_mbox_data_get()`.
|
||||
want to retrieve the data, the thread must call :cpp:func:`k_mbox_data_get()`.
|
||||
and specify a message buffer of :c:macro:`NULL`. The mailbox deletes
|
||||
the message without copying the data.
|
||||
|
||||
@@ -548,7 +548,7 @@ A receiving thread may choose to retrieve message data into a memory block,
|
||||
rather than a message buffer. This is done in much the same way as retrieving
|
||||
data subsequently into a message buffer --- the receiving thread first
|
||||
receives the message without its data, then retrieves the data by calling
|
||||
:c:func:`k_mbox_data_block_get()`. The mailbox fills in the block descriptor
|
||||
:cpp:func:`k_mbox_data_block_get()`. The mailbox fills in the block descriptor
|
||||
supplied by the receiving thread, allowing the thread to access the data.
|
||||
The mailbox also deletes the received message, since data retrieval
|
||||
has been completed. The receiving thread is then responsible for freeing
|
||||
@@ -634,6 +634,8 @@ APIs
|
||||
|
||||
The following APIs for a mailbox are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MBOX_DEFINE`
|
||||
* :cpp:func:`k_mbox_init()`
|
||||
* :cpp:func:`k_mbox_put()`
|
||||
* :cpp:func:`k_mbox_async_put()`
|
||||
* :cpp:func:`k_mbox_get()`
|
||||
|
||||
@@ -85,7 +85,7 @@ that is capable of holding 10 items, each of which is 12 bytes long.
|
||||
k_msgq_init(&my_msgq, my_msgq_buffer, sizeof(data_item_type), 10);
|
||||
|
||||
Alternatively, a message queue can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MSGQ_DEFINE()`.
|
||||
by calling :c:macro:`K_MSGQ_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above. Observe
|
||||
that the macro defines both the message queue and its buffer.
|
||||
@@ -176,6 +176,7 @@ APIs
|
||||
|
||||
The following message queue APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MSGQ_DEFINE`
|
||||
* :cpp:func:`k_msgq_init()`
|
||||
* :cpp:func:`k_msgq_put()`
|
||||
* :cpp:func:`k_msgq_get()`
|
||||
|
||||
@@ -54,7 +54,7 @@ Implementation
|
||||
|
||||
A pipe is defined using a variable of type :c:type:`struct k_pipe` and an
|
||||
optional character buffer of type :c:type:`unsigned char`. It must then be
|
||||
initialized by calling :c:func:`k_pipe_init()`.
|
||||
initialized by calling :cpp:func:`k_pipe_init()`.
|
||||
|
||||
The following code defines and initializes an empty pipe that has a ring
|
||||
buffer capable of holding 100 bytes and is aligned to a 4-byte boundary.
|
||||
@@ -68,7 +68,7 @@ buffer capable of holding 100 bytes and is aligned to a 4-byte boundary.
|
||||
k_pipe_init(&my_pipe, my_ring_buffer, sizeof(my_ring_buffer));
|
||||
|
||||
Alternatively, a pipe can be defined and initialized at compile time by
|
||||
calling :c:macro:`K_PIPE_DEFINE()`.
|
||||
calling :c:macro:`K_PIPE_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above. Observe
|
||||
that that macro defines both the pipe and its ring buffer.
|
||||
@@ -80,7 +80,7 @@ that that macro defines both the pipe and its ring buffer.
|
||||
Writing to a Pipe
|
||||
=================
|
||||
|
||||
Data is added to a pipe by calling :c:func:`k_pipe_put()`.
|
||||
Data is added to a pipe by calling :cpp:func:`k_pipe_put()`.
|
||||
|
||||
The following code builds on the example above, and uses the pipe to pass
|
||||
data from a producing thread to one or more consuming threads. If the pipe's
|
||||
@@ -126,7 +126,7 @@ waits for a specified amount of time.
|
||||
Reading from a Pipe
|
||||
===================
|
||||
|
||||
Data is read from the pipe by calling :c:func:`k_pipe_get()`.
|
||||
Data is read from the pipe by calling :cpp:func:`k_pipe_get()`.
|
||||
|
||||
The following code builds on the example above, and uses the pipe to
|
||||
process data items generated by one or more producing threads.
|
||||
@@ -141,7 +141,7 @@ process data items generated by one or more producing threads.
|
||||
|
||||
while (1) {
|
||||
rc = k_pipe_get(&my_pipe, buffer, sizeof(buffer), &bytes_read,
|
||||
sizeof(header), 100);
|
||||
sizeof(header), K_MSEC(100));
|
||||
|
||||
if ((rc < 0) || (bytes_read < sizeof (header))) {
|
||||
/* Incomplete message header received */
|
||||
@@ -172,14 +172,15 @@ Configuration Options
|
||||
|
||||
Related configuration options:
|
||||
|
||||
* CONFIG_NUM_PIPE_ASYNC_MSGS
|
||||
* :option:`CONFIG_NUM_PIPE_ASYNC_MSGS`
|
||||
|
||||
APIs
|
||||
****
|
||||
|
||||
The following message queue APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:func:`k_pipe_init()`
|
||||
* :c:func:`k_pipe_put()`
|
||||
* :c:func:`k_pipe_get()`
|
||||
* :c:func:`k_pipe_block_put()`
|
||||
* :c:macro:`K_PIPE_DEFINE`
|
||||
* :cpp:func:`k_pipe_init()`
|
||||
* :cpp:func:`k_pipe_put()`
|
||||
* :cpp:func:`k_pipe_get()`
|
||||
* :cpp:func:`k_pipe_block_put()`
|
||||
|
||||
@@ -69,7 +69,7 @@ up to ten 32-bit data values.
|
||||
k_stack_init(&my_stack, my_stack_array, MAX_ITEMS);
|
||||
|
||||
Alternatively, a stack can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_STACK_DEFINE()`.
|
||||
by calling :c:macro:`K_STACK_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above. Observe
|
||||
that the macro defines both the stack and its array of data values.
|
||||
@@ -136,6 +136,7 @@ APIs
|
||||
|
||||
The following stack APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_STACK_DEFINE`
|
||||
* :cpp:func:`k_stack_init()`
|
||||
* :cpp:func:`k_stack_push()`
|
||||
* :cpp:func:`k_stack_pop()`
|
||||
|
||||
@@ -122,7 +122,7 @@ However, since a memory pool also requires a number of variable-size data
|
||||
structures to represent its block sets and the status of its quad-blocks,
|
||||
the kernel does not support the run-time definition of a memory pool.
|
||||
A memory pool can only be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MEM_POOL_DEFINE()`.
|
||||
by calling :c:macro:`K_MEM_POOL_DEFINE`.
|
||||
|
||||
The following code defines and initializes a memory pool that has 3 blocks
|
||||
of 4096 bytes each, which can be partitioned into blocks as small as 64 bytes
|
||||
@@ -202,9 +202,9 @@ Configuration Options
|
||||
|
||||
Related configuration options:
|
||||
|
||||
* :option:`CONFIG_MEM_POOL_AD_BEFORE_SEARCH_FOR_BIGGERBLOCK`
|
||||
* :option:`CONFIG_MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK`
|
||||
* :option:`CONFIG_MEM_POOL_AD_NONE`
|
||||
* :option:`CONFIG_MEM_POOL_SPLIT_BEFORE_DEFRAG`
|
||||
* :option:`CONFIG_MEM_POOL_DEFRAG_BEFORE_SPLIT`
|
||||
* :option:`CONFIG_MEM_POOL_SPLIT_ONLY`
|
||||
|
||||
|
||||
APIs
|
||||
@@ -212,6 +212,7 @@ APIs
|
||||
|
||||
The following memory pool APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MEM_POOL_DEFINE`
|
||||
* :cpp:func:`k_mem_pool_alloc()`
|
||||
* :cpp:func:`k_mem_pool_free()`
|
||||
* :cpp:func:`k_mem_pool_defragment()`
|
||||
* :cpp:func:`k_mem_pool_defrag()`
|
||||
|
||||
@@ -81,7 +81,7 @@ that are 400 bytes long, each of which is aligned to a 4-byte boundary..
|
||||
k_mem_slab_init(&my_slab, my_slab_buffer, 400, 6);
|
||||
|
||||
Alternatively, a memory slab can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MEM_SLAB_DEFINE()`.
|
||||
by calling :c:macro:`K_MEM_SLAB_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above. Observe
|
||||
that the macro defines both the memory slab and its buffer.
|
||||
@@ -146,6 +146,7 @@ APIs
|
||||
|
||||
The following memory slab APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MEM_SLAB_DEFINE`
|
||||
* :cpp:func:`k_mem_slab_init()`
|
||||
* :cpp:func:`k_mem_slab_alloc()`
|
||||
* :cpp:func:`k_mem_slab_free()`
|
||||
|
||||
@@ -31,7 +31,7 @@ Defining an Atomic Variable
|
||||
An atomic variable is defined using a variable of type :c:type:`atomic_t`.
|
||||
|
||||
By default an atomic variable is initialized to zero. However, it can be given
|
||||
a different value using :c:macro:`ATOMIC_INIT()`:
|
||||
a different value using :c:macro:`ATOMIC_INIT`:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
@@ -65,6 +65,10 @@ by a higher priority context that also calls the routine.
|
||||
Manipulating an Array of Atomic Variables
|
||||
=========================================
|
||||
|
||||
An array of 32-bit atomic variables can be defined in the conventional manner.
|
||||
However, you can also define an N-bit array of atomic variables using
|
||||
:c:macro:`ATOMIC_DEFINE`.
|
||||
|
||||
A single bit in array of atomic variables can be manipulated using
|
||||
the APIs listed at the end of this section that end with :cpp:func:`_bit`.
|
||||
|
||||
@@ -111,6 +115,8 @@ APIs
|
||||
|
||||
The following atomic operation APIs are provided by :file:`atomic.h`:
|
||||
|
||||
* :c:macro:`ATOMIC_INIT`
|
||||
* :c:macro:`ATOMIC_DEFINE`
|
||||
* :cpp:func:`atomic_get()`
|
||||
* :cpp:func:`atomic_set()`
|
||||
* :cpp:func:`atomic_clear()`
|
||||
|
||||
@@ -1,282 +0,0 @@
|
||||
.. _event_logger_v2:
|
||||
|
||||
Kernel Event Logger [TBD]
|
||||
#########################
|
||||
|
||||
Definition
|
||||
**********
|
||||
|
||||
The kernel event logger is a standardized mechanism to record events within the
|
||||
Kernel while providing a single interface for the user to collect the data.
|
||||
This mechanism is currently used to log the following events:
|
||||
|
||||
* Sleep events (entering and exiting low power conditions).
|
||||
* Context switch events.
|
||||
* Interrupt events.
|
||||
|
||||
Kernel Event Logger Configuration
|
||||
*********************************
|
||||
|
||||
Kconfig provides the ability to enable and disable the collection of events and
|
||||
to configure the size of the buffer used by the event logger.
|
||||
|
||||
These options can be found in the following path :file:`kernel/Kconfig`.
|
||||
|
||||
General kernel event logger configuration:
|
||||
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE`
|
||||
|
||||
Default size: 128 words, 32-bit length.
|
||||
|
||||
Profiling points configuration:
|
||||
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC`
|
||||
|
||||
Allows modifying at runtime the events to record. At boot no event is
|
||||
recorded if enabled. This flag adds functions allowing to enable/disable
|
||||
recording of kernel event logger.
|
||||
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP`
|
||||
|
||||
Enables the possibility to set the timer function to be used to populate
|
||||
kernel event logger timestamp. This has to be done at runtime by calling
|
||||
sys_k_event_logger_set_timer and providing the function callback.
|
||||
|
||||
Adding a Kernel Event Logging Point
|
||||
***********************************
|
||||
|
||||
Custom trace points can be added with the following API:
|
||||
|
||||
* :c:func:`sys_k_event_logger_put()`
|
||||
|
||||
Adds the profile of a new event with custom data.
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_put_timed()`
|
||||
|
||||
Adds timestamped profile of a new event.
|
||||
|
||||
.. important::
|
||||
|
||||
The data must be in 32-bit sized blocks.
|
||||
|
||||
Retrieving Kernel Event Data
|
||||
****************************
|
||||
|
||||
Applications are required to implement a cooperative thread for accessing the
|
||||
recorded event messages. Developers can use the provided API to retrieve the
|
||||
data, or may write their own routines using the ring buffer provided by the
|
||||
event logger.
|
||||
|
||||
The API functions provided are:
|
||||
|
||||
* :c:func:`sys_k_event_logger_get()`
|
||||
* :c:func:`sys_k_event_logger_get_wait()`
|
||||
* :c:func:`sys_k_event_logger_get_wait_timeout()`
|
||||
|
||||
The above functions specify various ways to retrieve an event message and to
|
||||
copy it to the provided buffer. When the buffer size is smaller than the
|
||||
message, the function will return an error. All three functions retrieve
|
||||
messages via a FIFO method. The :literal:`wait` and :literal:`wait_timeout`
|
||||
functions allow the caller to pend until a new message is logged, or until the
|
||||
timeout expires.
|
||||
|
||||
Enabling/disabling event recording
|
||||
**********************************
|
||||
|
||||
If KERNEL_EVENT_LOGGER_DYNAMIC is enabled, following functions must be checked
|
||||
for dynamically enabling/disabling event recording at runtime:
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_set_mask()`
|
||||
* :cpp:func:`sys_k_event_logger_get_mask()`
|
||||
|
||||
Each mask bit corresponds to the corresponding event ID (mask is starting at
|
||||
bit 1 not bit 0).
|
||||
|
||||
More details are provided in function description.
|
||||
|
||||
Timestamp
|
||||
*********
|
||||
|
||||
The timestamp used by the kernel event logger is 32-bit LSB of platform HW
|
||||
timer (for example Lakemont APIC timer for Quark SE). This timer period is very
|
||||
small and leads to timestamp wraparound happening quite often (e.g. every 134s
|
||||
for Quark SE).
|
||||
|
||||
see :option:`CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC`
|
||||
|
||||
This wraparound must be considered when analyzing kernel event logger data and
|
||||
care must be taken when tickless idle is enabled and sleep duration can exceed
|
||||
maximum HW timer value.
|
||||
|
||||
Timestamp used by the kernel event logger can be customized by enabling
|
||||
following option: :option:`CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP`
|
||||
|
||||
In case this option is enabled, a callback function returning a 32-bit
|
||||
timestamp must be provided to the kernel event logger by calling the following
|
||||
function at runtime: :cpp:func:`sys_k_event_logger_set_timer()`
|
||||
|
||||
Message Formats
|
||||
***************
|
||||
|
||||
Interrupt-driven Event Messaging
|
||||
--------------------------------
|
||||
|
||||
The data of the interrupt-driven event message comes in two block of 32 bits:
|
||||
|
||||
* The first block contains the timestamp occurrence of the interrupt event.
|
||||
* The second block contains the Id of the interrupt.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[2];
|
||||
data[0] = timestamp_event;
|
||||
data[1] = interrupt_id;
|
||||
|
||||
Context-switch Event Messaging
|
||||
------------------------------
|
||||
|
||||
The data of the context-switch event message comes in two block of 32 bits:
|
||||
|
||||
* The first block contains the timestamp occurrence of the context-switch event.
|
||||
* The second block contains the thread id of the context involved.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[2];
|
||||
data[0] = timestamp_event;
|
||||
data[1] = context_id;
|
||||
|
||||
Sleep Event Messaging
|
||||
---------------------
|
||||
|
||||
The data of the sleep event message comes in three block of 32 bits:
|
||||
|
||||
* The first block contains the timestamp when the CPU went to sleep mode.
|
||||
* The second block contains the timestamp when the CPU woke up.
|
||||
* The third block contains the interrupt Id that woke the CPU up.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[3];
|
||||
data[0] = timestamp_went_sleep;
|
||||
data[1] = timestamp_woke_up;
|
||||
data[2] = interrupt_id;
|
||||
|
||||
|
||||
Example: Retrieving Profiling Messages
|
||||
======================================
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[3];
|
||||
uint8_t data_length = SIZE32_OF(data);
|
||||
uint8_t dropped_count;
|
||||
|
||||
while(1) {
|
||||
/* collect the data */
|
||||
res = sys_k_event_logger_get_wait(&event_id, &dropped_count, data,
|
||||
&data_length);
|
||||
|
||||
if (dropped_count > 0) {
|
||||
/* process the message dropped count */
|
||||
}
|
||||
|
||||
if (res > 0) {
|
||||
/* process the data */
|
||||
switch (event_id) {
|
||||
case KERNEL_EVENT_CONTEXT_SWITCH_EVENT_ID:
|
||||
/* ... Process the context switch event data ... */
|
||||
break;
|
||||
case KERNEL_EVENT_INTERRUPT_EVENT_ID:
|
||||
/* ... Process the interrupt event data ... */
|
||||
break;
|
||||
case KERNEL_EVENT_SLEEP_EVENT_ID:
|
||||
/* ... Process the data for a sleep event ... */
|
||||
break;
|
||||
default:
|
||||
printf("unrecognized event id %d\n", event_id);
|
||||
}
|
||||
} else {
|
||||
if (res == -EMSGSIZE) {
|
||||
/* ERROR - The buffer provided to collect the
|
||||
* profiling events is too small.
|
||||
*/
|
||||
} else if (res == -EAGAIN) {
|
||||
/* There is no message available in the buffer */
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.. note::
|
||||
|
||||
To see an example that shows how to collect the kernel event data, check the
|
||||
project :file:`samples/kernel_event_logger`.
|
||||
|
||||
Example: Adding a Kernel Event Logging Point
|
||||
============================================
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint32_t data[2];
|
||||
|
||||
if (sys_k_must_log_event(KERNEL_EVENT_LOGGER_CUSTOM_ID)) {
|
||||
data[0] = custom_data_1;
|
||||
data[1] = custom_data_2;
|
||||
|
||||
sys_k_event_logger_put(KERNEL_EVENT_LOGGER_CUSTOM_ID, data,
|
||||
ARRAY_SIZE(data));
|
||||
}
|
||||
|
||||
Use the following function to register only the time of an event.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
if (sys_k_must_log_event(KERNEL_EVENT_LOGGER_CUSTOM_ID)) {
|
||||
sys_k_event_logger_put_timed(KERNEL_EVENT_LOGGER_CUSTOM_ID);
|
||||
}
|
||||
|
||||
APIs
|
||||
****
|
||||
|
||||
The following APIs are provided by the :file:`k_event_logger.h` file:
|
||||
|
||||
:cpp:func:`sys_k_event_logger_register_as_collector()`
|
||||
Register the current cooperative thread as the collector thread.
|
||||
|
||||
:c:func:`sys_k_event_logger_put()`
|
||||
Enqueue a kernel event logger message with custom data.
|
||||
|
||||
:cpp:func:`sys_k_event_logger_put_timed()`
|
||||
Enqueue a kernel event logger message with the current time.
|
||||
|
||||
:c:func:`sys_k_event_logger_get()`
|
||||
De-queue a kernel event logger message.
|
||||
|
||||
:c:func:`sys_k_event_logger_get_wait()`
|
||||
De-queue a kernel event logger message. Wait if the buffer is empty.
|
||||
|
||||
:c:func:`sys_k_event_logger_get_wait_timeout()`
|
||||
De-queue a kernel event logger message. Wait if the buffer is empty until
|
||||
the timeout expires.
|
||||
|
||||
:cpp:func:`sys_k_must_log_event()`
|
||||
Check if an event type has to be logged or not
|
||||
|
||||
In case KERNEL_EVENT_LOGGER_DYNAMIC is enabled:
|
||||
|
||||
:cpp:func:`sys_k_event_logger_set_mask()`
|
||||
Set kernel event logger event mask
|
||||
|
||||
:cpp:func:`sys_k_event_logger_get_mask()`
|
||||
Get kernel event logger event mask
|
||||
|
||||
In case KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP is enabled:
|
||||
|
||||
:cpp:func:`sys_k_event_logger_set_timer()`
|
||||
Set kernel event logger timestamp function
|
||||
@@ -102,23 +102,23 @@ pre-tag a thread using one of the techniques listed below.
|
||||
|
||||
* A statically-spawned x86 thread can be pre-tagged by passing the
|
||||
:c:macro:`K_FP_REGS` or :c:macro:`K_SSE_REGS` option to
|
||||
:c:macro:`K_THREAD_DEFINE()`.
|
||||
:c:macro:`K_THREAD_DEFINE`.
|
||||
|
||||
* A dynamically-spawned x86 thread can be pre-tagged by passing the
|
||||
:c:macro:`K_FP_REGS` or :c:macro:`K_SSE_REGS` option to
|
||||
:c:func:`k_thread_spawn()`.
|
||||
:cpp:func:`k_thread_spawn()`.
|
||||
|
||||
* An already-spawned x86 thread can pre-tag itself once it has started
|
||||
by passing the :c:macro:`K_FP_REGS` or :c:macro:`K_SSE_REGS` option to
|
||||
:c:func:`k_float_enable()`.
|
||||
:cpp:func:`k_float_enable()`.
|
||||
|
||||
If an x86 thread uses the floating point registers infrequently it can call
|
||||
:c:func:`k_float_disable()` to remove its tagging as an FPU user or SSE user.
|
||||
:cpp:func:`k_float_disable()` to remove its tagging as an FPU user or SSE user.
|
||||
This eliminates the need for the kernel to take steps to preserve
|
||||
the contents of the floating point registers during context switches
|
||||
when there is no need to do so.
|
||||
When the thread again needs to use the floating point registers it can re-tag
|
||||
itself as an FPU user or SSE user by calling :c:func:`k_float_enable()`.
|
||||
itself as an FPU user or SSE user by calling :cpp:func:`k_float_enable()`.
|
||||
|
||||
Implementation
|
||||
**************
|
||||
|
||||
@@ -127,7 +127,7 @@ Implementation
|
||||
Defining an ISR
|
||||
===============
|
||||
|
||||
An ISR is defined at run-time by calling :c:macro:`IRQ_CONNECT()`. It must
|
||||
An ISR is defined at run-time by calling :c:macro:`IRQ_CONNECT`. It must
|
||||
then be enabled by calling :cpp:func:`irq_enable()`.
|
||||
|
||||
.. important::
|
||||
@@ -185,7 +185,7 @@ APIs
|
||||
|
||||
The following interrupt-related APIs are provided by :file:`irq.h`:
|
||||
|
||||
* :c:macro:`IRQ_CONNECT()`
|
||||
* :c:macro:`IRQ_CONNECT`
|
||||
* :cpp:func:`irq_lock()`
|
||||
* :cpp:func:`irq_unlock()`
|
||||
* :cpp:func:`irq_enable()`
|
||||
@@ -195,3 +195,4 @@ The following interrupt-related APIs are provided by :file:`irq.h`:
|
||||
The following interrupt-related APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :cpp:func:`k_is_in_isr()`
|
||||
* :cpp:func:`k_is_preempt_thread`
|
||||
|
||||
252
doc/kernel_v2/other/kernel_event_logger.rst
Normal file
252
doc/kernel_v2/other/kernel_event_logger.rst
Normal file
@@ -0,0 +1,252 @@
|
||||
.. _kernel_event_logger_v2:
|
||||
|
||||
Kernel Event Logger
|
||||
###################
|
||||
|
||||
The kernel event logger records the occurrence of certain types of kernel
|
||||
events, allowing them to be subsequently extracted and reviewed.
|
||||
This capability can be helpful in profiling the operation of an application,
|
||||
either for debugging purposes or for optimizing the performance of the application.
|
||||
|
||||
.. contents::
|
||||
:local:
|
||||
:depth: 2
|
||||
|
||||
Concepts
|
||||
********
|
||||
|
||||
The kernel event logger does not exist unless it is configured for an
|
||||
application. The capacity of the kernel event logger is also configurable.
|
||||
By default, it has a ring buffer that can hold up to 128 32-bit words
|
||||
of event information.
|
||||
|
||||
The kernel event logger is capable of recording the following pre-defined
|
||||
event types:
|
||||
|
||||
* Interrupts.
|
||||
* Context switching of threads.
|
||||
* Kernel sleep events (i.e. entering and exiting a low power state).
|
||||
|
||||
The kernel event logger only records the pre-defined event types it has been
|
||||
configured to record. Each event type can be enabled independently.
|
||||
|
||||
An application can also define and record custom event types.
|
||||
The information recorded for a custom event, and the times
|
||||
at which it is recorded, must be implemented by the application.
|
||||
|
||||
All events recorded by the kernel event logger remain in its ring buffer
|
||||
until they are retrieved by the application for review and analysis. The
|
||||
retrieval and analysis logic must be implemented by the application.
|
||||
|
||||
.. important::
|
||||
An application must retrieve the events recorded by the kernel event logger
|
||||
in a timely manner, otherwise new events will be dropped once the event
|
||||
logger's ring buffer becomes full. A recommended approach is to use
|
||||
a cooperative thread to retrieve the events, either on a periodic basis
|
||||
or as its sole responsibility.
|
||||
|
||||
By default, the kernel event logger records all occurrences of all event types
|
||||
that have been enabled. However, it can also be configured to allow an
|
||||
application to dynamically start or stop the recording of events at any time,
|
||||
and to control which event types are being recorded. This permits
|
||||
the application to capture only the events that occur during times
|
||||
of particular interest, thereby reducing the work needed to analyze them.
|
||||
|
||||
.. note::
|
||||
The kernel event logger can also be instructed to ignore context switches
|
||||
involving a single specified thread. This can be used to avoid recording
|
||||
context switch events involving the thread that retrieves the events
|
||||
from the kernel event logger.
|
||||
|
||||
Event Formats
|
||||
=============
|
||||
|
||||
Each event recorded by the kernel event logger consists of one or more
|
||||
32-bit words of data that describe the event.
|
||||
|
||||
An **interrupt event** has the following format:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct {
|
||||
uint32_t timestamp; /* time of interrupt */
|
||||
uint32_t interrupt_id; /* ID of interrupt */
|
||||
};
|
||||
|
||||
A **context-switch event** has the following format:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct {
|
||||
uint32_t timestamp; /* time of context switch */
|
||||
uint32_t context_id; /* ID of thread that was switched out */
|
||||
};
|
||||
|
||||
A **sleep event** has the following format:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct {
|
||||
uint32_t sleep_timestamp; /* time when CPU entered sleep mode */
|
||||
uint32_t wake_timestamp; /* time when CPU exited sleep mode */
|
||||
uint32_t interrupt_id; /* ID of interrupt that woke CPU */
|
||||
};
|
||||
|
||||
A **custom event** must have a type ID that does not conflict with
|
||||
any existing pre-defined event type ID. The format of a custom event
|
||||
is application-defined, but must contain at least one 32-bit data word.
|
||||
A custom event may utilize a variable size, to allow different events
|
||||
of a single type to record differing amounts of information.
|
||||
|
||||
Timestamps
|
||||
==========
|
||||
|
||||
By default, the timestamp recorded with each pre-defined event is obtained from
|
||||
the kernel's :ref:`hardware clock <clocks_v2>`. This 32-bit clock counts up
|
||||
extremely rapidly, which means the timestamp value wraps around frequently.
|
||||
(For example, the Lakemont APIC timer for Quark SE wraps every 134 seconds.)
|
||||
This wraparound must be accounted for when analyzing kernel event logger data.
|
||||
In addition, care must be taken when tickless idle is enabled, in case a sleep
|
||||
duration exceeds 2^32 clock cycles.
|
||||
|
||||
If desired, the kernel event logger can be configured to record
|
||||
a custom timestamp, rather than the default timestamp.
|
||||
The application registers the callback function that generates the custom 32-bit
|
||||
timestamp at run-time by calling :cpp:func:`sys_k_event_logger_set_timer()`.
|
||||
|
||||
Implementation
|
||||
**************
|
||||
|
||||
Retrieving An Event
|
||||
===================
|
||||
|
||||
An event can be retrieved from the kernel event logger in a blocking or
|
||||
non-blocking manner using the following APIs:
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_get()`
|
||||
* :cpp:func:`sys_k_event_logger_get_wait()`
|
||||
* :cpp:func:`sys_k_event_logger_get_wait_timeout()`
|
||||
|
||||
In each case, the API also returns the type and size of the event, as well
|
||||
as the event information itself. The API also indicates how many events
|
||||
were dropped between the occurrence of the previous event and the retrieved
|
||||
event.
|
||||
|
||||
The following code illustrates how a thread can retrieve the events
|
||||
recorded by the kernel event logger.
|
||||
A sample application that shows how to collect kernel event data
|
||||
can also be found at :file:`samples/kernel_event_logger`.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
uint16_t event_id;
|
||||
uint8_t dropped_count;
|
||||
uint32_t data[3];
|
||||
uint8_t data_size;
|
||||
|
||||
while(1) {
|
||||
/* retrieve an event */
|
||||
data_size = SIZE32_OF(data);
|
||||
res = sys_k_event_logger_get_wait(&event_id, &dropped_count, data,
|
||||
&data_size);
|
||||
|
||||
if (dropped_count > 0) {
|
||||
/* ... Process the dropped events count ... */
|
||||
}
|
||||
|
||||
if (res > 0) {
|
||||
/* process the event */
|
||||
switch (event_id) {
|
||||
case KERNEL_EVENT_CONTEXT_SWITCH_EVENT_ID:
|
||||
/* ... Process the context switch event ... */
|
||||
break;
|
||||
case KERNEL_EVENT_INTERRUPT_EVENT_ID:
|
||||
/* ... Process the interrupt event ... */
|
||||
break;
|
||||
case KERNEL_EVENT_SLEEP_EVENT_ID:
|
||||
/* ... Process the sleep event ... */
|
||||
break;
|
||||
default:
|
||||
printf("unrecognized event id %d\n", event_id);
|
||||
}
|
||||
} else if (res == -EMSGSIZE) {
|
||||
/* ... Data array is too small to hold the event! ... */
|
||||
}
|
||||
}
|
||||
|
||||
Adding a Custom Event Type
|
||||
==========================
|
||||
|
||||
A custom event type must use an integer type ID that does not duplicate
|
||||
an existing type ID. The type IDs for the pre-defined events can be found
|
||||
in :file:`include/misc/kernel_event_logger.h`. If dynamic recording of
|
||||
events is enabled, the event type ID must not exceed 32.
|
||||
|
||||
Custom events can be written to the kernel event logger using the following
|
||||
APIs:
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_put()`
|
||||
* :cpp:func:`sys_k_event_logger_put_timed()`
|
||||
|
||||
Both of these APIs record an event as long as there is room in the kernel
|
||||
event logger's ring buffer. To enable dynamic recording of a custom event type,
|
||||
the application must first call :cpp:func:`sys_k_must_log_event()` to determine
|
||||
if event recording is currently active for that event type.
|
||||
|
||||
The following code illustrates how an application can write a custom
|
||||
event consisting of two 32-bit words to the kernel event logger.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
#define MY_CUSTOM_EVENT_ID 8
|
||||
|
||||
/* record custom event only if recording is currently wanted */
|
||||
if (sys_k_must_log_event(MY_CUSTOM_EVENT_ID)) {
|
||||
uint32_t data[2];
|
||||
|
||||
data[0] = custom_data_1;
|
||||
data[1] = custom_data_2;
|
||||
|
||||
sys_k_event_logger_put(MY_CUSTOM_EVENT_ID, data, ARRAY_SIZE(data));
|
||||
}
|
||||
|
||||
The following code illustrates how an application can write a custom event
|
||||
that records just a timestamp using a single 32-bit word.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
#define MY_CUSTOM_TIME_ONLY_EVENT_ID 9
|
||||
|
||||
if (sys_k_must_log_event(MY_CUSTOM_TIME_ONLY_EVENT_ID)) {
|
||||
sys_k_event_logger_put_timed(MY_CUSTOM_TIME_ONLY_EVENT_ID);
|
||||
}
|
||||
|
||||
Configuration Options
|
||||
*********************
|
||||
|
||||
Related configuration options:
|
||||
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_SLEEP`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC`
|
||||
* :option:`CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP`
|
||||
|
||||
APIs
|
||||
****
|
||||
|
||||
The following kernel event logger APIs are provided by
|
||||
:file:`kernel_event_logger.h`:
|
||||
|
||||
* :cpp:func:`sys_k_event_logger_register_as_collector()`
|
||||
* :cpp:func:`sys_k_event_logger_get()`
|
||||
* :cpp:func:`sys_k_event_logger_get_wait()`
|
||||
* :cpp:func:`sys_k_event_logger_get_wait_timeout()`
|
||||
* :cpp:func:`sys_k_must_log_event()`
|
||||
* :cpp:func:`sys_k_event_logger_put()`
|
||||
* :cpp:func:`sys_k_event_logger_put_timed()`
|
||||
* :cpp:func:`sys_k_event_logger_get_mask()`
|
||||
* :cpp:func:`sys_k_event_logger_set_mask()`
|
||||
* :cpp:func:`sys_k_event_logger_set_timer()`
|
||||
@@ -12,6 +12,6 @@ This section describes other services provided by the kernel.
|
||||
atomic.rst
|
||||
float.rst
|
||||
ring_buffers.rst
|
||||
event_logger.rst
|
||||
kernel_event_logger.rst
|
||||
c_library.rst
|
||||
cxx_support.rst
|
||||
|
||||
@@ -175,8 +175,8 @@ APIs
|
||||
|
||||
The following ring buffer APIs are provided by :file:`misc/ring_buffer.h`:
|
||||
|
||||
* :c:func:`SYS_RING_BUF_DECLARE_POW2()`
|
||||
* :c:func:`SYS_RING_BUF_DECLARE_SIZE()`
|
||||
* :cpp:func:`SYS_RING_BUF_DECLARE_POW2()`
|
||||
* :cpp:func:`SYS_RING_BUF_DECLARE_SIZE()`
|
||||
* :cpp:func:`sys_ring_buf_init()`
|
||||
* :cpp:func:`sys_ring_buf_is_empty()`
|
||||
* :cpp:func:`sys_ring_buf_space_get()`
|
||||
|
||||
@@ -103,7 +103,7 @@ new pending alerts.
|
||||
k_alert_init(&my_alert, my_alert_handler, 10);
|
||||
|
||||
Alternatively, an alert can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_ALERT_DEFINE()`.
|
||||
by calling :c:macro:`K_ALERT_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -234,5 +234,7 @@ APIs
|
||||
|
||||
The following alert APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_ALERT_DEFINE`
|
||||
* :cpp:func:`k_alert_init()`
|
||||
* :cpp:func:`k_alert_send()`
|
||||
* :cpp:func:`k_alert_recv()`
|
||||
|
||||
@@ -105,7 +105,7 @@ The following code defines and initializes a mutex.
|
||||
k_mutex_init(&my_mutex);
|
||||
|
||||
Alternatively, a mutex can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_MUTEX_DEFINE()`.
|
||||
by calling :c:macro:`K_MUTEX_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -130,7 +130,7 @@ available, and gives a warning if the mutex does not become availablee.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
if (k_mutex_lock(&my_mutex, 100) == 0) {
|
||||
if (k_mutex_lock(&my_mutex, K_MSEC(100)) == 0) {
|
||||
/* mutex successfully locked */
|
||||
} else {
|
||||
printf("Cannot lock XYZ display\n");
|
||||
@@ -166,6 +166,7 @@ APIs
|
||||
|
||||
The following mutex APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_MUTEX_DEFINE`
|
||||
* :cpp:func:`k_mutex_init()`
|
||||
* :cpp:func:`k_mutex_lock()`
|
||||
* :cpp:func:`k_mutex_unlock()`
|
||||
|
||||
@@ -60,7 +60,7 @@ semaphore by setting its count to 0 and its limit to 1.
|
||||
k_sem_init(&my_sem, 0, 1);
|
||||
|
||||
Alternatively, a semaphore can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_SEM_DEFINE()`.
|
||||
by calling :c:macro:`K_SEM_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -101,7 +101,7 @@ A warning is issued if the semaphore is not obtained in time.
|
||||
{
|
||||
...
|
||||
|
||||
if (k_sem_take(&my_sem, 50) != 0) {
|
||||
if (k_sem_take(&my_sem, K_MSEC(50)) != 0) {
|
||||
printk("Input data not available!");
|
||||
} else {
|
||||
/* fetch available data */
|
||||
@@ -130,6 +130,7 @@ APIs
|
||||
|
||||
The following semaphore APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_SEM_DEFINE`
|
||||
* :cpp:func:`k_sem_init()`
|
||||
* :cpp:func:`k_sem_give()`
|
||||
* :cpp:func:`k_sem_take()`
|
||||
|
||||
@@ -78,7 +78,7 @@ automatically aborts a thread if the thread triggers a fatal error condition,
|
||||
such as dereferencing a null pointer.
|
||||
|
||||
A thread can also be aborted by another thread (or by itself)
|
||||
by calling :c:func:`k_thread_abort()`. However, it is typically preferable
|
||||
by calling :cpp:func:`k_thread_abort()`. However, it is typically preferable
|
||||
to signal a thread to terminate itself gracefully, rather than aborting it.
|
||||
|
||||
As with thread termination, the kernel does not reclaim shared resources
|
||||
@@ -92,16 +92,16 @@ Thread Suspension
|
||||
=================
|
||||
|
||||
A thread can be prevented from executing for an indefinite period of time
|
||||
if it becomes **suspended**. The function :c:func:`k_thread_suspend()`
|
||||
if it becomes **suspended**. The function :cpp:func:`k_thread_suspend()`
|
||||
can be used to suspend any thread, including the calling thread.
|
||||
Suspending a thread that is already suspended has no additional effect.
|
||||
|
||||
Once suspended, a thread cannot be scheduled until another thread calls
|
||||
:c:func:`k_thread_resume()` to remove the suspension.
|
||||
:cpp:func:`k_thread_resume()` to remove the suspension.
|
||||
|
||||
.. note::
|
||||
A thread can prevent itself from executing for a specified period of time
|
||||
using :c:func:`k_sleep()`. However, this is different from suspending
|
||||
using :cpp:func:`k_sleep()`. However, this is different from suspending
|
||||
a thread since a sleeping thread becomes executable automatically when the
|
||||
time limit is reached.
|
||||
|
||||
@@ -146,7 +146,7 @@ Spawning a Thread
|
||||
|
||||
A thread is spawned by defining its stack area and then calling
|
||||
:cpp:func:`k_thread_spawn()`. The stack area is an array of bytes
|
||||
whose size must equal :c:func:`sizeof(struct k_thread)` plus the size
|
||||
whose size must equal :c:macro:`K_THREAD_SIZEOF` plus the size
|
||||
of the thread's stack. The stack area must be defined using the
|
||||
:c:macro:`__stack` attribute to ensure it is properly aligned.
|
||||
|
||||
@@ -169,7 +169,7 @@ The following code spawns a thread that starts immediately.
|
||||
MY_PRIORITY, 0, K_NO_WAIT);
|
||||
|
||||
Alternatively, a thread can be spawned at compile time by calling
|
||||
:c:macro:`K_THREAD_DEFINE()`. Observe that the macro defines
|
||||
:c:macro:`K_THREAD_DEFINE`. Observe that the macro defines
|
||||
the stack area and thread id variables automatically.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
@@ -226,8 +226,9 @@ Related configuration options:
|
||||
APIs
|
||||
****
|
||||
|
||||
The following thread APIs are are provided by :file:`kernel.h`:
|
||||
The following thread APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_THREAD_DEFINE`
|
||||
* :cpp:func:`k_thread_spawn()`
|
||||
* :cpp:func:`k_thread_cancel()`
|
||||
* :cpp:func:`k_thread_abort()`
|
||||
|
||||
@@ -135,7 +135,7 @@ are measured in system clock ticks. The time slice size is configurable,
|
||||
but this size can be changed while the application is running.
|
||||
|
||||
At the end of every time slice, the scheduler checks to see if the current
|
||||
thread is preemptible and, if so, implicitly invokes :c:func:`k_yield()`
|
||||
thread is preemptible and, if so, implicitly invokes :cpp:func:`k_yield()`
|
||||
on behalf of the thread. This gives other ready threads of the same priority
|
||||
the opportunity to execute before the current thread is scheduled again.
|
||||
If no threads of equal priority are ready, the current thread remains
|
||||
@@ -234,6 +234,8 @@ APIs
|
||||
The following thread scheduling-related APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :cpp:func:`k_current_get()`
|
||||
* :cpp:func:`k_sched_lock()`
|
||||
* :cpp:func:`k_sched_unlock()`
|
||||
* :cpp:func:`k_yield()`
|
||||
* :cpp:func:`k_sleep()`
|
||||
* :cpp:func:`k_wakeup()`
|
||||
|
||||
@@ -150,7 +150,7 @@ Defining a Workqueue
|
||||
A workqueue is defined using a variable of type :c:type:`struct k_work_q`.
|
||||
The workqueue is initialized by defining the stack area used by its thread
|
||||
and then calling :cpp:func:`k_work_q_start()`. The stack area is an array
|
||||
of bytes whose size must equal :c:func:`sizeof(struct k_thread)` plus the size
|
||||
of bytes whose size must equal :c:macro:`K_THREAD_SIZEOF` plus the size
|
||||
of the thread's stack. The stack area must be defined using the
|
||||
:c:macro:`__stack` attribute to ensure it is properly aligned.
|
||||
|
||||
@@ -158,7 +158,7 @@ The following code defines and initializes a workqueue.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
#define MY_STACK_SIZE 500
|
||||
#define MY_STACK_SIZE (K_THREAD_SIZEOF + 500)
|
||||
#define MY_PRIORITY 5
|
||||
|
||||
char __noinit __stack my_stack_area[MY_STACK_SIZE];
|
||||
|
||||
@@ -164,17 +164,10 @@ The following kernel clock APIs are provided by :file:`kernel.h`:
|
||||
* :cpp:func:`k_uptime_delta()`
|
||||
* :cpp:func:`k_uptime_delta_32()`
|
||||
* :cpp:func:`k_cycle_get_32()`
|
||||
|
||||
The following kernel clock variables are provided by :file:`kernel.h`:
|
||||
|
||||
:c:data:`sys_clock_ticks_per_sec`
|
||||
The number of system clock ticks in a single second.
|
||||
|
||||
:c:data:`sys_clock_hw_cycles_per_sec`
|
||||
The number of hardware clock cycles in a single second.
|
||||
|
||||
:c:data:`sys_clock_us_per_tick`
|
||||
The number of microseconds in a single system clock tick.
|
||||
|
||||
:c:data:`sys_clock_hw_cycles_per_tick`
|
||||
The number of hardware clock cycles in a single system clock tick.
|
||||
* :c:macro:`SYS_CLOCK_HW_CYCLES_TO_NS`
|
||||
* :c:macro:`K_NO_WAIT`
|
||||
* :c:macro:`K_MSEC`
|
||||
* :c:macro:`K_SECONDS`
|
||||
* :c:macro:`K_MINUTES`
|
||||
* :c:macro:`K_HOURS`
|
||||
* :c:macro:`K_FOREVER`
|
||||
|
||||
@@ -112,7 +112,7 @@ The following code defines and initializes a timer.
|
||||
k_timer_init(&my_timer, my_expiry_function, NULL);
|
||||
|
||||
Alternatively, a timer can be defined and initialized at compile time
|
||||
by calling :c:macro:`K_TIMER_DEFINE()`.
|
||||
by calling :c:macro:`K_TIMER_DEFINE`.
|
||||
|
||||
The following code has the same effect as the code segment above.
|
||||
|
||||
@@ -125,23 +125,22 @@ Using a Timer Expiry Function
|
||||
|
||||
The following code uses a timer to perform a non-trivial action on a periodic
|
||||
basis. Since the required work cannot be done at interrupt level,
|
||||
the timer's expiry function uses a :ref:`kernel alert object <alerts_v2>`
|
||||
to do the work in the context of the system workqueue.
|
||||
the timer's expiry function submits a work item to the
|
||||
:ref:`system workqueue <workqueues_v2>`, whose thread performs the work.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int my_alert_handler(struct k_alert *dummy)
|
||||
void my_work_handler(struct k_work *work)
|
||||
{
|
||||
/* do the processing that needs to be done periodically */
|
||||
...
|
||||
return 0;
|
||||
}
|
||||
|
||||
K_ALERT_DEFINE(my_alert, my_alert_handler);
|
||||
struct k_work my_work = K_WORK_INITIALIZER(my_work_handler);
|
||||
|
||||
void my_timer_handler(struct k_timer *dummy)
|
||||
{
|
||||
k_alert_send(&my_alert);
|
||||
k_work_submit(&my_work);
|
||||
}
|
||||
|
||||
K_TIMER_DEFINE(my_timer, my_timer_handler, NULL);
|
||||
@@ -149,7 +148,7 @@ to do the work in the context of the system workqueue.
|
||||
...
|
||||
|
||||
/* start periodic timer that expires once every second */
|
||||
k_timer_start(&my_timer, 1000, 1000);
|
||||
k_timer_start(&my_timer, K_SECONDS(1), K_SECONDS(1));
|
||||
|
||||
Reading Timer Status
|
||||
====================
|
||||
@@ -164,7 +163,7 @@ if the timer has expired on not.
|
||||
...
|
||||
|
||||
/* start one shot timer that expires after 200 ms */
|
||||
k_timer_start(&my_status_timer, 200, 0);
|
||||
k_timer_start(&my_status_timer, K_MSEC(200), 0);
|
||||
|
||||
/* do work */
|
||||
...
|
||||
@@ -195,7 +194,7 @@ are separated by the specified time interval.
|
||||
...
|
||||
|
||||
/* start one shot timer that expires after 500 ms */
|
||||
k_timer_start(&my_sync_timer, 500, 0);
|
||||
k_timer_start(&my_sync_timer, K_MSEC(500), 0);
|
||||
|
||||
/* do other work */
|
||||
...
|
||||
@@ -241,6 +240,7 @@ APIs
|
||||
|
||||
The following timer APIs are provided by :file:`kernel.h`:
|
||||
|
||||
* :c:macro:`K_TIMER_DEFINE`
|
||||
* :cpp:func:`k_timer_init()`
|
||||
* :cpp:func:`k_timer_start()`
|
||||
* :cpp:func:`k_timer_stop()`
|
||||
|
||||
@@ -77,7 +77,7 @@ The stack is split up as follows in the source tree:
|
||||
functionality of the Bluetooth stack, but are not necessary the best
|
||||
source for sample code (see ``samples/bluetooth`` instead).
|
||||
|
||||
``doc/bluetooth/``
|
||||
``doc/subsystems/bluetooth/``
|
||||
Extra documentation, such as PICS documents.
|
||||
|
||||
Further reading
|
||||
|
||||
@@ -68,8 +68,8 @@ static bool reliable_packet(uint8_t type)
|
||||
}
|
||||
|
||||
/* FIXME: Correct timeout */
|
||||
#define H5_RX_ACK_TIMEOUT 250
|
||||
#define H5_TX_ACK_TIMEOUT 250
|
||||
#define H5_RX_ACK_TIMEOUT K_MSEC(250)
|
||||
#define H5_TX_ACK_TIMEOUT K_MSEC(250)
|
||||
|
||||
#define SLIP_DELIMITER 0xc0
|
||||
#define SLIP_ESC 0xdb
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
#endif
|
||||
|
||||
/* Peripheral timeout to initialize Connection Parameter Update procedure */
|
||||
#define CONN_UPDATE_TIMEOUT (5 * MSEC_PER_SEC)
|
||||
#define CONN_UPDATE_TIMEOUT K_SECONDS(5)
|
||||
|
||||
static struct bt_conn conns[CONFIG_BLUETOOTH_MAX_CONN];
|
||||
static struct bt_conn_cb *callback_list;
|
||||
|
||||
@@ -65,8 +65,7 @@ void uart_console_out_debug_hook_install(uart_console_out_debug_hook_t *hook)
|
||||
}
|
||||
#define HANDLE_DEBUG_HOOK_OUT(c) \
|
||||
(debug_hook_out(c) == UART_CONSOLE_DEBUG_HOOK_HANDLED)
|
||||
#else
|
||||
#define HANDLE_DEBUG_HOOK_OUT(c) 0
|
||||
|
||||
#endif /* CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS */
|
||||
|
||||
#if 0 /* NOTUSED */
|
||||
@@ -102,12 +101,16 @@ static int console_in(void)
|
||||
|
||||
static int console_out(int c)
|
||||
{
|
||||
#ifdef CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS
|
||||
|
||||
int handled_by_debug_server = HANDLE_DEBUG_HOOK_OUT(c);
|
||||
|
||||
if (handled_by_debug_server) {
|
||||
return c;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS */
|
||||
|
||||
if ('\n' == c) {
|
||||
uart_poll_out(uart_console_dev, '\r');
|
||||
}
|
||||
|
||||
@@ -132,7 +132,7 @@ static void eth_enc28j60_clear_eth_reg(struct device *dev, uint16_t reg_addr,
|
||||
}
|
||||
|
||||
static void eth_enc28j60_write_mem(struct device *dev, uint8_t *data_buffer,
|
||||
uint8_t buf_len)
|
||||
uint16_t buf_len)
|
||||
{
|
||||
struct eth_enc28j60_runtime *context = dev->driver_data;
|
||||
uint8_t tx_buf[MAX_BUFFER_LENGTH + 1];
|
||||
@@ -149,7 +149,7 @@ static void eth_enc28j60_write_mem(struct device *dev, uint8_t *data_buffer,
|
||||
tx_buf[0] = ENC28J60_SPI_WBM;
|
||||
|
||||
for (int i = 0; i < num_segments;
|
||||
++i, index_buf += i * MAX_BUFFER_LENGTH) {
|
||||
++i, index_buf += MAX_BUFFER_LENGTH) {
|
||||
|
||||
memcpy(tx_buf + 1, index_buf, MAX_BUFFER_LENGTH);
|
||||
|
||||
@@ -164,7 +164,7 @@ static void eth_enc28j60_write_mem(struct device *dev, uint8_t *data_buffer,
|
||||
}
|
||||
|
||||
static void eth_enc28j60_read_mem(struct device *dev, uint8_t *data_buffer,
|
||||
uint8_t buf_len)
|
||||
uint16_t buf_len)
|
||||
{
|
||||
struct eth_enc28j60_runtime *context = dev->driver_data;
|
||||
uint8_t *index_buf;
|
||||
@@ -181,7 +181,7 @@ static void eth_enc28j60_read_mem(struct device *dev, uint8_t *data_buffer,
|
||||
tx_buf[0] = ENC28J60_SPI_RBM;
|
||||
|
||||
for (int i = 0; i < num_segments;
|
||||
++i, index_buf += i * MAX_BUFFER_LENGTH) {
|
||||
++i, index_buf += MAX_BUFFER_LENGTH) {
|
||||
|
||||
spi_transceive(context->spi, tx_buf, MAX_BUFFER_LENGTH + 1,
|
||||
tx_buf, MAX_BUFFER_LENGTH + 1);
|
||||
|
||||
@@ -74,10 +74,12 @@ static void _config(struct device *dev, uint32_t mask, int flags)
|
||||
cfg->port->lsr = mask;
|
||||
}
|
||||
|
||||
if (flags & GPIO_INT_ACTIVE_LOW) {
|
||||
cfg->port->fellsr = mask;
|
||||
} else if (flags & GPIO_INT_ACTIVE_HIGH) {
|
||||
if (flags & GPIO_INT_ACTIVE_HIGH) {
|
||||
/* Trigger in high level or rising edge */
|
||||
cfg->port->rehlsr = mask;
|
||||
} else {
|
||||
/* Trigger in low level or falling edge */
|
||||
cfg->port->fellsr = mask;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -230,8 +230,7 @@ static inline void dw_port_config(struct device *port, int flags)
|
||||
static inline int gpio_dw_config(struct device *port, int access_op,
|
||||
uint32_t pin, int flags)
|
||||
{
|
||||
if (((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) ||
|
||||
((flags & GPIO_DIR_IN) && (flags & GPIO_DIR_OUT))) {
|
||||
if ((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
@@ -40,8 +40,7 @@ static int gpio_k64_config(struct device *dev,
|
||||
uint8_t i;
|
||||
|
||||
/* check for an invalid pin configuration */
|
||||
if (((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) ||
|
||||
((flags & GPIO_DIR_IN) && (flags & GPIO_DIR_OUT))) {
|
||||
if ((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
|
||||
@@ -272,8 +272,9 @@ static inline void qmsi_port_config(struct device *port, int flags)
|
||||
static inline int gpio_qmsi_config(struct device *port,
|
||||
int access_op, uint32_t pin, int flags)
|
||||
{
|
||||
if (((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) ||
|
||||
((flags & GPIO_DIR_IN) && (flags & GPIO_DIR_OUT))) {
|
||||
/* If the pin/port is set to receive interrupts, make sure the pin
|
||||
is an input */
|
||||
if ((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
@@ -262,8 +262,8 @@ static inline void ss_qmsi_port_config(struct device *port, int flags)
|
||||
static inline int ss_gpio_qmsi_config(struct device *port, int access_op,
|
||||
uint32_t pin, int flags)
|
||||
{
|
||||
if (((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) ||
|
||||
((flags & GPIO_DIR_IN) && (flags & GPIO_DIR_OUT))) {
|
||||
/* check for an invalid pin configuration */
|
||||
if ((flags & GPIO_INT) && (flags & GPIO_DIR_OUT)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
@@ -666,7 +666,7 @@ static void cc2520_rx(int arg, int unused2)
|
||||
}
|
||||
|
||||
net_analyze_stack("CC2520 Rx Fiber stack",
|
||||
cc2520->cc2520_rx_stack,
|
||||
(unsigned char *)cc2520->cc2520_rx_stack,
|
||||
CONFIG_CC2520_RX_STACK_SIZE);
|
||||
goto flush;
|
||||
error:
|
||||
|
||||
@@ -37,12 +37,17 @@ config PINMUX_NAME
|
||||
config PINMUX_INIT_PRIORITY
|
||||
int
|
||||
prompt "Init priority"
|
||||
default 60
|
||||
default 45
|
||||
depends on PINMUX
|
||||
help
|
||||
Device driver initialization priority.
|
||||
The device needs to be initialized after all the devices it
|
||||
uses.
|
||||
Pinmux driver initialization priority.
|
||||
Pinmux driver almost certainly should be initialized before the
|
||||
rest of hardware devices (which may need specific pins already
|
||||
configured for them), and usually after generic GPIO drivers.
|
||||
Thus, its priority should be between KERNEL_INIT_PRIORITY_DEFAULT
|
||||
and KERNEL_INIT_PRIORITY_DEVICE. There are exceptions to this
|
||||
rule for particular boards. Don't change this value unless you
|
||||
know what you are doing.
|
||||
|
||||
config PINMUX_K64
|
||||
bool "Freescale K64-based Pin multiplexer driver"
|
||||
|
||||
@@ -53,5 +53,5 @@ int pinmux_fsl_k64_initialize(struct device *port)
|
||||
/* must be initialized after GPIO */
|
||||
DEVICE_AND_API_INIT(pmux, CONFIG_PINMUX_DEV_NAME, &pinmux_fsl_k64_initialize,
|
||||
NULL, NULL,
|
||||
POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
|
||||
POST_KERNEL, CONFIG_PINMUX_INIT_PRIORITY,
|
||||
&api_funcs);
|
||||
|
||||
@@ -114,4 +114,4 @@ static int fsl_frdm_k64f_pin_init(struct device *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
SYS_INIT(fsl_frdm_k64f_pin_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
|
||||
SYS_INIT(fsl_frdm_k64f_pin_init, POST_KERNEL, CONFIG_PINMUX_INIT_PRIORITY);
|
||||
|
||||
@@ -66,4 +66,4 @@ static int hexiwear_pin_init(struct device *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
SYS_INIT(hexiwear_pin_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
|
||||
SYS_INIT(hexiwear_pin_init, POST_KERNEL, CONFIG_PINMUX_INIT_PRIORITY);
|
||||
|
||||
@@ -137,6 +137,7 @@ static int __set_one_port(struct device *dev, qm_pwm_t id, uint32_t pwm,
|
||||
/* No interrupts */
|
||||
cfg.mask_interrupt = true;
|
||||
cfg.callback = NULL;
|
||||
cfg.callback_data = NULL;
|
||||
|
||||
/* Data for the timer to stay high and low */
|
||||
cfg.hi_count = on;
|
||||
|
||||
@@ -132,7 +132,7 @@ static int rtc_qmsi_set_config(struct device *dev, struct rtc_config *cfg)
|
||||
* values defined by clk_rtc_div and by QMSI's clk_rtc_div_t match for
|
||||
* both D2000 and SE.
|
||||
*/
|
||||
qm_cfg.prescaler = RTC_DIVIDER;
|
||||
qm_cfg.prescaler = (clk_rtc_div_t)RTC_DIVIDER;
|
||||
|
||||
rtc_critical_region_start(dev);
|
||||
|
||||
|
||||
@@ -82,29 +82,37 @@ static void bma280_thread_cb(void *arg)
|
||||
struct device *dev = arg;
|
||||
struct bma280_data *drv_data = dev->driver_data;
|
||||
uint8_t status = 0;
|
||||
int err = 0;
|
||||
|
||||
/* check for data ready */
|
||||
i2c_reg_read_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_STATUS_1, &status);
|
||||
err = i2c_reg_read_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_STATUS_1, &status);
|
||||
if (status & BMA280_BIT_DATA_INT_STATUS &&
|
||||
drv_data->data_ready_handler != NULL) {
|
||||
drv_data->data_ready_handler != NULL &&
|
||||
err == 0) {
|
||||
drv_data->data_ready_handler(dev,
|
||||
&drv_data->data_ready_trigger);
|
||||
}
|
||||
|
||||
/* check for any motion */
|
||||
i2c_reg_read_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_STATUS_0, &status);
|
||||
err = i2c_reg_read_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_STATUS_0, &status);
|
||||
if (status & BMA280_BIT_SLOPE_INT_STATUS &&
|
||||
drv_data->any_motion_handler != NULL) {
|
||||
drv_data->any_motion_handler != NULL &&
|
||||
err == 0) {
|
||||
drv_data->any_motion_handler(dev,
|
||||
&drv_data->data_ready_trigger);
|
||||
|
||||
/* clear latched interrupt */
|
||||
i2c_reg_update_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_RST_LATCH,
|
||||
BMA280_BIT_INT_LATCH_RESET,
|
||||
BMA280_BIT_INT_LATCH_RESET);
|
||||
err = i2c_reg_update_byte(drv_data->i2c, BMA280_I2C_ADDRESS,
|
||||
BMA280_REG_INT_RST_LATCH,
|
||||
BMA280_BIT_INT_LATCH_RESET,
|
||||
BMA280_BIT_INT_LATCH_RESET);
|
||||
|
||||
if (err < 0) {
|
||||
SYS_LOG_DBG("Could not update clear the interrupt");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
gpio_pin_enable_callback(drv_data->gpio, CONFIG_BMA280_GPIO_PIN_NUM);
|
||||
|
||||
@@ -172,13 +172,19 @@ static const struct sensor_driver_api bme280_api_funcs = {
|
||||
.channel_get = bme280_channel_get,
|
||||
};
|
||||
|
||||
static void bme280_read_compensation(struct bme280_data *data)
|
||||
static int bme280_read_compensation(struct bme280_data *data)
|
||||
{
|
||||
uint16_t buf[12];
|
||||
uint8_t hbuf[7];
|
||||
int err = 0;
|
||||
|
||||
i2c_burst_read(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_COMP_START, (uint8_t *)buf, sizeof(buf));
|
||||
err = i2c_burst_read(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_COMP_START,
|
||||
(uint8_t *)buf, sizeof(buf));
|
||||
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
data->dig_t1 = sys_le16_to_cpu(buf[0]);
|
||||
data->dig_t2 = sys_le16_to_cpu(buf[1]);
|
||||
@@ -186,8 +192,8 @@ static void bme280_read_compensation(struct bme280_data *data)
|
||||
|
||||
data->dig_p1 = sys_le16_to_cpu(buf[3]);
|
||||
data->dig_p2 = sys_le16_to_cpu(buf[4]);
|
||||
data->dig_p4 = sys_le16_to_cpu(buf[5]);
|
||||
data->dig_p3 = sys_le16_to_cpu(buf[6]);
|
||||
data->dig_p3 = sys_le16_to_cpu(buf[5]);
|
||||
data->dig_p4 = sys_le16_to_cpu(buf[6]);
|
||||
data->dig_p5 = sys_le16_to_cpu(buf[7]);
|
||||
data->dig_p6 = sys_le16_to_cpu(buf[8]);
|
||||
data->dig_p7 = sys_le16_to_cpu(buf[9]);
|
||||
@@ -195,11 +201,20 @@ static void bme280_read_compensation(struct bme280_data *data)
|
||||
data->dig_p9 = sys_le16_to_cpu(buf[11]);
|
||||
|
||||
if (data->chip_id == BME280_CHIP_ID) {
|
||||
i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_HUM_COMP_PART1, &data->dig_h1);
|
||||
err = i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_HUM_COMP_PART1,
|
||||
&data->dig_h1);
|
||||
|
||||
i2c_burst_read(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_HUM_COMP_PART2, hbuf, 7);
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
err = i2c_burst_read(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_HUM_COMP_PART2, hbuf, 7);
|
||||
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
data->dig_h2 = (hbuf[1] << 8) | hbuf[0];
|
||||
data->dig_h3 = hbuf[2];
|
||||
@@ -207,14 +222,20 @@ static void bme280_read_compensation(struct bme280_data *data)
|
||||
data->dig_h5 = ((hbuf[4] >> 4) & 0x0F) | (hbuf[5] << 4);
|
||||
data->dig_h6 = hbuf[6];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bme280_chip_init(struct device *dev)
|
||||
{
|
||||
struct bme280_data *data = (struct bme280_data *) dev->driver_data;
|
||||
|
||||
i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_ID, &data->chip_id);
|
||||
int err = i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
BME280_REG_ID, &data->chip_id);
|
||||
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
if (data->chip_id == BME280_CHIP_ID) {
|
||||
SYS_LOG_DBG("BME280 chip detected");
|
||||
@@ -226,7 +247,11 @@ static int bme280_chip_init(struct device *dev)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
bme280_read_compensation(data);
|
||||
err = bme280_read_compensation(data);
|
||||
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
if (data->chip_id == BME280_CHIP_ID) {
|
||||
i2c_reg_write_byte(data->i2c_master, data->i2c_slave_addr,
|
||||
|
||||
@@ -225,7 +225,7 @@ static int bmi160_acc_odr_set(struct device *dev, uint16_t freq_int,
|
||||
uint16_t freq_milli)
|
||||
{
|
||||
struct bmi160_device_data *bmi160 = dev->driver_data;
|
||||
uint8_t odr = bmi160_freq_to_odr_val(freq_int, freq_milli);
|
||||
int odr = bmi160_freq_to_odr_val(freq_int, freq_milli);
|
||||
|
||||
if (odr < 0) {
|
||||
return odr;
|
||||
@@ -242,7 +242,7 @@ static int bmi160_acc_odr_set(struct device *dev, uint16_t freq_int,
|
||||
return bmi160_reg_field_update(dev, BMI160_REG_ACC_CONF,
|
||||
BMI160_ACC_CONF_ODR_POS,
|
||||
BMI160_ACC_CONF_ODR_MASK,
|
||||
odr);
|
||||
(uint8_t) odr);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -482,7 +482,7 @@ static int bmi160_acc_config(struct device *dev, enum sensor_channel chan,
|
||||
static int bmi160_gyr_odr_set(struct device *dev, uint16_t freq_int,
|
||||
uint16_t freq_milli)
|
||||
{
|
||||
uint8_t odr = bmi160_freq_to_odr_val(freq_int, freq_milli);
|
||||
int odr = bmi160_freq_to_odr_val(freq_int, freq_milli);
|
||||
|
||||
if (odr < 0) {
|
||||
return odr;
|
||||
@@ -495,7 +495,7 @@ static int bmi160_gyr_odr_set(struct device *dev, uint16_t freq_int,
|
||||
return bmi160_reg_field_update(dev, BMI160_REG_GYR_CONF,
|
||||
BMI160_GYR_CONF_ODR_POS,
|
||||
BMI160_GYR_CONF_ODR_MASK,
|
||||
odr);
|
||||
(uint8_t) odr);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -769,7 +769,7 @@ static inline void bmi160_acc_channel_get(struct device *dev,
|
||||
|
||||
static int bmi160_temp_channel_get(struct device *dev, struct sensor_value *val)
|
||||
{
|
||||
int16_t temp_raw = 0;
|
||||
uint16_t temp_raw = 0;
|
||||
int32_t temp_micro = 0;
|
||||
struct bmi160_device_data *bmi160 = dev->driver_data;
|
||||
|
||||
|
||||
@@ -354,7 +354,7 @@ static struct ss_spi_qmsi_runtime spi_qmsi_mst_1_runtime;
|
||||
|
||||
DEVICE_DEFINE(ss_spi_master_1, CONFIG_SPI_1_NAME, ss_spi_qmsi_init,
|
||||
ss_spi_master_qmsi_device_ctrl, &spi_qmsi_mst_1_runtime,
|
||||
&spi_qmsi_mst_0_config, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,
|
||||
&spi_qmsi_mst_1_config, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,
|
||||
NULL);
|
||||
#endif /* CONFIG_SPI_1 */
|
||||
|
||||
|
||||
@@ -381,7 +381,7 @@ static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
|
||||
|
||||
/**
|
||||
* The NANO_SOFT_IRQ macro must be used as the value for the @a irq parameter
|
||||
* to NANO_CPU_INT_REGSITER when connecting to an interrupt that does not
|
||||
* to NANO_CPU_INT_REGISTER when connecting to an interrupt that does not
|
||||
* correspond to any IRQ line (such as spurious vector or SW IRQ)
|
||||
*/
|
||||
#define NANO_SOFT_IRQ ((unsigned int) (-1))
|
||||
@@ -397,10 +397,62 @@ extern void _arch_irq_enable(unsigned int irq);
|
||||
*/
|
||||
extern void _arch_irq_disable(unsigned int irq);
|
||||
|
||||
#ifdef CONFIG_FP_SHARING
|
||||
extern void k_float_enable(k_tid_t thread_id, unsigned int options);
|
||||
extern void k_float_disable(k_tid_t thread_id);
|
||||
#endif /* CONFIG_FP_SHARING */
|
||||
/**
|
||||
* @defgroup float_apis Floating Point APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Enable preservation of floating point context information.
|
||||
*
|
||||
* This routine informs the kernel that the specified thread (which may be
|
||||
* the current thread) will be using the floating point registers.
|
||||
* The @a options parameter indicates which floating point register sets
|
||||
* will be used by the specified thread:
|
||||
*
|
||||
* a) K_FP_REGS indicates x87 FPU and MMX registers only
|
||||
* b) K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
|
||||
*
|
||||
* Invoking this routine initializes the thread's floating point context info
|
||||
* to that of an FPU that has been reset. The next time the thread is scheduled
|
||||
* by _Swap() it will either inherit an FPU that is guaranteed to be in a "sane"
|
||||
* state (if the most recent user of the FPU was cooperatively swapped out)
|
||||
* or the thread's own floating point context will be loaded (if the most
|
||||
* recent user of the FPU was pre-empted, or if this thread is the first user
|
||||
* of the FPU). Thereafter, the kernel will protect the thread's FP context
|
||||
* so that it is not altered during a preemptive context switch.
|
||||
*
|
||||
* @warning
|
||||
* This routine should only be used to enable floating point support for a
|
||||
* thread that does not currently have such support enabled already.
|
||||
*
|
||||
* @param thread ID of thread.
|
||||
* @param options Registers to be preserved (K_FP_REGS or K_SSE_REGS).
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
extern void k_float_enable(k_tid_t thread, unsigned int options);
|
||||
|
||||
/**
|
||||
* @brief Disable preservation of floating point context information.
|
||||
*
|
||||
* This routine informs the kernel that the specified thread (which may be
|
||||
* the current thread) will no longer be using the floating point registers.
|
||||
*
|
||||
* @warning
|
||||
* This routine should only be used to disable floating point support for
|
||||
* a thread that currently has such support enabled.
|
||||
*
|
||||
* @param thread ID of thread.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
extern void k_float_disable(k_tid_t thread);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#include <stddef.h> /* for size_t */
|
||||
|
||||
|
||||
354
include/atomic.h
354
include/atomic.h
@@ -26,28 +26,26 @@ extern "C" {
|
||||
typedef int atomic_t;
|
||||
typedef atomic_t atomic_val_t;
|
||||
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
/**
|
||||
* @defgroup atomic_apis Atomic Services APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Atomic compare-and-set.
|
||||
*
|
||||
* @brief Atomic compare-and-set primitive
|
||||
* This routine performs an atomic compare-and-set on @a target. If the current
|
||||
* value of @a target equals @a old_value, @a target is set to @a new_value.
|
||||
* If the current value of @a target does not equal @a old_value, @a target
|
||||
* is left unchanged.
|
||||
*
|
||||
* This routine provides the compare-and-set operator. If the original value at
|
||||
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
|
||||
* function returns 1.
|
||||
*
|
||||
* If the original value at <target> does not equal <oldValue>, then the store
|
||||
* is not done and the function returns 0.
|
||||
*
|
||||
* The reading of the original value at <target>, the comparison,
|
||||
* and the write of the new value (if it occurs) all happen atomically with
|
||||
* respect to both interrupts and accesses of other processors to <target>.
|
||||
*
|
||||
* @param target address to be tested
|
||||
* @param old_value value to compare against
|
||||
* @param new_value value to compare against
|
||||
* @return Returns 1 if <new_value> is written, 0 otherwise.
|
||||
* @param target Address of atomic variable.
|
||||
* @param old_value Original value to compare against.
|
||||
* @param new_value New value to store.
|
||||
* @return 1 if @a new_value is written, 0 otherwise.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline int atomic_cas(atomic_t *target, atomic_val_t old_value,
|
||||
atomic_val_t new_value)
|
||||
{
|
||||
@@ -55,104 +53,121 @@ static inline int atomic_cas(atomic_t *target, atomic_val_t old_value,
|
||||
0, __ATOMIC_SEQ_CST,
|
||||
__ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern int atomic_cas(atomic_t *target, atomic_val_t old_value,
|
||||
atomic_val_t new_value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic addition primitive
|
||||
* @brief Atomic addition.
|
||||
*
|
||||
* This routine provides the atomic addition operator. The <value> is
|
||||
* atomically added to the value at <target>, placing the result at <target>,
|
||||
* and the old value from <target> is returned.
|
||||
* This routine performs an atomic addition on @a target.
|
||||
*
|
||||
* @param target memory location to add to
|
||||
* @param value the value to add
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to add.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic subtraction primitive
|
||||
* @brief Atomic subtraction.
|
||||
*
|
||||
* This routine provides the atomic subtraction operator. The <value> is
|
||||
* atomically subtracted from the value at <target>, placing the result at
|
||||
* <target>, and the old value from <target> is returned.
|
||||
* This routine performs an atomic subtraction on @a target.
|
||||
*
|
||||
* @param target the memory location to subtract from
|
||||
* @param value the value to subtract
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to subtract.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic increment primitive
|
||||
* @brief Atomic increment.
|
||||
*
|
||||
* @param target memory location to increment
|
||||
* This routine performs an atomic increment by 1 on @a target.
|
||||
*
|
||||
* This routine provides the atomic increment operator. The value at <target>
|
||||
* is atomically incremented by 1, and the old value from <target> is returned.
|
||||
* @param target Address of atomic variable.
|
||||
*
|
||||
* @return The value from <target> before the increment
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_inc(atomic_t *target)
|
||||
{
|
||||
return atomic_add(target, 1);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_inc(atomic_t *target);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic decrement primitive
|
||||
* @brief Atomic decrement.
|
||||
*
|
||||
* @param target memory location to decrement
|
||||
* This routine performs an atomic decrement by 1 on @a target.
|
||||
*
|
||||
* This routine provides the atomic decrement operator. The value at <target>
|
||||
* is atomically decremented by 1, and the old value from <target> is returned.
|
||||
* @param target Address of atomic variable.
|
||||
*
|
||||
* @return The value from <target> prior to the decrement
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_dec(atomic_t *target)
|
||||
{
|
||||
return atomic_sub(target, 1);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_dec(atomic_t *target);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic get primitive
|
||||
* @brief Atomic get.
|
||||
*
|
||||
* @param target memory location to read from
|
||||
* This routine performs an atomic read on @a target.
|
||||
*
|
||||
* This routine provides the atomic get primitive to atomically read
|
||||
* a value from <target>. It simply does an ordinary load. Note that <target>
|
||||
* is expected to be aligned to a 4-byte boundary.
|
||||
* @param target Address of atomic variable.
|
||||
*
|
||||
* @return The value read from <target>
|
||||
* @return Value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_get(const atomic_t *target)
|
||||
{
|
||||
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_get(const atomic_t *target);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic get-and-set primitive
|
||||
* @brief Atomic get-and-set.
|
||||
*
|
||||
* This routine provides the atomic set operator. The <value> is atomically
|
||||
* written at <target> and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to @a value and returns
|
||||
* the previous value of @a target.
|
||||
*
|
||||
* @param target the memory location to write to
|
||||
* @param value the value to write
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to write to @a target.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
/* This builtin, as described by Intel, is not a traditional
|
||||
@@ -161,236 +176,253 @@ static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
|
||||
*/
|
||||
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic clear primitive
|
||||
* @brief Atomic clear.
|
||||
*
|
||||
* This routine provides the atomic clear operator. The value of 0 is atomically
|
||||
* written at <target> and the previous value at <target> is returned. (Hence,
|
||||
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
|
||||
* This routine atomically sets @a target to zero and returns its previous
|
||||
* value. (Hence, it is equivalent to atomic_set(target, 0).)
|
||||
*
|
||||
* @param target the memory location to write
|
||||
* @param target Address of atomic variable.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_clear(atomic_t *target)
|
||||
{
|
||||
return atomic_set(target, 0);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_clear(atomic_t *target);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic bitwise inclusive OR primitive
|
||||
* @brief Atomic bitwise inclusive OR.
|
||||
*
|
||||
* This routine provides the atomic bitwise inclusive OR operator. The <value>
|
||||
* is atomically bitwise OR'ed with the value at <target>, placing the result
|
||||
* at <target>, and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to the bitwise inclusive OR of
|
||||
* @a target and @a value.
|
||||
*
|
||||
* @param target the memory location to be modified
|
||||
* @param value the value to OR
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to OR.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic bitwise exclusive OR (XOR) primitive
|
||||
* @brief Atomic bitwise exclusive OR (XOR).
|
||||
*
|
||||
* This routine provides the atomic bitwise exclusive OR operator. The <value>
|
||||
* is atomically bitwise XOR'ed with the value at <target>, placing the result
|
||||
* at <target>, and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to the bitwise exclusive OR (XOR) of
|
||||
* @a target and @a value.
|
||||
*
|
||||
* @param target the memory location to be modified
|
||||
* @param value the value to XOR
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to XOR
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic bitwise AND primitive
|
||||
* @brief Atomic bitwise AND.
|
||||
*
|
||||
* This routine provides the atomic bitwise AND operator. The <value> is
|
||||
* atomically bitwise AND'ed with the value at <target>, placing the result
|
||||
* at <target>, and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to the bitwise AND of @a target
|
||||
* and @a value.
|
||||
*
|
||||
* @param target the memory location to be modified
|
||||
* @param value the value to AND
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to AND.
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
|
||||
#endif
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Atomic bitwise NAND primitive
|
||||
* @brief Atomic bitwise NAND.
|
||||
*
|
||||
* This routine provides the atomic bitwise NAND operator. The <value> is
|
||||
* atomically bitwise NAND'ed with the value at <target>, placing the result
|
||||
* at <target>, and the previous value at <target> is returned.
|
||||
* This routine atomically sets @a target to the bitwise NAND of @a target
|
||||
* and @a value. (This operation is equivalent to target = ~(target & value).)
|
||||
*
|
||||
* The operation here is equivalent to *target = ~(tmp & value)
|
||||
* @param target Address of atomic variable.
|
||||
* @param value Value to NAND.
|
||||
*
|
||||
* @param target the memory location to be modified
|
||||
* @param value the value to NAND
|
||||
*
|
||||
* @return The previous value from <target>
|
||||
* @return Previous value of @a target.
|
||||
*/
|
||||
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
|
||||
static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
|
||||
{
|
||||
return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
#else
|
||||
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_dec(atomic_t *target);
|
||||
extern atomic_val_t atomic_inc(atomic_t *target);
|
||||
extern atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
|
||||
extern atomic_val_t atomic_clear(atomic_t *target);
|
||||
extern atomic_val_t atomic_get(const atomic_t *target);
|
||||
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
|
||||
extern int atomic_cas(atomic_t *target, atomic_val_t oldValue,
|
||||
atomic_val_t newValue);
|
||||
#endif /* CONFIG_ATOMIC_OPERATIONS_BUILTIN */
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* @brief Initialize an atomic variable.
|
||||
*
|
||||
* This macro can be used to initialize an atomic variable. For example,
|
||||
* @code atomic_t my_var = ATOMIC_INIT(75); @endcode
|
||||
*
|
||||
* @param i Value to assign to atomic variable.
|
||||
*/
|
||||
#define ATOMIC_INIT(i) (i)
|
||||
|
||||
/**
|
||||
* @cond INTERNAL_HIDDEN
|
||||
*/
|
||||
|
||||
#define ATOMIC_BITS (sizeof(atomic_val_t) * 8)
|
||||
#define ATOMIC_MASK(bit) (1 << ((bit) & (ATOMIC_BITS - 1)))
|
||||
#define ATOMIC_ELEM(addr, bit) ((addr) + ((bit) / ATOMIC_BITS))
|
||||
|
||||
/** @def ATOMIC_DEFINE
|
||||
* @brief Helper to declare an atomic_t array.
|
||||
/**
|
||||
* INTERNAL_HIDDEN @endcond
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Define an array of atomic variables.
|
||||
*
|
||||
* A helper to define an atomic_t array based on the number of needed
|
||||
* bits, e.g. any bit count of 32 or less will produce a single-element
|
||||
* array.
|
||||
* This macro defines an array of atomic variables containing at least
|
||||
* @a num_bits bits.
|
||||
*
|
||||
* @param name Name of atomic_t array.
|
||||
* @param num_bits Maximum number of bits needed.
|
||||
* @note
|
||||
* If used from file scope, the bits of the array are initialized to zero;
|
||||
* if used from within a function, the bits are left uninitialized.
|
||||
*
|
||||
* @return n/a
|
||||
* @param name Name of array of atomic variables.
|
||||
* @param num_bits Number of bits needed.
|
||||
*/
|
||||
#define ATOMIC_DEFINE(name, num_bits) \
|
||||
atomic_t name[1 + ((num_bits) - 1) / ATOMIC_BITS]
|
||||
|
||||
/** @brief Test whether a bit is set
|
||||
/**
|
||||
* @brief Atomically test a bit.
|
||||
*
|
||||
* Test whether bit number bit is set or not.
|
||||
* This routine tests whether bit number @a bit of @a target is set or not.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
*
|
||||
* @return 1 if the bit was set, 0 if it wasn't
|
||||
* @return 1 if the bit was set, 0 if it wasn't.
|
||||
*/
|
||||
static inline int atomic_test_bit(const atomic_t *addr, int bit)
|
||||
static inline int atomic_test_bit(const atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t val = atomic_get(ATOMIC_ELEM(addr, bit));
|
||||
atomic_val_t val = atomic_get(ATOMIC_ELEM(target, bit));
|
||||
|
||||
return (1 & (val >> (bit & (ATOMIC_BITS - 1))));
|
||||
}
|
||||
|
||||
/** @brief Clear a bit and return its old value
|
||||
/**
|
||||
* @brief Atomically test and clear a bit.
|
||||
*
|
||||
* Atomically clear a bit and return its old value.
|
||||
* Atomically clear bit number @a bit of @a target and return its old value.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
*
|
||||
* @return 1 if the bit was set, 0 if it wasn't
|
||||
* @return 1 if the bit was set, 0 if it wasn't.
|
||||
*/
|
||||
static inline int atomic_test_and_clear_bit(atomic_t *addr, int bit)
|
||||
static inline int atomic_test_and_clear_bit(atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t mask = ATOMIC_MASK(bit);
|
||||
atomic_val_t old;
|
||||
|
||||
old = atomic_and(ATOMIC_ELEM(addr, bit), ~mask);
|
||||
old = atomic_and(ATOMIC_ELEM(target, bit), ~mask);
|
||||
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/** @brief Set a bit and return its old value
|
||||
/**
|
||||
* @brief Atomically set a bit.
|
||||
*
|
||||
* Atomically set a bit and return its old value.
|
||||
* Atomically set bit number @a bit of @a target and return its old value.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
*
|
||||
* @return 1 if the bit was set, 0 if it wasn't
|
||||
* @return 1 if the bit was set, 0 if it wasn't.
|
||||
*/
|
||||
static inline int atomic_test_and_set_bit(atomic_t *addr, int bit)
|
||||
static inline int atomic_test_and_set_bit(atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t mask = ATOMIC_MASK(bit);
|
||||
atomic_val_t old;
|
||||
|
||||
old = atomic_or(ATOMIC_ELEM(addr, bit), mask);
|
||||
old = atomic_or(ATOMIC_ELEM(target, bit), mask);
|
||||
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/** @brief Clear a bit
|
||||
/**
|
||||
* @brief Atomically clear a bit.
|
||||
*
|
||||
* Atomically clear a bit.
|
||||
* Atomically clear bit number @a bit of @a target.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
* @return N/A
|
||||
*/
|
||||
static inline void atomic_clear_bit(atomic_t *addr, int bit)
|
||||
static inline void atomic_clear_bit(atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t mask = ATOMIC_MASK(bit);
|
||||
|
||||
atomic_and(ATOMIC_ELEM(addr, bit), ~mask);
|
||||
atomic_and(ATOMIC_ELEM(target, bit), ~mask);
|
||||
}
|
||||
|
||||
/** @brief Set a bit
|
||||
/**
|
||||
* @brief Atomically set a bit.
|
||||
*
|
||||
* Atomically set a bit.
|
||||
* Atomically set bit number @a bit of @a target.
|
||||
* The target may be a single atomic variable or an array of them.
|
||||
*
|
||||
* Also works for an array of multiple atomic_t variables, in which
|
||||
* case the bit number may go beyond the number of bits in a single
|
||||
* atomic_t variable.
|
||||
* @param target Address of atomic variable or array.
|
||||
* @param bit Bit number (starting from 0).
|
||||
*
|
||||
* @param addr base address to start counting from
|
||||
* @param bit bit number counted from the base address
|
||||
* @return N/A
|
||||
*/
|
||||
static inline void atomic_set_bit(atomic_t *addr, int bit)
|
||||
static inline void atomic_set_bit(atomic_t *target, int bit)
|
||||
{
|
||||
atomic_val_t mask = ATOMIC_MASK(bit);
|
||||
|
||||
atomic_or(ATOMIC_ELEM(addr, bit), mask);
|
||||
atomic_or(ATOMIC_ELEM(target, bit), mask);
|
||||
}
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -772,7 +772,8 @@ struct bt_gatt_discover_params;
|
||||
*
|
||||
* If discovery procedure has completed this callback will be called with
|
||||
* attr set to NULL. This will not happen if procedure was stopped by returning
|
||||
* BT_GATT_ITER_STOP.
|
||||
* BT_GATT_ITER_STOP. The attribute is read-only and cannot be cached without
|
||||
* copying its contents.
|
||||
*
|
||||
* @return BT_GATT_ITER_CONTINUE if should continue attribute discovery
|
||||
* or BT_GATT_ITER_STOP to stop discovery procedure.
|
||||
|
||||
116
include/irq.h
116
include/irq.h
@@ -30,95 +30,121 @@
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Configure a static interrupt.
|
||||
* @defgroup isr_apis Interrupt Service Routine APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Initialize an interrupt handler.
|
||||
*
|
||||
* All arguments must be computable by the compiler at build time.
|
||||
* This routine initializes an interrupt handler for an IRQ. The IRQ must be
|
||||
* subsequently enabled before the interrupt handler begins servicing
|
||||
* interrupts.
|
||||
*
|
||||
* @param irq_p IRQ line number
|
||||
* @param priority_p Interrupt priority
|
||||
* @param isr_p Interrupt service routine
|
||||
* @param isr_param_p ISR parameter
|
||||
* @param flags_p Arch-specific IRQ configuration flags
|
||||
* @warning
|
||||
* Although this routine is invoked at run-time, all of its arguments must be
|
||||
* computable by the compiler at build time.
|
||||
*
|
||||
* @return The vector assigned to this interrupt
|
||||
* @param irq_p IRQ line number.
|
||||
* @param priority_p Interrupt priority.
|
||||
* @param isr_p Address of interrupt service routine.
|
||||
* @param isr_param_p Parameter passed to interrupt service routine.
|
||||
* @param flags_p Architecture-specific IRQ configuration flags..
|
||||
*
|
||||
* @return Interrupt vector assigned to this interrupt.
|
||||
*/
|
||||
#define IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
|
||||
_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)
|
||||
|
||||
/**
|
||||
* @brief Disable all interrupts on the CPU (inline)
|
||||
* @brief Lock interrupts.
|
||||
*
|
||||
* This routine disables interrupts. It can be called from either interrupt,
|
||||
* task or fiber level. This routine returns an architecture-dependent
|
||||
* lock-out key representing the "interrupt disable state" prior to the call;
|
||||
* this key can be passed to irq_unlock() to re-enable interrupts.
|
||||
* This routine disables all interrupts on the CPU. It returns an unsigned
|
||||
* integer "lock-out key", which is an architecture-dependent indicator of
|
||||
* whether interrupts were locked prior to the call. The lock-out key must be
|
||||
* passed to irq_unlock() to re-enable interrupts.
|
||||
*
|
||||
* The lock-out key should only be used as the argument to the irq_unlock()
|
||||
* API. It should never be used to manually re-enable interrupts or to inspect
|
||||
* or manipulate the contents of the source register.
|
||||
* This routine can be called recursively, as long as the caller keeps track
|
||||
* of each lock-out key that is generated. Interrupts are re-enabled by
|
||||
* passing each of the keys to irq_unlock() in the reverse order they were
|
||||
* acquired. (That is, each call to irq_lock() must be balanced by
|
||||
* a corresponding call to irq_unlock().)
|
||||
*
|
||||
* This function can be called recursively: it will return a key to return the
|
||||
* state of interrupt locking to the previous level.
|
||||
* @note
|
||||
* This routine can be called by ISRs or by threads. If it is called by a
|
||||
* thread, the interrupt lock is thread-specific; this means that interrupts
|
||||
* remain disabled only while the thread is running. If the thread performs an
|
||||
* operation that allows another thread to run (for example, giving a semaphore
|
||||
* or sleeping for N milliseconds), the interrupt lock no longer applies and
|
||||
* interrupts may be re-enabled while other processing occurs. When the thread
|
||||
* once again becomes the current thread, the kernel re-establishes its
|
||||
* interrupt lock; this ensures the thread won't be interrupted until it has
|
||||
* explicitly released the interrupt lock it established.
|
||||
*
|
||||
* WARNINGS
|
||||
* Invoking a kernel routine with interrupts locked may result in
|
||||
* interrupts being re-enabled for an unspecified period of time. If the
|
||||
* called routine blocks, interrupts will be re-enabled while another
|
||||
* thread executes, or while the system is idle.
|
||||
*
|
||||
* The "interrupt disable state" is an attribute of a thread. Thus, if a
|
||||
* fiber or task disables interrupts and subsequently invokes a kernel
|
||||
* routine that causes the calling thread to block, the interrupt
|
||||
* disable state will be restored when the thread is later rescheduled
|
||||
* for execution.
|
||||
*
|
||||
* @return An architecture-dependent unsigned int lock-out key representing the
|
||||
* "interrupt disable state" prior to the call.
|
||||
* @warning
|
||||
* The lock-out key should never be used to manually re-enable interrupts
|
||||
* or to inspect or manipulate the contents of the CPU's interrupt bits.
|
||||
*
|
||||
* @return Lock-out key.
|
||||
*/
|
||||
#define irq_lock() _arch_irq_lock()
|
||||
|
||||
/**
|
||||
* @brief Unlock interrupts.
|
||||
*
|
||||
* @brief Enable all interrupts on the CPU (inline)
|
||||
* This routine reverses the effect of a previous call to irq_lock() using
|
||||
* the associated lock-out key. The caller must call the routine once for
|
||||
* each time it called irq_lock(), supplying the keys in the reverse order
|
||||
* they were acquired, before interrupts are enabled.
|
||||
*
|
||||
* This routine re-enables interrupts on the CPU. The @a key parameter
|
||||
* is an architecture-dependent lock-out key that is returned by a previous
|
||||
* invocation of irq_lock().
|
||||
* @note Can be called by ISRs.
|
||||
*
|
||||
* This routine can be called from either interrupt, task or fiber level
|
||||
*
|
||||
* @param key architecture-dependent lock-out key
|
||||
* @param key Lock-out key generated by irq_lock().
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
#define irq_unlock(key) _arch_irq_unlock(key)
|
||||
|
||||
/**
|
||||
* @brief Enable a specific IRQ
|
||||
* @brief Enable an IRQ.
|
||||
*
|
||||
* This routine enables interrupts from source @a irq.
|
||||
*
|
||||
* @param irq IRQ line.
|
||||
*
|
||||
* @param irq IRQ line
|
||||
* @return N/A
|
||||
*/
|
||||
#define irq_enable(irq) _arch_irq_enable(irq)
|
||||
|
||||
/**
|
||||
* @brief Disable a specific IRQ
|
||||
* @brief Disable an IRQ.
|
||||
*
|
||||
* This routine disables interrupts from source @a irq.
|
||||
*
|
||||
* @param irq IRQ line.
|
||||
*
|
||||
* @param irq IRQ line
|
||||
* @return N/A
|
||||
*/
|
||||
#define irq_disable(irq) _arch_irq_disable(irq)
|
||||
|
||||
/**
|
||||
* @brief Return IRQ enable state
|
||||
* @brief Get IRQ enable state.
|
||||
*
|
||||
* This routine indicates if interrupts from source @a irq are enabled.
|
||||
*
|
||||
* @param irq IRQ line.
|
||||
*
|
||||
* @param irq IRQ line
|
||||
* @return interrupt enable state, true or false
|
||||
*/
|
||||
#define irq_is_enabled(irq) _arch_irq_is_enabled(irq)
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
945
include/kernel.h
945
include/kernel.h
File diff suppressed because it is too large
Load Diff
@@ -177,12 +177,13 @@ static inline sys_dnode_t *sys_dlist_peek_head_not_empty(sys_dlist_t *list)
|
||||
* @param node the node from which to get the next element in the list
|
||||
*
|
||||
* @return a pointer to the next element from a node, NULL if node is the tail
|
||||
* or NULL (when node comes from reading the head of an empty list).
|
||||
*/
|
||||
|
||||
static inline sys_dnode_t *sys_dlist_peek_next(sys_dlist_t *list,
|
||||
sys_dnode_t *node)
|
||||
{
|
||||
return node == list->tail ? NULL : node->next;
|
||||
return (!node || node == list->tail) ? NULL : node->next;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -19,202 +19,146 @@
|
||||
* @brief Kernel event logger support.
|
||||
*/
|
||||
|
||||
|
||||
#include <misc/event_logger.h>
|
||||
|
||||
#ifndef __KERNEL_EVENT_LOGGER_H__
|
||||
#define __KERNEL_EVENT_LOGGER_H__
|
||||
|
||||
/**
|
||||
* @brief Kernel Event Logger
|
||||
* @defgroup nanokernel_event_logger Kernel Event Logger
|
||||
* @{
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
/* pre-defined event types */
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
|
||||
#define KERNEL_EVENT_LOGGER_CONTEXT_SWITCH_EVENT_ID 0x0001
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
|
||||
#define KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID 0x0002
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
|
||||
#define KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID 0x0003
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_TASK_MONITOR
|
||||
#define KERNEL_EVENT_LOGGER_TASK_MON_TASK_STATE_CHANGE_EVENT_ID 0x0004
|
||||
#define KERNEL_EVENT_LOGGER_TASK_MON_CMD_PACKET_EVENT_ID 0x0005
|
||||
#define KERNEL_EVENT_LOGGER_TASK_MON_KEVENT_EVENT_ID 0x0006
|
||||
#endif
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
|
||||
/**
|
||||
* Global variable of the ring buffer that allows user to implement
|
||||
* their own reading routine.
|
||||
*/
|
||||
struct event_logger sys_k_event_logger;
|
||||
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
|
||||
|
||||
/**
|
||||
* Callback used to set event timestamp
|
||||
*/
|
||||
typedef uint32_t (*sys_k_timer_func)(void);
|
||||
extern sys_k_timer_func timer_func;
|
||||
|
||||
static inline uint32_t _sys_k_get_time(void)
|
||||
{
|
||||
if (timer_func)
|
||||
return timer_func();
|
||||
else
|
||||
return sys_cycle_get_32();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Set kernel event logger timestamp function
|
||||
*
|
||||
* @details Calling this function permits to set the function
|
||||
* to be called by kernel event logger for setting the event
|
||||
* timestamp. By default, kernel event logger is using the
|
||||
* system timer. But on some boards where the timer driver
|
||||
* maintains the system timer cycle accumulator in software,
|
||||
* such as ones using the LOAPIC timer, the system timer behavior
|
||||
* leads to timestamp errors. For example, the timer interrupt is
|
||||
* logged with a wrong timestamp since the HW timer value has been
|
||||
* reset (periodic mode) but accumulated value not updated yet
|
||||
* (done later in the ISR).
|
||||
*
|
||||
* @param func Pointer to a function returning a 32-bit timer
|
||||
* Prototype: uint32_t (*func)(void)
|
||||
*/
|
||||
void sys_k_event_logger_set_timer(sys_k_timer_func func);
|
||||
#else
|
||||
static inline uint32_t _sys_k_get_time(void)
|
||||
{
|
||||
return sys_cycle_get_32();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
|
||||
extern struct event_logger sys_k_event_logger;
|
||||
extern int _sys_k_event_logger_mask;
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
|
||||
extern void _sys_k_event_logger_enter_sleep(void);
|
||||
#else
|
||||
static inline void _sys_k_event_logger_enter_sleep(void) {};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
|
||||
extern void _sys_k_event_logger_interrupt(void);
|
||||
#else
|
||||
static inline void _sys_k_event_logger_interrupt(void) {};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Set kernel event logger filtering mask
|
||||
*
|
||||
* @details Calling this macro sets the mask used to select which events
|
||||
* to store in the kernel event logger ring buffer. This flag can be set
|
||||
* at runtime and at any moment.
|
||||
* This capability is only available when CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
* is set. If enabled, no event is enabled for logging at initialization.
|
||||
* The mask bits shall be set according to events ID defined in
|
||||
* kernel_event_logger.h
|
||||
* For example, to enable interrupt logging the following shall be done:
|
||||
* sys_k_event_logger_set_mask(sys_k_event_logger_get_mask |
|
||||
* (1 << (KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID - 1)))
|
||||
* To disable it:
|
||||
* sys_k_event_logger_set_mask(sys_k_event_logger_get_mask &
|
||||
* ~(1 << (KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID - 1)))
|
||||
*
|
||||
* WARNING: task monitor events are not covered by this API. Please refer
|
||||
* to sys_k_event_logger_set_monitor_mask / sys_k_event_logger_get_monitor_mask
|
||||
* @brief Kernel Event Logger
|
||||
* @defgroup kernel_event_logger Kernel Event Logger
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @typedef sys_k_timer_func_t
|
||||
* @brief Event timestamp generator function type.
|
||||
*
|
||||
* A timestamp generator function is executed when the kernel event logger
|
||||
* generates an event containing a timestamp.
|
||||
*
|
||||
* @return Timestamp value (application-defined).
|
||||
*/
|
||||
typedef uint32_t (*sys_k_timer_func_t)(void);
|
||||
|
||||
/**
|
||||
* @cond INTERNAL_HIDDEN
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
|
||||
extern sys_k_timer_func_t _sys_k_get_time;
|
||||
#else
|
||||
#define _sys_k_get_time sys_cycle_get_32
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP */
|
||||
|
||||
/**
|
||||
* INTERNAL_HIDDEN @endcond
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Set kernel event logger timestamp function.
|
||||
*
|
||||
* This routine instructs the kernel event logger to call @a func
|
||||
* whenever it needs to generate an event timestamp. By default,
|
||||
* the kernel's hardware timer is used.
|
||||
*
|
||||
* @note
|
||||
* On some boards the hardware timer is not a pure hardware up counter,
|
||||
* which can lead to timestamp errors. For example, boards using the LOAPIC
|
||||
* timer can run it in periodic mode, which requires software to update
|
||||
* a count of accumulated cycles each time the timer hardware resets itself
|
||||
* to zero. This can result in an incorrect timestamp being generated
|
||||
* if it occurs after the timer hardware has reset but before the timer ISR
|
||||
* has updated accumulated cycle count.
|
||||
*
|
||||
* @param func Address of timestamp function to be used.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
|
||||
static inline void sys_k_event_logger_set_timer(sys_k_timer_func_t func)
|
||||
{
|
||||
_sys_k_get_time = func;
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP */
|
||||
|
||||
/**
|
||||
* @brief Set kernel event logger filtering mask.
|
||||
*
|
||||
* This routine specifies which events are recorded by the kernel event logger.
|
||||
* It can only be used when dynamic event logging has been configured.
|
||||
*
|
||||
* Each mask bit corresponds to a kernel event type. The least significant
|
||||
* mask bit corresponds to event type 1, the next bit to event type 2,
|
||||
* and so on.
|
||||
*
|
||||
* @param value Bitmask indicating events to be recorded.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
static inline void sys_k_event_logger_set_mask(int value)
|
||||
{
|
||||
_sys_k_event_logger_mask = value;
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC */
|
||||
|
||||
/**
|
||||
* @brief Get kernel event logger filtering mask
|
||||
* @brief Get kernel event logger filtering mask.
|
||||
*
|
||||
* @details Calling this macro permits to read the mask used to select which
|
||||
* events are stored in the kernel event logger ring buffer. This macro can be
|
||||
* used at runtime and at any moment.
|
||||
* This capability is only available when CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
* is set. If enabled, no event is enabled for logging at initialization.
|
||||
* This routine indicates which events are currently being recorded by
|
||||
* the kernel event logger. It can only be used when dynamic event logging
|
||||
* has been configured. By default, no events are recorded.
|
||||
*
|
||||
* @see sys_k_event_logger_set_mask(value) for details
|
||||
*
|
||||
* WARNING: task monitor events are not covered by this API. Please refer
|
||||
* to sys_k_event_logger_set_monitor_mask / sys_k_event_logger_get_monitor_mask
|
||||
* @return Bitmask indicating events that are being recorded.
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
static inline int sys_k_event_logger_get_mask(void)
|
||||
{
|
||||
return _sys_k_event_logger_mask;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TASK_MONITOR
|
||||
|
||||
extern int _k_monitor_mask;
|
||||
|
||||
/**
|
||||
* @brief Set task monitor filtering mask
|
||||
*
|
||||
* @details Calling this function sets the mask used to select which task monitor
|
||||
* events to store in the kernel event logger ring buffer. This flag can be set
|
||||
* at runtime and at any moment.
|
||||
* This capability is only available when CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
* is set. If enabled, no event is enabled for logging at initialization
|
||||
* so CONFIG_TASK_MONITOR_MASK is ignored
|
||||
*
|
||||
* The mask bits shall be set according to monitor events defined in
|
||||
* micro_private.h
|
||||
*
|
||||
* For example, to enable k_swapper cmd logging the following shall be done:
|
||||
* sys_k_event_logger_set_monitor_mask(sys_k_event_logger_get_monitor_mask |
|
||||
* (1 << (MON_KSERV - 1)))
|
||||
* To disable it:
|
||||
* sys_k_event_logger_set_mask(sys_k_event_logger_get_mask &
|
||||
* ~(1 << (MON_KSERV - 1)))
|
||||
*
|
||||
*/
|
||||
static inline void sys_k_event_logger_set_monitor_mask(int value)
|
||||
{
|
||||
_k_monitor_mask = value;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get task monitor filtering mask
|
||||
*
|
||||
* @details Calling this function permits to read the mask used to select which
|
||||
* task monitor events to store in the kernel event logger ring buffer. This
|
||||
* function can be used at runtime and at any moment.
|
||||
* This capability is only available when CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
* is set. If enabled, no event is enabled for logging at initialization
|
||||
* so CONFIG_TASK_MONITOR_MASK is ignored
|
||||
*
|
||||
* @see sys_k_event_logger_set_monitor_mask() for details
|
||||
*
|
||||
*/
|
||||
static inline int sys_k_event_logger_get_monitor_mask(void)
|
||||
{
|
||||
return _k_monitor_mask;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_TASK_MONITOR */
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC */
|
||||
|
||||
/**
|
||||
* @brief Check if an event type has to be logged or not
|
||||
* @brief Indicate if an event type is currently being recorded.
|
||||
*
|
||||
* @details This function must be used before calling any sys_k_event_logger_put*
|
||||
* function. In case CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC is enabled, that function
|
||||
* permits to enable or disable the logging of each individual event at runtime
|
||||
* This routine indicates if event type @a event_type should be recorded
|
||||
* by the kernel event logger when the event occurs. The routine should be
|
||||
* used by code that writes an event to the kernel event logger to ensure
|
||||
* that only events of interest to the application are recorded.
|
||||
*
|
||||
* @param event_type The identification of the event.
|
||||
* @param event_type Event ID.
|
||||
*
|
||||
* @return 1 if event should be recorded, or 0 if not.
|
||||
*
|
||||
*/
|
||||
|
||||
static inline int sys_k_must_log_event(int event_type)
|
||||
{
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
|
||||
@@ -225,159 +169,150 @@ static inline int sys_k_must_log_event(int event_type)
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Sends a event message to the kernel event logger.
|
||||
* @brief Write an event to the kernel event logger.
|
||||
*
|
||||
* @details Sends a event message to the kernel event logger
|
||||
* and informs that there are messages available.
|
||||
* This routine writes an event message to the kernel event logger.
|
||||
*
|
||||
* @param event_id The identification of the event.
|
||||
* @param data Pointer to the data of the message.
|
||||
* @param data_size Size of the data in 32-bit words.
|
||||
* @param event_id Event ID.
|
||||
* @param event_data Address of event data.
|
||||
* @param data_size Size of event data (number of 32-bit words).
|
||||
*
|
||||
* @return No return value.
|
||||
* @return N/A
|
||||
*/
|
||||
#define sys_k_event_logger_put(event_id, data, data_size) \
|
||||
sys_event_logger_put(&sys_k_event_logger, event_id, data, data_size)
|
||||
|
||||
static inline void sys_k_event_logger_put(uint16_t event_id,
|
||||
uint32_t *event_data,
|
||||
uint8_t data_size)
|
||||
{
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
sys_event_logger_put(&sys_k_event_logger, event_id,
|
||||
event_data, data_size);
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER */
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Sends a event message to the kernel event logger with the current
|
||||
* timestamp.
|
||||
* @brief Write an event to the kernel event logger (with timestamp only).
|
||||
*
|
||||
* @details Sends a event message to the kernel event logger and informs that
|
||||
* there messages available. The timestamp when the event occurred is stored
|
||||
* as part of the event message.
|
||||
* This routine writes an event message to the kernel event logger.
|
||||
* The event records a single 32-bit word containing a timestamp.
|
||||
*
|
||||
* @param event_id The identification of the event.
|
||||
* @param event_id Event ID.
|
||||
*
|
||||
* @return No return value.
|
||||
* @return N/A
|
||||
*/
|
||||
void sys_k_event_logger_put_timed(uint16_t event_id);
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
extern void sys_k_event_logger_put_timed(uint16_t event_id);
|
||||
#else
|
||||
static inline void sys_k_event_logger_put_timed(uint16_t event_id) {};
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER */
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message, or returns without waiting.
|
||||
*
|
||||
* This routine retrieves the next recorded event from the kernel event logger,
|
||||
* or returns immediately if no such event exists.
|
||||
*
|
||||
* @param event_id Area to store event type ID.
|
||||
* @param dropped Area to store number of events that were dropped between
|
||||
* the previous event and the retrieved event.
|
||||
* @param event_data Buffer to store event data.
|
||||
* @param data_size Size of event data buffer (number of 32-bit words).
|
||||
*
|
||||
* @retval positive_integer Number of event data words retrieved;
|
||||
* @a event_id, @a dropped, and @a buffer have been updated.
|
||||
* @retval 0 Returned without waiting; no event was retrieved.
|
||||
* @retval -EMSGSIZE Buffer too small; @a data_size now indicates
|
||||
* the size of the event to be retrieved.
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
static inline int sys_k_event_logger_get(uint16_t *event_id, uint8_t *dropped,
|
||||
uint32_t *event_data, uint8_t *data_size)
|
||||
{
|
||||
return sys_event_logger_get(&sys_k_event_logger, event_id, dropped,
|
||||
event_data, data_size);
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER */
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message.
|
||||
*
|
||||
* @details Retrieves a kernel event message copying it to the provided
|
||||
* buffer. If the buffer is smaller than the message size the function returns
|
||||
* an error. The function retrieves messages in FIFO order.
|
||||
* This routine retrieves the next recorded event from the kernel event logger.
|
||||
* If there is no such event the caller pends until it is available.
|
||||
*
|
||||
* @param event_id Pointer to the id of the event fetched
|
||||
* @param dropped Pointer to how many events were dropped
|
||||
* @param buffer Pointer to the buffer where the message will be copied.
|
||||
* @param buffer_size Size of the buffer in 32-bit words.
|
||||
* @param event_id Area to store event type ID.
|
||||
* @param dropped Area to store number of events that were dropped between
|
||||
* the previous event and the retrieved event.
|
||||
* @param event_data Buffer to store event data.
|
||||
* @param data_size Size of event data buffer (number of 32-bit words).
|
||||
*
|
||||
* @return -EMSGSIZE if the buffer size is smaller than the message size,
|
||||
* the amount of 32-bit words copied or zero if there are no kernel event
|
||||
* messages available.
|
||||
* @retval positive_integer Number of event data words retrieved;
|
||||
* @a event_id, @a dropped, and @a buffer have been updated.
|
||||
* @retval -EMSGSIZE Buffer too small; @a data_size now indicates
|
||||
* the size of the event to be retrieved.
|
||||
*/
|
||||
#define sys_k_event_logger_get(event_id, dropped, buffer, buffer_size) \
|
||||
sys_event_logger_get(&sys_k_event_logger, event_id, dropped, buffer, \
|
||||
buffer_size)
|
||||
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message, wait if there is no message
|
||||
* available.
|
||||
*
|
||||
* @details Retrieves a kernel event message copying it to the provided
|
||||
* buffer. If the buffer is smaller than the message size the function returns
|
||||
* an error. The function retrieves messages in FIFO order. If there is no
|
||||
* kernel event message available the caller pends until a new message is
|
||||
* logged.
|
||||
*
|
||||
* @param event_id Pointer to the id of the event fetched
|
||||
* @param dropped Pointer to how many events were dropped
|
||||
* @param buffer Pointer to the buffer where the message will be copied.
|
||||
* @param buffer_size Size of the buffer in 32-bit words.
|
||||
*
|
||||
* @return -EMSGSIZE if the buffer size is smaller than the message size, or
|
||||
* the amount of 32-bit words copied.
|
||||
*/
|
||||
#define sys_k_event_logger_get_wait(event_id, dropped, buffer, buffer_size) \
|
||||
sys_event_logger_get_wait(&sys_k_event_logger, event_id, dropped, \
|
||||
buffer, buffer_size)
|
||||
|
||||
|
||||
#ifdef CONFIG_NANO_TIMEOUTS
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message, wait with a timeout if there is
|
||||
* no profiling event messages available.
|
||||
*
|
||||
* @details Retrieves a kernel event message copying it to the provided
|
||||
* buffer. If the buffer is smaller than the message size the function returns
|
||||
* an error. The function retrieves messages in FIFO order. If there are no
|
||||
* kernel event messages available the caller pends until a new message is
|
||||
* logged or the timeout expires.
|
||||
*
|
||||
* @param event_id Pointer to the id of the event fetched
|
||||
* @param dropped Pointer to how many events were dropped
|
||||
* @param buffer Pointer to the buffer where the message will be copied.
|
||||
* @param buffer_size Size of the buffer in 32-bit words.
|
||||
* @param timeout Timeout in ticks.
|
||||
*
|
||||
* @return -EMSGSIZE if the buffer size is smaller than the message size, the
|
||||
* amount of 32-bit words copied or zero if the timeout expires and the was no
|
||||
* message available.
|
||||
*/
|
||||
#define sys_k_event_logger_get_wait_timeout(event_id, dropped, buffer, buffer_size, \
|
||||
timeout) \
|
||||
sys_event_logger_get_wait_timeout(&sys_k_event_logger, event_id, \
|
||||
dropped, buffer, \
|
||||
buffer_size, timeout)
|
||||
#endif /* CONFIG_NANO_TIMEOUTS */
|
||||
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
|
||||
|
||||
/**
|
||||
* @brief Register the fiber that calls the function as collector
|
||||
*
|
||||
* @details Initialize internal profiling data. This avoid registering
|
||||
* the context switch of the collector fiber when
|
||||
* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH is enable.
|
||||
*
|
||||
* @return No return value.
|
||||
*/
|
||||
void sys_k_event_logger_register_as_collector(void);
|
||||
#else /* !CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
|
||||
static inline void sys_k_event_logger_register_as_collector(void) {};
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
|
||||
void _sys_k_event_logger_enter_sleep(void);
|
||||
#else
|
||||
static inline void _sys_k_event_logger_enter_sleep(void) {};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
|
||||
void _sys_k_event_logger_interrupt(void);
|
||||
#else
|
||||
static inline void _sys_k_event_logger_interrupt(void) {};
|
||||
#endif
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#else /* !CONFIG_KERNEL_EVENT_LOGGER */
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
|
||||
static inline void sys_k_event_logger_put(uint16_t event_id, uint32_t *event_data,
|
||||
uint8_t data_size) {};
|
||||
static inline void sys_k_event_logger_put_timed(uint16_t event_id) {};
|
||||
static inline void _sys_k_event_logger_enter_sleep(void) {};
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER
|
||||
static inline int sys_k_event_logger_get_wait(uint16_t *event_id,
|
||||
uint8_t *dropped, uint32_t *event_data, uint8_t *data_size)
|
||||
{
|
||||
return sys_event_logger_get_wait(&sys_k_event_logger, event_id, dropped,
|
||||
event_data, data_size);
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER */
|
||||
|
||||
|
||||
/**
|
||||
* @brief Retrieves a kernel event message, or waits for a specified time.
|
||||
*
|
||||
* This routine retrieves the next recorded event from the kernel event logger.
|
||||
* If there is no such event the caller pends until it is available or until
|
||||
* the specified timeout expires.
|
||||
*
|
||||
* @param event_id Area to store event type ID.
|
||||
* @param dropped Area to store number of events that were dropped between
|
||||
* the previous event and the retrieved event.
|
||||
* @param event_data Buffer to store event data.
|
||||
* @param data_size Size of event data buffer (number of 32-bit words).
|
||||
* @param timeout Timeout in system clock ticks.
|
||||
*
|
||||
* @retval positive_integer Number of event data words retrieved;
|
||||
* @a event_id, @a dropped, and @a buffer have been updated.
|
||||
* @retval 0 Waiting period timed out; no event was retrieved.
|
||||
* @retval -EMSGSIZE Buffer too small; @a data_size now indicates
|
||||
* the size of the event to be retrieved.
|
||||
*/
|
||||
#if defined(CONFIG_KERNEL_EVENT_LOGGER) && defined(CONFIG_NANO_TIMEOUTS)
|
||||
static inline int sys_k_event_logger_get_wait_timeout(uint16_t *event_id,
|
||||
uint8_t *dropped, uint32_t *event_data,
|
||||
uint8_t *data_size, uint32_t timeout)
|
||||
{
|
||||
return sys_event_logger_get_wait_timeout(&sys_k_event_logger, event_id,
|
||||
dropped, event_data,
|
||||
data_size, timeout);
|
||||
}
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER && CONFIG_NANO_TIMEOUTS */
|
||||
|
||||
/**
|
||||
* @brief Register thread that retrieves kernel events.
|
||||
*
|
||||
* This routine instructs the kernel event logger not to record context
|
||||
* switch events for the calling thread. It is typically called by the thread
|
||||
* that retrieves events from the kernel event logger.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
|
||||
void sys_k_event_logger_register_as_collector(void);
|
||||
#else
|
||||
static inline void sys_k_event_logger_register_as_collector(void) {};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @} end defgroup kernel_event_logger
|
||||
*/
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#endif /* __KERNEL_EVENT_LOGGER_H__ */
|
||||
|
||||
@@ -29,13 +29,6 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Ring Buffer APIs
|
||||
* @defgroup nanokernel_ringbuffer Ring Bufer
|
||||
* @ingroup nanokernel_services
|
||||
* @{
|
||||
*/
|
||||
|
||||
#define SIZE32_OF(x) (sizeof((x))/sizeof(uint32_t))
|
||||
|
||||
/**
|
||||
@@ -56,13 +49,26 @@ struct ring_buf {
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Declare a power-of-two sized ring buffer
|
||||
* @defgroup ring_buffer_apis Ring Buffer APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Statically define and initialize a high performance ring buffer.
|
||||
*
|
||||
* Use of this macro is preferred over SYS_RING_BUF_DECLARE_SIZE() as it
|
||||
* will not need to use expensive modulo operations.
|
||||
* This macro establishes a ring buffer whose size must be a power of 2;
|
||||
* that is, the ring buffer contains 2^pow 32-bit words, where @a pow is
|
||||
* the specified ring buffer size exponent. A high performance ring buffer
|
||||
* doesn't require the use of modulo arithmetic operations to maintain itself.
|
||||
*
|
||||
* @param name File-scoped name of the ring buffer to declare
|
||||
* @param pow Create a buffer of 2^pow 32-bit elements
|
||||
* The ring buffer can be accessed outside the module where it is defined
|
||||
* using:
|
||||
*
|
||||
* @code extern struct ring_buf <name>; @endcode
|
||||
*
|
||||
* @param name Name of the ring buffer.
|
||||
* @param pow Ring buffer size exponent.
|
||||
*/
|
||||
#define SYS_RING_BUF_DECLARE_POW2(name, pow) \
|
||||
static uint32_t _ring_buffer_data_##name[1 << (pow)]; \
|
||||
@@ -73,13 +79,18 @@ struct ring_buf {
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Declare an arbitrary sized ring buffer
|
||||
* @brief Statically define and initialize a standard ring buffer.
|
||||
*
|
||||
* A ring buffer declared in this way has more flexibility on buffer size
|
||||
* but will use more expensive modulo operations to maintain itself.
|
||||
* This macro establishes a ring buffer of an arbitrary size. A standard
|
||||
* ring buffer uses modulo arithmetic operations to maintain itself.
|
||||
*
|
||||
* @param name File-scoped name of the ring buffer to declare
|
||||
* @param size32 Size of buffer in 32-bit elements
|
||||
* The ring buffer can be accessed outside the module where it is defined
|
||||
* using:
|
||||
*
|
||||
* @code extern struct ring_buf <name>; @endcode
|
||||
*
|
||||
* @param name Name of the ring buffer.
|
||||
* @param size32 Size of ring buffer (in 32-bit words).
|
||||
*/
|
||||
#define SYS_RING_BUF_DECLARE_SIZE(name, size32) \
|
||||
static uint32_t _ring_buffer_data_##name[size32]; \
|
||||
@@ -89,16 +100,19 @@ struct ring_buf {
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Initialize a ring buffer, in cases where DECLARE_RING_BUF_STATIC
|
||||
* isn't used.
|
||||
* @brief Initialize a ring buffer.
|
||||
*
|
||||
* For optimal performance, use size values that are a power of 2 as they
|
||||
* don't require expensive modulo operations when maintaining the buffer.
|
||||
* This routine initializes a ring buffer, prior to its first use. It is only
|
||||
* used for ring buffers not defined using SYS_RING_BUF_DECLARE_POW2 or
|
||||
* SYS_RING_BUF_DECLARE_SIZE.
|
||||
*
|
||||
* @param buf Ring buffer to initialize
|
||||
* @param size Size of the provided buffer in 32-bit chunks
|
||||
* @param data Data area for the ring buffer, typically
|
||||
* uint32_t data[size]
|
||||
* Setting @a size to a power of 2 establishes a high performance ring buffer
|
||||
* that doesn't require the use of modulo arithmetic operations to maintain
|
||||
* itself.
|
||||
*
|
||||
* @param buf Address of ring buffer.
|
||||
* @param size Ring buffer size (in 32-bit words).
|
||||
* @param data Ring buffer data area (typically uint32_t data[size]).
|
||||
*/
|
||||
static inline void sys_ring_buf_init(struct ring_buf *buf, uint32_t size,
|
||||
uint32_t *data)
|
||||
@@ -118,9 +132,11 @@ static inline void sys_ring_buf_init(struct ring_buf *buf, uint32_t size,
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Determine if a ring buffer is empty
|
||||
* @brief Determine if a ring buffer is empty.
|
||||
*
|
||||
* @return nonzero if the buffer is empty
|
||||
* @param buf Address of ring buffer.
|
||||
*
|
||||
* @return 1 if the ring buffer is empty, or 0 if not.
|
||||
*/
|
||||
static inline int sys_ring_buf_is_empty(struct ring_buf *buf)
|
||||
{
|
||||
@@ -128,10 +144,11 @@ static inline int sys_ring_buf_is_empty(struct ring_buf *buf)
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Obtain available space in a ring buffer
|
||||
* @brief Determine free space in a ring buffer.
|
||||
*
|
||||
* @param buf Ring buffer to examine
|
||||
* @return Available space in the buffer in 32-bit chunks
|
||||
* @param buf Address of ring buffer.
|
||||
*
|
||||
* @return Ring buffer free space (in 32-bit words).
|
||||
*/
|
||||
static inline int sys_ring_buf_space_get(struct ring_buf *buf)
|
||||
{
|
||||
@@ -148,36 +165,52 @@ static inline int sys_ring_buf_space_get(struct ring_buf *buf)
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Place an entry into the ring buffer
|
||||
* @brief Write a data item to a ring buffer.
|
||||
*
|
||||
* Concurrency control is not implemented, however no synchronization is needed
|
||||
* between put() and get() operations as they independently work on the
|
||||
* tail and head values, respectively.
|
||||
* Any use-cases involving multiple producers will need to synchronize use
|
||||
* of this function, by either disabling preemption or using a mutex.
|
||||
* This routine writes a data item to ring buffer @a buf. The data item
|
||||
* is an array of 32-bit words (from zero to 1020 bytes in length),
|
||||
* coupled with a 16-bit type identifier and an 8-bit integer value.
|
||||
*
|
||||
* @param buf Ring buffer to insert data to
|
||||
* @param type Application-specific type identifier
|
||||
* @param value Integral data to include, application specific
|
||||
* @param data Pointer to a buffer containing data to enqueue
|
||||
* @param size32 Size of data buffer, in 32-bit chunks (not bytes)
|
||||
* @return 0 on success, -EMSGSIZE if there isn't sufficient space
|
||||
* @warning
|
||||
* Use cases involving multiple writers to the ring buffer must prevent
|
||||
* concurrent write operations, either by preventing all writers from
|
||||
* being preempted or by using a mutex to govern writes to the ring buffer.
|
||||
*
|
||||
* @param buf Address of ring buffer.
|
||||
* @param type Data item's type identifier (application specific).
|
||||
* @param value Data item's integer value (application specific).
|
||||
* @param data Address of data item.
|
||||
* @param size32 Data item size (number of 32-bit words).
|
||||
*
|
||||
* @retval 0 Data item was written.
|
||||
* @retval -EMSGSIZE Ring buffer has insufficient free space.
|
||||
*/
|
||||
int sys_ring_buf_put(struct ring_buf *buf, uint16_t type, uint8_t value,
|
||||
uint32_t *data, uint8_t size32);
|
||||
|
||||
/**
|
||||
* @brief Fetch data from the ring buffer
|
||||
* @brief Read a data item from a ring buffer.
|
||||
*
|
||||
* @param buf Ring buffer to extract data from
|
||||
* @param type Return storage of the retrieved event type
|
||||
* @param value Return storage of the data value
|
||||
* @param data Buffer to copy data into
|
||||
* @param size32 Indicates the size of the data buffer. On return,
|
||||
* updated with the actual amount of 32-bit chunks written to the buffer
|
||||
* @return 0 on success, -EAGAIN if the ring buffer is empty, -EMSGSIZE
|
||||
* if the supplied buffer is too small (size32 will be updated with
|
||||
* the actual size needed)
|
||||
* This routine reads a data item from ring buffer @a buf. The data item
|
||||
* is an array of 32-bit words (up to 1020 bytes in length),
|
||||
* coupled with a 16-bit type identifier and an 8-bit integer value.
|
||||
*
|
||||
* @warning
|
||||
* Use cases involving multiple reads of the ring buffer must prevent
|
||||
* concurrent read operations, either by preventing all readers from
|
||||
* being preempted or by using a mutex to govern reads to the ring buffer.
|
||||
*
|
||||
* @param buf Address of ring buffer.
|
||||
* @param type Area to store the data item's type identifier.
|
||||
* @param value Area to store the data item's integer value.
|
||||
* @param data Area to store the data item.
|
||||
* @param size32 Size of the data item storage area (number of 32-bit chunks).
|
||||
*
|
||||
* @retval 0 Data item was fetched; @a size32 now contains the number of
|
||||
* 32-bit words read into data area @a data.
|
||||
* @retval -EAGAIN Ring buffer is empty.
|
||||
* @retval -EMSGSIZE Data area @a data is too small; @a size32 now contains
|
||||
* the number of 32-bit words needed.
|
||||
*/
|
||||
int sys_ring_buf_get(struct ring_buf *buf, uint16_t *type, uint8_t *value,
|
||||
uint32_t *data, uint8_t *size32);
|
||||
|
||||
@@ -88,8 +88,28 @@ extern int sys_clock_hw_cycles_per_tick;
|
||||
#define SYS_CLOCK_HW_CYCLES_TO_NS_AVG(X, NCYCLES) \
|
||||
(uint32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X) / NCYCLES)
|
||||
|
||||
/**
|
||||
* @defgroup clock_apis Kernel Clock APIs
|
||||
* @ingroup kernel_apis
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Compute nanoseconds from hardware clock cycles.
|
||||
*
|
||||
* This macro converts a time duration expressed in hardware clock cycles
|
||||
* to the equivalent duration expressed in nanoseconds.
|
||||
*
|
||||
* @param X Duration in hardware clock cycles.
|
||||
*
|
||||
* @return Duration in nanoseconds.
|
||||
*/
|
||||
#define SYS_CLOCK_HW_CYCLES_TO_NS(X) (uint32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X))
|
||||
|
||||
/**
|
||||
* @} end defgroup clock_apis
|
||||
*/
|
||||
|
||||
extern int64_t _sys_clock_tick_count;
|
||||
|
||||
/*
|
||||
|
||||
@@ -18,15 +18,19 @@
|
||||
|
||||
menu "Unified Kernel Options"
|
||||
|
||||
config KERNEL_V2_DEBUG
|
||||
config KERNEL_DEBUG
|
||||
bool
|
||||
prompt "Kernel V2 debug help"
|
||||
prompt "Kernel debugging"
|
||||
default n
|
||||
select INIT_STACKS
|
||||
help
|
||||
Enable kernel debugging.
|
||||
|
||||
Note that debugging the kernel internals can be very verbose.
|
||||
|
||||
config NUM_COOP_PRIORITIES
|
||||
int
|
||||
prompt "Kernel V2: number of coop priorities"
|
||||
prompt "Number of coop priorities"
|
||||
default 16
|
||||
help
|
||||
Number of cooperative priorities configured in the system. Gives access
|
||||
@@ -41,12 +45,20 @@ config NUM_COOP_PRIORITIES
|
||||
This can be set to zero to disable cooperative scheduling. Cooperative
|
||||
threads always preempt preemptible threads.
|
||||
|
||||
Each priority requires an extra 8 bytes of RAM. If there are more than
|
||||
32 total priorities, an extra 4 bytes is required.
|
||||
Each priority requires an extra 8 bytes of RAM. Each set of 32 extra
|
||||
total priorities require an extra 4 bytes and add one possible
|
||||
iteration to loops that search for the next thread to run.
|
||||
|
||||
The total number of priorities is
|
||||
|
||||
NUM_COOP_PRIORITIES + NUM_PREEMPT_PRIORITIES + 1
|
||||
|
||||
The extra one is for the idle thread, which must run at the lowest
|
||||
priority, and be the only thread at that priority.
|
||||
|
||||
config NUM_PREEMPT_PRIORITIES
|
||||
int
|
||||
prompt "Kernel V2: number of preemptible priorities"
|
||||
prompt "Number of preemptible priorities"
|
||||
default 15
|
||||
help
|
||||
Number of preemptible priorities available in the system. Gives access
|
||||
@@ -54,15 +66,20 @@ config NUM_PREEMPT_PRIORITIES
|
||||
|
||||
This can be set to 0 to disable preemptible scheduling.
|
||||
|
||||
The idle thread is always installed as a preemptible thread of the
|
||||
lowest priority.
|
||||
Each priority requires an extra 8 bytes of RAM. Each set of 32 extra
|
||||
total priorities require an extra 4 bytes and add one possible
|
||||
iteration to loops that search for the next thread to run.
|
||||
|
||||
Each priority requires an extra 8 bytes of RAM. If there are more than
|
||||
32 total priorities, an extra 4 bytes is required.
|
||||
The total number of priorities is
|
||||
|
||||
NUM_COOP_PRIORITIES + NUM_PREEMPT_PRIORITIES + 1
|
||||
|
||||
The extra one is for the idle thread, which must run at the lowest
|
||||
priority, and be the only thread at that priority.
|
||||
|
||||
config PRIORITY_CEILING
|
||||
int
|
||||
prompt "Kernel V2: priority inheritance ceiling"
|
||||
prompt "Priority inheritance ceiling"
|
||||
default 0
|
||||
|
||||
config BOOT_BANNER
|
||||
@@ -285,29 +302,44 @@ config SEMAPHORE_GROUPS
|
||||
the k_sem_give() routine.
|
||||
|
||||
choice
|
||||
prompt "Memory pools auto-defragmentation policy"
|
||||
default MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK
|
||||
prompt "Memory pool block allocation policy"
|
||||
default MEM_POOL_SPLIT_BEFORE_DEFRAG
|
||||
help
|
||||
Memory pool auto-defragmentation is performed if a memory
|
||||
block of the requested size can not be found. Defragmentation
|
||||
can be done:
|
||||
Before trying to find a block in the next largest block set.
|
||||
This is an attempt to preserve the memory pool's larger blocks
|
||||
by fragmenting them only when necessary (i.e. at the cost of
|
||||
doing more frequent auto-defragmentations).
|
||||
After trying to find a block in the next largest block set.
|
||||
This is an attempt to limit the cost of doing auto-defragmentations
|
||||
by doing them only when necessary (i.e. at the cost of fragmenting
|
||||
the memory pool's larger blocks).
|
||||
This option specifies how a memory pool reacts if an unused memory
|
||||
block of the required size is not available.
|
||||
|
||||
config MEM_POOL_AD_NONE
|
||||
bool "No auto-defragmentation"
|
||||
config MEM_POOL_SPLIT_BEFORE_DEFRAG
|
||||
bool "Split a larger block before merging smaller blocks"
|
||||
help
|
||||
This option instructs a memory pool to try splitting a larger unused
|
||||
block if an unused block of the required size is not available; only
|
||||
if no such blocks exist will the memory pool try merging smaller unused
|
||||
blocks. This policy attempts to limit the cost of performing automatic
|
||||
partial defragmention of the memory pool, at the cost of fragmenting
|
||||
the memory pool's larger blocks.
|
||||
|
||||
config MEM_POOL_AD_BEFORE_SEARCH_FOR_BIGGERBLOCK
|
||||
bool "Before trying to find a block in the next largest block set"
|
||||
config MEM_POOL_DEFRAG_BEFORE_SPLIT
|
||||
bool "Merge smaller blocks before splitting a larger block"
|
||||
help
|
||||
This option instructs a memory pool to try merging smaller unused
|
||||
blocks if an unused block of the required size is not available; only
|
||||
if this does not generate a sufficiently large block will the memory
|
||||
pool try splitting a larger unused block. This policy attempts to
|
||||
preserve the memory pool's larger blocks, at the cost of performing
|
||||
automatic partial defragmentations more frequently.
|
||||
|
||||
config MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK
|
||||
bool "After trying to find a block in the next largest block set"
|
||||
config MEM_POOL_SPLIT_ONLY
|
||||
bool "Split a larger block, but never merge smaller blocks"
|
||||
help
|
||||
This option instructs a memory pool to try splitting a larger unused
|
||||
block if an unused block of the required size is not available; if no
|
||||
such blocks exist the block allocation operation fails. This policy
|
||||
attempts to limit the cost of defragmenting the memory pool by avoiding
|
||||
automatic partial defragmentation, at the cost of requiring the
|
||||
application to explicitly request a full defragmentation of the memory
|
||||
pool when an allocation fails. Depending on how a memory pool is used,
|
||||
it may be more efficient for a memory pool to perform an occasional
|
||||
full defragmentation than to perform frequent partial defragmentations.
|
||||
|
||||
endchoice
|
||||
|
||||
|
||||
@@ -142,7 +142,7 @@ int sys_event_logger_get_wait_timeout(struct event_logger *logger,
|
||||
uint32_t *buffer, uint8_t *buffer_size,
|
||||
uint32_t timeout)
|
||||
{
|
||||
if (k_sem_take(&(logger->sync_sema), timeout)) {
|
||||
if (k_sem_take(&(logger->sync_sema), __ticks_to_ms(timeout))) {
|
||||
return event_logger_get(logger, event_id, dropped_event_count,
|
||||
buffer, buffer_size);
|
||||
}
|
||||
|
||||
@@ -57,15 +57,14 @@
|
||||
*
|
||||
* #include <kernel_structs.h>
|
||||
* GEN_ABS_SYM_BEGIN (_OffsetAbsSyms) /@ the name parameter is arbitrary @/
|
||||
* /@ tNANO structure member offsets @/
|
||||
* /@ _kernel_t structure member offsets @/
|
||||
*
|
||||
* GEN_OFFSET_SYM (tNANO, fiber);
|
||||
* GEN_OFFSET_SYM (tNANO, task);
|
||||
* GEN_OFFSET_SYM (tNANO, current);
|
||||
* GEN_OFFSET_SYM (tNANO, nested);
|
||||
* GEN_OFFSET_SYM (tNANO, irq_stack);
|
||||
* GEN_OFFSET_SYM (_kernel_t, nested);
|
||||
* GEN_OFFSET_SYM (_kernel_t, irq_stack);
|
||||
* GEN_OFFSET_SYM (_kernel_t, current);
|
||||
* GEN_OFFSET_SYM (_kernel_t, idle);
|
||||
*
|
||||
* GEN_ABSOLUTE_SYM (__tNANO_SIZEOF, sizeof(tNANO));
|
||||
* GEN_ABSOLUTE_SYM (___kernel_t_SIZEOF, sizeof(_kernel_t));
|
||||
*
|
||||
* GEN_ABS_SYM_END
|
||||
* <END of sample source file: offsets.c>
|
||||
@@ -73,11 +72,10 @@
|
||||
* Compiling the sample offsets.c results in the following symbols in offsets.o:
|
||||
*
|
||||
* $ nm offsets.o
|
||||
* 00000010 A __tNANO_irq_stack_OFFSET
|
||||
* 00000008 A __tNANO_current_OFFSET
|
||||
* 0000000c A __tNANO_nested_OFFSET
|
||||
* 00000000 A __tNANO_fiber_OFFSET
|
||||
* 00000004 A __tNANO_task_OFFSET
|
||||
* 00000000 A ___kernel_t_nested_OFFSET
|
||||
* 00000004 A ___kernel_t_irq_stack_OFFSET
|
||||
* 00000008 A ___kernel_t_current_OFFSET
|
||||
* 0000000c A ___kernel_t_idle_OFFSET
|
||||
*/
|
||||
|
||||
#ifndef _GEN_OFFSET_H
|
||||
|
||||
@@ -25,8 +25,6 @@
|
||||
|
||||
GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)
|
||||
|
||||
/* arch-agnostic tNANO structure member offsets */
|
||||
|
||||
GEN_OFFSET_SYM(_kernel_t, current);
|
||||
|
||||
#if defined(CONFIG_THREAD_MONITOR)
|
||||
|
||||
@@ -107,7 +107,7 @@ struct _ready_q {
|
||||
struct k_thread *cache;
|
||||
|
||||
/* bitmap of priorities that contain at least one ready thread */
|
||||
uint32_t prio_bmap[1];
|
||||
uint32_t prio_bmap[K_NUM_PRIO_BITMAPS];
|
||||
|
||||
/* ready queues, one per priority */
|
||||
sys_dlist_t q[K_NUM_PRIORITIES];
|
||||
@@ -135,7 +135,7 @@ struct _kernel {
|
||||
|
||||
/*
|
||||
* ready queue: can be big, keep after small fields, since some
|
||||
* assembly (e.g. ARC are limited in the encoding of the offset)
|
||||
* assembly (e.g. ARC) are limited in the encoding of the offset
|
||||
*/
|
||||
struct _ready_q ready_q;
|
||||
|
||||
|
||||
@@ -167,17 +167,32 @@ static inline int _get_ready_q_q_index(int prio)
|
||||
return prio + CONFIG_NUM_COOP_PRIORITIES;
|
||||
}
|
||||
|
||||
#if (K_NUM_PRIORITIES > 32)
|
||||
#error not supported yet
|
||||
#endif
|
||||
|
||||
/* find out the currently highest priority where a thread is ready to run */
|
||||
/* interrupts must be locked */
|
||||
static inline int _get_highest_ready_prio(void)
|
||||
{
|
||||
uint32_t ready = _ready_q.prio_bmap[0];
|
||||
int bitmap = 0;
|
||||
uint32_t ready_range;
|
||||
|
||||
return find_lsb_set(ready) - 1 - CONFIG_NUM_COOP_PRIORITIES;
|
||||
#if (K_NUM_PRIORITIES <= 32)
|
||||
ready_range = _ready_q.prio_bmap[0];
|
||||
#else
|
||||
for (;; bitmap++) {
|
||||
|
||||
__ASSERT(bitmap < K_NUM_PRIO_BITMAPS, "prio out-of-range\n");
|
||||
|
||||
if (_ready_q.prio_bmap[bitmap]) {
|
||||
ready_range = _ready_q.prio_bmap[bitmap];
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
int abs_prio = (find_lsb_set(ready_range) - 1) + (bitmap << 5);
|
||||
|
||||
__ASSERT(abs_prio < K_NUM_PRIORITIES, "prio out-of-range\n");
|
||||
|
||||
return abs_prio - CONFIG_NUM_COOP_PRIORITIES;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -204,7 +219,7 @@ static inline void _sched_lock(void)
|
||||
atomic_inc(&_current->base.sched_locked);
|
||||
|
||||
K_DEBUG("scheduler locked (%p:%d)\n",
|
||||
_current, _current->sched_locked);
|
||||
_current, _current->base.sched_locked);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -28,6 +28,8 @@
|
||||
#define K_NUM_PRIORITIES \
|
||||
(CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES + 1)
|
||||
|
||||
#define K_NUM_PRIO_BITMAPS ((K_NUM_PRIORITIES + 31) >> 5)
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
|
||||
#ifdef __cplusplus
|
||||
@@ -54,7 +56,7 @@ typedef void (*_thread_entry_t)(void *, void *, void *);
|
||||
extern void _thread_entry(void (*)(void *, void *, void *),
|
||||
void *, void *, void *);
|
||||
|
||||
extern void _new_thread(char *pStack, unsigned stackSize,
|
||||
extern void _new_thread(char *pStack, size_t stackSize,
|
||||
void *uk_task_ptr,
|
||||
void (*pEntry)(void *, void *, void *),
|
||||
void *p1, void *p2, void *p3,
|
||||
|
||||
@@ -27,6 +27,8 @@
|
||||
#include <kernel_event_logger_arch.h>
|
||||
#include <misc/__assert.h>
|
||||
|
||||
struct event_logger sys_k_event_logger;
|
||||
|
||||
uint32_t _sys_k_event_logger_buffer[CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE];
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
|
||||
@@ -58,15 +60,19 @@ static int _sys_k_event_logger_init(struct device *arg)
|
||||
return 0;
|
||||
}
|
||||
SYS_INIT(_sys_k_event_logger_init,
|
||||
NANOKERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
|
||||
POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
|
||||
|
||||
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
|
||||
sys_k_timer_func timer_func;
|
||||
void sys_k_event_logger_set_timer(sys_k_timer_func func)
|
||||
{
|
||||
timer_func = func;
|
||||
}
|
||||
#endif
|
||||
/*
|
||||
* _sys_k_get_time()
|
||||
*
|
||||
* This function pointer can be invoked to generate an event timestamp.
|
||||
* By default it uses the kernel's hardware clock, but can be changed
|
||||
* to point to an application-defined routine.
|
||||
*
|
||||
*/
|
||||
sys_k_timer_func_t _sys_k_get_time = sys_cycle_get_32;
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP */
|
||||
|
||||
void sys_k_event_logger_put_timed(uint16_t event_id)
|
||||
{
|
||||
|
||||
@@ -342,7 +342,7 @@ static char *get_block_recursive(struct k_mem_pool *pool,
|
||||
return found;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MEM_POOL_AD_BEFORE_SEARCH_FOR_BIGGERBLOCK
|
||||
#ifdef CONFIG_MEM_POOL_DEFRAG_BEFORE_SPLIT
|
||||
/*
|
||||
* do a partial defragmentation of memory pool & try allocating again
|
||||
* - do this on initial invocation only, not recursive ones
|
||||
@@ -385,7 +385,7 @@ static char *get_block_recursive(struct k_mem_pool *pool,
|
||||
return larger_block;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK
|
||||
#ifdef CONFIG_MEM_POOL_SPLIT_BEFORE_DEFRAG
|
||||
/*
|
||||
* do a partial defragmentation of memory pool & try allocating again
|
||||
* - do this on initial invocation only, not recursive ones
|
||||
|
||||
@@ -121,7 +121,7 @@ static void adjust_owner_prio(struct k_mutex *mutex, int new_prio)
|
||||
K_DEBUG("%p (ready (y/n): %c) prio changed to %d (was %d)\n",
|
||||
mutex->owner, _is_thread_ready(mutex->owner) ?
|
||||
'y' : 'n',
|
||||
new_prio, mutex->owner->prio);
|
||||
new_prio, mutex->owner->base.prio);
|
||||
|
||||
_thread_priority_set(mutex->owner, new_prio);
|
||||
}
|
||||
@@ -238,7 +238,7 @@ void k_mutex_unlock(struct k_mutex *mutex)
|
||||
struct k_thread *new_owner = _unpend_first_thread(&mutex->wait_q);
|
||||
|
||||
K_DEBUG("new owner of mutex %p: %p (prio: %d)\n",
|
||||
mutex, new_owner, new_owner ? new_owner->prio : -1000);
|
||||
mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
|
||||
|
||||
if (new_owner) {
|
||||
_abort_thread_timeout(new_owner);
|
||||
|
||||
@@ -123,7 +123,7 @@ void k_sched_unlock(void)
|
||||
atomic_dec(&_current->base.sched_locked);
|
||||
|
||||
K_DEBUG("scheduler unlocked (%p:%d)\n",
|
||||
_current, _current->sched_locked);
|
||||
_current, _current->base.sched_locked);
|
||||
|
||||
_reschedule_threads(key);
|
||||
}
|
||||
@@ -220,7 +220,7 @@ struct k_thread *_get_next_ready_thread(void)
|
||||
int __must_switch_threads(void)
|
||||
{
|
||||
K_DEBUG("current prio: %d, highest prio: %d\n",
|
||||
_current->prio, _get_highest_ready_prio());
|
||||
_current->base.prio, _get_highest_ready_prio());
|
||||
|
||||
extern void _dump_ready_q(void);
|
||||
_dump_ready_q();
|
||||
@@ -348,7 +348,11 @@ k_tid_t k_current_get(void)
|
||||
/* debug aid */
|
||||
void _dump_ready_q(void)
|
||||
{
|
||||
K_DEBUG("bitmap: %x\n", _ready_q.prio_bmap[0]);
|
||||
K_DEBUG("bitmaps: ");
|
||||
for (int bitmap = 0; bitmap < K_NUM_PRIO_BITMAPS; bitmap++) {
|
||||
K_DEBUG("%x", _ready_q.prio_bmap[bitmap]);
|
||||
}
|
||||
K_DEBUG("\n");
|
||||
for (int prio = 0; prio < K_NUM_PRIORITIES; prio++) {
|
||||
K_DEBUG("prio: %d, head: %p\n",
|
||||
prio - CONFIG_NUM_COOP_PRIORITIES,
|
||||
|
||||
@@ -230,10 +230,10 @@ static void schedule_new_thread(struct k_thread *thread, int32_t delay)
|
||||
#endif
|
||||
}
|
||||
|
||||
k_tid_t k_thread_spawn(char *stack, unsigned stack_size,
|
||||
k_tid_t k_thread_spawn(char *stack, size_t stack_size,
|
||||
void (*entry)(void *, void *, void*),
|
||||
void *p1, void *p2, void *p3,
|
||||
int32_t prio, uint32_t options, int32_t delay)
|
||||
int prio, uint32_t options, int32_t delay)
|
||||
{
|
||||
__ASSERT(!_is_in_isr(), "");
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@ static void work_q_main(void *work_q_ptr, void *p2, void *p3)
|
||||
}
|
||||
|
||||
void k_work_q_start(struct k_work_q *work_q, char *stack,
|
||||
unsigned stack_size, unsigned prio)
|
||||
size_t stack_size, int prio)
|
||||
{
|
||||
k_fifo_init(&work_q->fifo);
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ static int decode_delta(int num, const uint8_t *buf, int16_t buflen,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
num = sys_be16_to_cpu((uint16_t)*buf) + 269;
|
||||
num = sys_get_be16(buf) + 269;
|
||||
hdrlen += 2;
|
||||
break;
|
||||
case 15:
|
||||
|
||||
@@ -893,7 +893,7 @@ static void net_tx_fiber(void)
|
||||
|
||||
wait_next:
|
||||
/* Check stack usage (no-op if not enabled) */
|
||||
net_analyze_stack("TX fiber", tx_fiber_stack,
|
||||
net_analyze_stack("TX fiber", (unsigned char *)tx_fiber_stack,
|
||||
sizeof(tx_fiber_stack));
|
||||
|
||||
net_print_statistics();
|
||||
@@ -911,7 +911,7 @@ static void net_rx_fiber(void)
|
||||
buf = net_buf_get_timeout(&netdev.rx_queue, 0, TICKS_UNLIMITED);
|
||||
|
||||
/* Check stack usage (no-op if not enabled) */
|
||||
net_analyze_stack("RX fiber", rx_fiber_stack,
|
||||
net_analyze_stack("RX fiber", (unsigned char *)rx_fiber_stack,
|
||||
sizeof(rx_fiber_stack));
|
||||
|
||||
NET_DBG("Received buf %p\n", buf);
|
||||
@@ -965,8 +965,8 @@ static void net_timer_fiber(void)
|
||||
uint32_t new_print;
|
||||
|
||||
net_analyze_stack("timer fiber",
|
||||
timer_fiber_stack,
|
||||
sizeof(timer_fiber_stack));
|
||||
(unsigned char *)timer_fiber_stack,
|
||||
sizeof(timer_fiber_stack));
|
||||
new_print = curr + PRINT_CYCLE;
|
||||
if (new_print > curr) {
|
||||
next_print = new_print;
|
||||
|
||||
@@ -121,7 +121,8 @@ static void net_rx_15_4_fiber(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
net_analyze_stack("802.15.4 RX", rx_fiber_stack,
|
||||
net_analyze_stack("802.15.4 RX",
|
||||
(unsigned char *)rx_fiber_stack,
|
||||
sizeof(rx_fiber_stack));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -62,6 +62,7 @@ void main(void)
|
||||
gpiob = device_get_binding(PORT);
|
||||
if (!gpiob) {
|
||||
printk("error\n");
|
||||
return;
|
||||
}
|
||||
|
||||
gpio_pin_configure(gpiob, PIN,
|
||||
@@ -73,7 +74,7 @@ void main(void)
|
||||
gpio_pin_enable_callback(gpiob, PIN);
|
||||
|
||||
while (1) {
|
||||
int val = 0;
|
||||
uint32_t val = 0;
|
||||
|
||||
gpio_pin_read(gpiob, PIN, &val);
|
||||
k_sleep(SLEEP_TIME);
|
||||
|
||||
@@ -37,7 +37,7 @@
|
||||
#define EDS_VERSION 0x00
|
||||
#define EDS_URL_READ_OFFSET 2
|
||||
#define EDS_URL_WRITE_OFFSET 4
|
||||
#define EDS_IDLE_TIMEOUT (30 * MSEC_PER_SEC)
|
||||
#define EDS_IDLE_TIMEOUT K_SECONDS(30)
|
||||
|
||||
/* Idle timer */
|
||||
struct k_delayed_work idle_work;
|
||||
|
||||
@@ -64,7 +64,7 @@ void cb(void *param)
|
||||
|
||||
aio_cmp_dev = device_get_binding("AIO_CMP_0");
|
||||
|
||||
printf("*** %s triggered %s.\n", &p->name,
|
||||
printf("*** %s triggered %s.\n", p->name,
|
||||
(p->pol == AIO_CMP_POL_RISE) ? "rising" : "falling"
|
||||
);
|
||||
|
||||
@@ -93,7 +93,7 @@ void main(void)
|
||||
cb, &cb_data);
|
||||
if (ret)
|
||||
printf("ERROR registering callback for %s (%d)\n",
|
||||
&cb_data.name, ret);
|
||||
cb_data.name, ret);
|
||||
}
|
||||
|
||||
while (1) {
|
||||
|
||||
@@ -161,6 +161,7 @@ void main(void)
|
||||
gpio_dev = device_get_binding(GPIO_DRV_NAME);
|
||||
if (!gpio_dev) {
|
||||
printk("Cannot find %s!\n", GPIO_DRV_NAME);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Setup GPIO output */
|
||||
|
||||
@@ -91,6 +91,7 @@ void main(void)
|
||||
i2c_dev = device_get_binding("I2C_0");
|
||||
if (!i2c_dev) {
|
||||
printk("I2C: Device not found.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Do one-byte read/write */
|
||||
|
||||
@@ -100,7 +100,7 @@ void apa102c_led_program(struct device *gpio_dev, uint32_t rgb)
|
||||
apa102c_rgb_send(gpio_dev, APA102C_END_FRAME);
|
||||
}
|
||||
|
||||
void apds9960_reg_write(struct device *i2c_dev,
|
||||
int apds9960_reg_write(struct device *i2c_dev,
|
||||
uint8_t reg_addr, uint8_t reg_val)
|
||||
{
|
||||
struct i2c_msg msg;
|
||||
@@ -119,14 +119,12 @@ void apds9960_reg_write(struct device *i2c_dev,
|
||||
if (ret) {
|
||||
printf("Cannot write APDS9960 reg 0x%X to 0x%X\n",
|
||||
reg_addr, reg_val);
|
||||
|
||||
while (ret) {
|
||||
/* spin if error */
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void apds9960_reg_read(struct device *i2c_dev, uint8_t reg_addr,
|
||||
int apds9960_reg_read(struct device *i2c_dev, uint8_t reg_addr,
|
||||
uint8_t *data, uint8_t data_len)
|
||||
{
|
||||
struct i2c_msg msgs[2];
|
||||
@@ -147,11 +145,9 @@ void apds9960_reg_read(struct device *i2c_dev, uint8_t reg_addr,
|
||||
ret = i2c_transfer(i2c_dev, msgs, 2, APDS9960_ADDR);
|
||||
if (ret) {
|
||||
printf("Cannot read from APDS9960 reg 0x%X\n", reg_addr);
|
||||
|
||||
while (ret) {
|
||||
/* spin if error */
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void apds9960_setup(struct device *i2c_dev, int gain)
|
||||
@@ -198,19 +194,13 @@ void main(void)
|
||||
gpio_dev = device_get_binding(GPIO_DRV_NAME);
|
||||
if (!gpio_dev) {
|
||||
printf("Cannot find %s!\n", GPIO_DRV_NAME);
|
||||
|
||||
while (!gpio_dev) {
|
||||
/* spin if error */
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
i2c_dev = device_get_binding(I2C_DRV_NAME);
|
||||
if (!i2c_dev) {
|
||||
printf("Cannot find %s!\n", I2C_DRV_NAME);
|
||||
|
||||
while (!i2c_dev) {
|
||||
/* spin if error */
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -55,6 +55,7 @@ void main(void)
|
||||
glcd = device_get_binding(GROVE_LCD_NAME);
|
||||
if (!glcd) {
|
||||
printk("Grove LCD: Device not found.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Now configure the LCD the way we want it */
|
||||
|
||||
@@ -210,7 +210,7 @@ void fork_manager_entry(void)
|
||||
void busy_task_entry(void)
|
||||
{
|
||||
int ticks_when_awake;
|
||||
int i;
|
||||
int i = 0;
|
||||
|
||||
while (1) {
|
||||
/*
|
||||
|
||||
@@ -164,7 +164,7 @@ static void receive_data(const char *taskname, struct net_context *ctx)
|
||||
PRINT("%s: %s(): received %d bytes\n", taskname,
|
||||
__func__, ip_buf_appdatalen(buf));
|
||||
if (memcmp(ip_buf_appdata(buf),
|
||||
lorem_ipsum, sizeof(lorem_ipsum))) {
|
||||
lorem_ipsum, strlen(lorem_ipsum))) {
|
||||
PRINT("ERROR: data does not match\n");
|
||||
|
||||
#ifdef CONFIG_NET_SANITY_TEST
|
||||
|
||||
@@ -3,5 +3,5 @@ CONFIG_INIT_STACKS=y
|
||||
CONFIG_SYS_CLOCK_TICKS_PER_SEC=100
|
||||
CONFIG_ASSERT=y
|
||||
CONFIG_ASSERT_LEVEL=2
|
||||
CONFIG_NUM_COOP_PRIORITIES=2
|
||||
CONFIG_NUM_PREEMPT_PRIORITIES=6
|
||||
CONFIG_NUM_COOP_PRIORITIES=29
|
||||
CONFIG_NUM_PREEMPT_PRIORITIES=40
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
BOARD ?= qemu_x86
|
||||
CONF_FILE = prj.conf
|
||||
|
||||
include ${ZEPHYR_BASE}/Makefile.inc
|
||||
@@ -1,18 +0,0 @@
|
||||
Title: Power management hooks template
|
||||
|
||||
Description:
|
||||
|
||||
A template app that defines the power management hooks and
|
||||
enables the power management related CONFIG flags. This
|
||||
app will enable build testing of power management code inside
|
||||
the CONFIG flags.
|
||||
|
||||
This project is intended only for build testing. For running
|
||||
real PM tests use other applications that are full implementations
|
||||
specific to SOCs
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Building Project:
|
||||
|
||||
make pristine && make
|
||||
@@ -1,3 +0,0 @@
|
||||
CONFIG_SYS_POWER_MANAGEMENT=y
|
||||
CONFIG_DEVICE_POWER_MANAGEMENT=y
|
||||
CONFIG_TICKLESS_IDLE=y
|
||||
@@ -1 +0,0 @@
|
||||
obj-y = main.o
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user