Compare commits
56 Commits
v1.6.0-rc2
...
v1.6.0-rc4
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4311814a05 | ||
|
|
70c57b2b96 | ||
|
|
bdd6b19a52 | ||
|
|
a599dbd128 | ||
|
|
4344299c00 | ||
|
|
0307d6ea5f | ||
|
|
f38cbb5744 | ||
|
|
4c0d57ed3e | ||
|
|
4540aa0877 | ||
|
|
1c7c4dd43e | ||
|
|
a16bc64bf8 | ||
|
|
3d37868d09 | ||
|
|
98a001e1f8 | ||
|
|
bf2eb5542a | ||
|
|
51859b8ea0 | ||
|
|
7aa536789e | ||
|
|
05a0c6fef0 | ||
|
|
9471d1f6c8 | ||
|
|
09f4f54e72 | ||
|
|
c2fe55bbe5 | ||
|
|
70028dd97f | ||
|
|
d15b758632 | ||
|
|
999c15d1b5 | ||
|
|
d108946a1b | ||
|
|
97ab403573 | ||
|
|
63538db423 | ||
|
|
272ec5e219 | ||
|
|
82804fe115 | ||
|
|
672dc9cc89 | ||
|
|
74d75f2bd5 | ||
|
|
c5a40d60bb | ||
|
|
189e5d0006 | ||
|
|
039a130861 | ||
|
|
61ec28adb6 | ||
|
|
4e38776774 | ||
|
|
14dc173c1f | ||
|
|
0a48547bc6 | ||
|
|
16f5611f3e | ||
|
|
7336b2a978 | ||
|
|
42cf1ab802 | ||
|
|
05291174df | ||
|
|
6c8409c083 | ||
|
|
bf591e9edf | ||
|
|
cf05794924 | ||
|
|
60a31d6ed1 | ||
|
|
6b6572629d | ||
|
|
96fc793c25 | ||
|
|
a30942dbb8 | ||
|
|
963d04d67e | ||
|
|
b792e4277f | ||
|
|
b006b1bb9a | ||
|
|
0fc5801607 | ||
|
|
271ab7d583 | ||
|
|
9f0e4d2a90 | ||
|
|
979aedc2d3 | ||
|
|
850877b95d |
@@ -379,6 +379,13 @@ M: Anas Nashif <anas.nashif@intel.com>
|
||||
S: Supported
|
||||
F: arch/x86/soc/intel_quark/quark_x1000/
|
||||
|
||||
RELEASE NOTES
|
||||
M: Anas Nashif <anas.nashif@intel.com>
|
||||
M: Javier B Perez <javier.b.perez.hernandez@intel.com>
|
||||
M: Kinder, David <david.b.kinder@intel.com>
|
||||
S: Supported
|
||||
F: release-notes.rst
|
||||
|
||||
SANITYCHECK
|
||||
M: Andrew Boie <andrew.p.boie@intel.com>
|
||||
S: Supported
|
||||
|
||||
2
Makefile
2
Makefile
@@ -2,7 +2,7 @@ VERSION_MAJOR = 1
|
||||
VERSION_MINOR = 6
|
||||
PATCHLEVEL = 0
|
||||
VERSION_RESERVED = 0
|
||||
EXTRAVERSION = -rc2
|
||||
EXTRAVERSION = -rc4
|
||||
NAME = Zephyr Kernel
|
||||
|
||||
export SOURCE_DIR PROJECT MDEF_FILE
|
||||
|
||||
@@ -28,11 +28,26 @@
|
||||
#include <sections.h>
|
||||
#include <arch/cpu.h>
|
||||
|
||||
#ifdef CONFIG_HARVARD
|
||||
#define _TOP_OF_MEMORY (CONFIG_DCCM_BASE_ADDRESS + CONFIG_DCCM_SIZE * 1024)
|
||||
/* harvard places the initial stack in the dccm memory */
|
||||
GDATA(_interrupt_stack)
|
||||
GDATA(_firq_stack)
|
||||
GDATA(_main_stack)
|
||||
|
||||
/* use one of the available interrupt stacks during init */
|
||||
|
||||
/* FIRQ only ? */
|
||||
#if CONFIG_NUM_IRQ_PRIO_LEVELS == 1
|
||||
|
||||
/* FIRQ, but uses _interrupt_stack ? */
|
||||
#if CONFIG_RGF_NUM_BANKS == 1
|
||||
#define INIT_STACK _interrupt_stack
|
||||
#define INIT_STACK_SIZE CONFIG_ISR_STACK_SIZE
|
||||
#else
|
||||
#define INIT_STACK _firq_stack
|
||||
#define INIT_STACK_SIZE CONFIG_FIRQ_STACK_SIZE
|
||||
#endif
|
||||
#else
|
||||
#define _TOP_OF_MEMORY (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_SIZE * 1024)
|
||||
#define INIT_STACK _interrupt_stack
|
||||
#define INIT_STACK_SIZE CONFIG_ISR_STACK_SIZE
|
||||
#endif
|
||||
|
||||
GTEXT(__reset)
|
||||
@@ -58,7 +73,30 @@ SECTION_FUNC(TEXT,__start)
|
||||
/* lock interrupts: will get unlocked when switch to main task */
|
||||
clri
|
||||
|
||||
/* setup a stack at the end of MEMORY */
|
||||
mov sp, _TOP_OF_MEMORY
|
||||
#ifdef CONFIG_INIT_STACKS
|
||||
/*
|
||||
* use the main stack to call memset on the interrupt stack and the
|
||||
* FIRQ stack when CONFIG_INIT_STACKS is enabled before switching to
|
||||
* one of them for the rest of the early boot
|
||||
*/
|
||||
mov sp, _main_stack
|
||||
add sp, sp, CONFIG_MAIN_STACK_SIZE
|
||||
|
||||
mov_s r0, _interrupt_stack
|
||||
mov_s r1, 0xaa
|
||||
mov_s r2, CONFIG_ISR_STACK_SIZE
|
||||
jl memset
|
||||
|
||||
#if CONFIG_RGF_NUM_BANKS != 1
|
||||
mov_s r0, _firq_stack
|
||||
mov_s r1, 0xaa
|
||||
mov_s r2, CONFIG_FIRQ_STACK_SIZE
|
||||
jl memset
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_INIT_STACKS */
|
||||
|
||||
mov sp, INIT_STACK
|
||||
add sp, sp, INIT_STACK_SIZE
|
||||
|
||||
j @_PrepC
|
||||
|
||||
@@ -85,7 +85,7 @@ static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread)
|
||||
* @return N/A
|
||||
*/
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
_thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
{
|
||||
@@ -124,14 +124,11 @@ void _new_thread(char *pStackMem, size_t stackSize,
|
||||
pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
|
||||
#endif
|
||||
|
||||
/* k_q_node initialized upon first insertion in a list */
|
||||
thread->base.flags = options | K_PRESTART;
|
||||
thread->base.sched_locked = 0;
|
||||
_init_thread_base(&thread->base, priority, K_PRESTART, options);
|
||||
|
||||
/* static threads overwrite them afterwards with real values */
|
||||
thread->init_data = NULL;
|
||||
thread->fn_abort = NULL;
|
||||
thread->base.prio = priority;
|
||||
|
||||
#ifdef CONFIG_THREAD_CUSTOM_DATA
|
||||
/* Initialize custom data field (value is opaque to kernel) */
|
||||
@@ -147,8 +144,6 @@ void _new_thread(char *pStackMem, size_t stackSize,
|
||||
thread->entry = (struct __thread_entry *)(pInitCtx);
|
||||
#endif
|
||||
|
||||
ARG_UNUSED(uk_task_ptr);
|
||||
|
||||
/*
|
||||
* intlock_key is constructed based on ARCv2 ISA Programmer's
|
||||
* Reference Manual CLRI instruction description:
|
||||
@@ -160,8 +155,6 @@ void _new_thread(char *pStackMem, size_t stackSize,
|
||||
thread->callee_saved.sp =
|
||||
(uint32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF;
|
||||
|
||||
_nano_timeout_thread_init(thread);
|
||||
|
||||
/* initial values in all other regs/k_thread entries are irrelevant */
|
||||
|
||||
thread_monitor_init(thread);
|
||||
|
||||
@@ -128,24 +128,6 @@ typedef struct _callee_saved_stack _callee_saved_stack_t;
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
/* Bitmask definitions for the struct tcs->flags bit field */
|
||||
|
||||
#define K_STATIC 0x00000800
|
||||
|
||||
#define K_READY 0x00000000 /* Thread is ready to run */
|
||||
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
|
||||
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
|
||||
#define K_PRESTART 0x00004000 /* Thread has not yet started */
|
||||
#define K_DEAD 0x00008000 /* Thread has terminated */
|
||||
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
|
||||
#define K_DUMMY 0x00020000 /* Not a real thread */
|
||||
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
|
||||
K_DEAD | K_SUSPENDED | K_DUMMY)
|
||||
|
||||
#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */
|
||||
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
|
||||
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
|
||||
|
||||
/* stacks */
|
||||
|
||||
#define STACK_ALIGN_SIZE 4
|
||||
|
||||
@@ -20,12 +20,13 @@
|
||||
#include <power.h>
|
||||
#include <soc_power.h>
|
||||
#include <init.h>
|
||||
#include <kernel_structs.h>
|
||||
|
||||
#include "ss_power_states.h"
|
||||
|
||||
#define SLEEP_MODE_CORE_OFF (0x0)
|
||||
#define SLEEP_MODE_CORE_TIMERS_RTC_OFF (0x60)
|
||||
#define ENABLE_INTERRUPTS BIT(4)
|
||||
#define ENABLE_INTERRUPTS (BIT(4) | _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL))
|
||||
|
||||
#define ARC_SS1 (SLEEP_MODE_CORE_OFF | ENABLE_INTERRUPTS)
|
||||
#define ARC_SS2 (SLEEP_MODE_CORE_TIMERS_RTC_OFF | ENABLE_INTERRUPTS)
|
||||
|
||||
@@ -28,6 +28,7 @@ config CPU_CORTEX_M
|
||||
# Omit prompt to signify "hidden" option
|
||||
default n
|
||||
select CPU_CORTEX
|
||||
select ARCH_HAS_CUSTOM_SWAP_TO_MAIN
|
||||
help
|
||||
This option signifies the use of a CPU of the Cortex-M family.
|
||||
|
||||
|
||||
@@ -135,6 +135,7 @@ config NUM_IRQ_PRIO_BITS
|
||||
config RUNTIME_NMI
|
||||
bool
|
||||
prompt "Attach an NMI handler at runtime"
|
||||
select REBOOT
|
||||
default n
|
||||
help
|
||||
The kernel provides a simple NMI handler that simply hangs in a tight
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <nanokernel.h>
|
||||
#include <arch/cpu.h>
|
||||
#include <misc/printk.h>
|
||||
#include <misc/reboot.h>
|
||||
#include <toolchain.h>
|
||||
#include <sections.h>
|
||||
|
||||
@@ -51,7 +52,8 @@ static _NmiHandler_t handler = _SysNmiOnReset;
|
||||
static void _DefaultHandler(void)
|
||||
{
|
||||
printk("NMI received! Rebooting...\n");
|
||||
_ScbSystemReset();
|
||||
/* In ARM implementation sys_reboot ignores the parameter */
|
||||
sys_reboot(0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -33,6 +33,8 @@
|
||||
_ASM_FILE_PROLOGUE
|
||||
|
||||
GTEXT(__reset)
|
||||
GTEXT(memset)
|
||||
GDATA(_interrupt_stack)
|
||||
|
||||
/**
|
||||
*
|
||||
@@ -77,20 +79,29 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
|
||||
msr BASEPRI, r0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set PSP and use it to boot without using MSP, so that it
|
||||
* gets set to _interrupt_stack during nanoInit().
|
||||
*/
|
||||
ldr r0, =__CORTEXM_BOOT_PSP
|
||||
msr PSP, r0
|
||||
movs.n r0, #2 /* switch to using PSP (bit1 of CONTROL reg) */
|
||||
msr CONTROL, r0
|
||||
|
||||
#ifdef CONFIG_WDOG_INIT
|
||||
/* board-specific watchdog initialization is necessary */
|
||||
bl _WdogInit
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_INIT_STACKS
|
||||
ldr r0, =_interrupt_stack
|
||||
ldr r1, =0xaa
|
||||
ldr r2, =CONFIG_ISR_STACK_SIZE
|
||||
bl memset
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set PSP and use it to boot without using MSP, so that it
|
||||
* gets set to _interrupt_stack during nanoInit().
|
||||
*/
|
||||
ldr r0, =_interrupt_stack
|
||||
ldr r1, =CONFIG_ISR_STACK_SIZE
|
||||
adds r0, r0, r1
|
||||
msr PSP, r0
|
||||
movs.n r0, #2 /* switch to using PSP (bit1 of CONTROL reg) */
|
||||
msr CONTROL, r0
|
||||
|
||||
b _PrepC
|
||||
|
||||
#if defined(CONFIG_SOC_TI_LM3S6965_QEMU)
|
||||
|
||||
@@ -36,6 +36,8 @@
|
||||
|
||||
_ASM_FILE_PROLOGUE
|
||||
|
||||
GDATA(_main_stack)
|
||||
|
||||
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
|
||||
|
||||
/* in XIP kernels. the entry point is also the start of the vector table */
|
||||
@@ -43,7 +45,13 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
|
||||
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,__start)
|
||||
#endif
|
||||
|
||||
.word __CORTEXM_BOOT_MSP
|
||||
/*
|
||||
* setting the _very_ early boot on the main stack allows to use memset
|
||||
* on the interrupt stack when CONFIG_INIT_STACKS is enabled before
|
||||
* switching to the interrupt stack for the rest of the early boot
|
||||
*/
|
||||
.word _main_stack + CONFIG_MAIN_STACK_SIZE
|
||||
|
||||
.word __reset
|
||||
.word __nmi
|
||||
|
||||
|
||||
@@ -42,10 +42,6 @@ extern "C" {
|
||||
#include <sections.h>
|
||||
#include <misc/util.h>
|
||||
|
||||
/* location of MSP and PSP upon boot: at the end of SRAM */
|
||||
.equ __CORTEXM_BOOT_MSP, (CONFIG_SRAM_BASE_ADDRESS + KB(CONFIG_SRAM_SIZE) - 8)
|
||||
.equ __CORTEXM_BOOT_PSP, (__CORTEXM_BOOT_MSP - 0x100)
|
||||
|
||||
GTEXT(__start)
|
||||
GTEXT(_vector_table)
|
||||
|
||||
|
||||
@@ -100,7 +100,7 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
|
||||
* Our policy is to express priority levels with special properties
|
||||
* via flags
|
||||
*/
|
||||
if (flags | IRQ_ZERO_LATENCY) {
|
||||
if (flags & IRQ_ZERO_LATENCY) {
|
||||
prio = 2;
|
||||
} else {
|
||||
prio += IRQ_PRIORITY_OFFSET;
|
||||
|
||||
@@ -81,7 +81,7 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
|
||||
*/
|
||||
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
_thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
{
|
||||
@@ -112,14 +112,11 @@ void _new_thread(char *pStackMem, size_t stackSize,
|
||||
pInitCtx->xpsr =
|
||||
0x01000000UL; /* clear all, thumb bit is 1, even if RO */
|
||||
|
||||
/* k_q_node initialized upon first insertion in a list */
|
||||
tcs->base.flags = options | K_PRESTART;
|
||||
tcs->base.sched_locked = 0;
|
||||
_init_thread_base(&tcs->base, priority, K_PRESTART, options);
|
||||
|
||||
/* static threads overwrite it afterwards with real value */
|
||||
tcs->init_data = NULL;
|
||||
tcs->fn_abort = NULL;
|
||||
tcs->base.prio = priority;
|
||||
|
||||
#ifdef CONFIG_THREAD_CUSTOM_DATA
|
||||
/* Initialize custom data field (value is opaque to kernel) */
|
||||
@@ -135,15 +132,11 @@ void _new_thread(char *pStackMem, size_t stackSize,
|
||||
tcs->entry = (struct __thread_entry *)(pInitCtx);
|
||||
#endif
|
||||
|
||||
ARG_UNUSED(uk_task_ptr);
|
||||
|
||||
tcs->callee_saved.psp = (uint32_t)pInitCtx;
|
||||
tcs->arch.basepri = 0;
|
||||
|
||||
/* swap_return_value can contain garbage */
|
||||
|
||||
_nano_timeout_thread_init(tcs);
|
||||
|
||||
/* initial values in all other registers/TCS entries are irrelevant */
|
||||
|
||||
thread_monitor_init(tcs);
|
||||
|
||||
@@ -86,24 +86,6 @@ typedef struct __esf _esf_t;
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
/* Bitmask definitions for the struct tcs.flags bit field */
|
||||
|
||||
#define K_STATIC 0x00000800
|
||||
|
||||
#define K_READY 0x00000000 /* Thread is ready to run */
|
||||
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
|
||||
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
|
||||
#define K_PRESTART 0x00004000 /* Thread has not yet started */
|
||||
#define K_DEAD 0x00008000 /* Thread has terminated */
|
||||
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
|
||||
#define K_DUMMY 0x00020000 /* Not a real thread */
|
||||
#define K_EXECUTION_MASK \
|
||||
(K_TIMING | K_PENDING | K_PRESTART | K_DEAD | K_SUSPENDED | K_DUMMY)
|
||||
|
||||
#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */
|
||||
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
|
||||
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
|
||||
|
||||
/* stacks */
|
||||
|
||||
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
|
||||
|
||||
@@ -47,6 +47,47 @@ static ALWAYS_INLINE void nanoArchInit(void)
|
||||
_CpuIdleInit();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
_arch_switch_to_main_thread(char *main_stack, size_t main_stack_size,
|
||||
_thread_entry_t _main)
|
||||
{
|
||||
/* get high address of the stack, i.e. its start (stack grows down) */
|
||||
char *start_of_main_stack;
|
||||
|
||||
start_of_main_stack = main_stack + main_stack_size;
|
||||
start_of_main_stack = (void *)STACK_ROUND_DOWN(start_of_main_stack);
|
||||
|
||||
_current = (void *)main_stack;
|
||||
|
||||
__asm__ __volatile__(
|
||||
|
||||
/* move to main() thread stack */
|
||||
"msr PSP, %0 \t\n"
|
||||
|
||||
/* unlock interrupts */
|
||||
#ifdef CONFIG_CPU_CORTEX_M0_M0PLUS
|
||||
"cpsie i \t\n"
|
||||
#else
|
||||
"movs %%r1, #0 \n\t"
|
||||
"msr BASEPRI, %%r1 \n\t"
|
||||
#endif
|
||||
|
||||
/* branch to _thread_entry(_main, 0, 0, 0) */
|
||||
"mov %%r0, %1 \n\t"
|
||||
"bx %2 \t\n"
|
||||
|
||||
/* never gets here */
|
||||
|
||||
:
|
||||
: "r"(start_of_main_stack),
|
||||
"r"(_main), "r"(_thread_entry)
|
||||
|
||||
: "r0", "r1", "sp"
|
||||
);
|
||||
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
{
|
||||
|
||||
@@ -61,7 +61,7 @@ struct init_stack_frame {
|
||||
|
||||
|
||||
void _new_thread(char *stack_memory, size_t stack_size,
|
||||
void *uk_task_ptr, _thread_entry_t thread_func,
|
||||
_thread_entry_t thread_func,
|
||||
void *arg1, void *arg2, void *arg3,
|
||||
int priority, unsigned options)
|
||||
{
|
||||
@@ -85,11 +85,8 @@ void _new_thread(char *stack_memory, size_t stack_size,
|
||||
|
||||
/* Initialize various struct k_thread members */
|
||||
thread = (struct k_thread *)stack_memory;
|
||||
thread->base.prio = priority;
|
||||
|
||||
/* k_q_node initialized upon first insertion in a list */
|
||||
thread->base.flags = options | K_PRESTART;
|
||||
thread->base.sched_locked = 0;
|
||||
_init_thread_base(&thread->base, priority, K_PRESTART, options);
|
||||
|
||||
/* static threads overwrite it afterwards with real value */
|
||||
thread->init_data = NULL;
|
||||
@@ -99,16 +96,10 @@ void _new_thread(char *stack_memory, size_t stack_size,
|
||||
/* Initialize custom data field (value is opaque to kernel) */
|
||||
thread->custom_data = NULL;
|
||||
#endif
|
||||
ARG_UNUSED(uk_task_ptr);
|
||||
|
||||
thread->callee_saved.sp = (uint32_t)iframe;
|
||||
thread->callee_saved.ra = (uint32_t)_thread_entry_wrapper;
|
||||
thread->callee_saved.key = NIOS2_STATUS_PIE_MSK;
|
||||
/* Leave the rest of thread->callee_saved junk */
|
||||
|
||||
#ifdef CONFIG_NANO_TIMEOUTS
|
||||
_nano_timeout_thread_init(thread);
|
||||
#endif
|
||||
|
||||
thread_monitor_init(thread);
|
||||
}
|
||||
|
||||
@@ -47,24 +47,13 @@ extern "C" {
|
||||
#include <misc/dlist.h>
|
||||
#endif
|
||||
|
||||
/* Bitmask definitions for the struct tcs->flags bit field */
|
||||
#define K_STATIC 0x00000800
|
||||
/* nios2 bitmask definitions for the struct k_thread->flags bit field */
|
||||
|
||||
#define K_READY 0x00000000 /* Thread is ready to run */
|
||||
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
|
||||
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
|
||||
#define K_PRESTART 0x00004000 /* Thread has not yet started */
|
||||
#define K_DEAD 0x00008000 /* Thread has terminated */
|
||||
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
|
||||
#define K_DUMMY 0x00020000 /* Not a real thread */
|
||||
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
|
||||
K_DEAD | K_SUSPENDED | K_DUMMY)
|
||||
/* 1 = executing context is interrupt handler */
|
||||
#define INT_ACTIVE (1 << 1)
|
||||
|
||||
#define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */
|
||||
#define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */
|
||||
#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */
|
||||
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
|
||||
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
|
||||
/* 1 = executing context is exception handler */
|
||||
#define EXC_ACTIVE (1 << 2)
|
||||
|
||||
/* stacks */
|
||||
|
||||
|
||||
@@ -312,10 +312,6 @@ alreadyOnIntStack:
|
||||
* _Swap() to determine whether non-floating registers need to be
|
||||
* preserved using the lazy save/restore algorithm, or to indicate to
|
||||
* debug tools that a preemptive context switch has occurred.
|
||||
*
|
||||
* Setting the NO_METRICS bit tells _Swap() that the per-execution context
|
||||
* [totalRunTime] calculation has already been performed and that
|
||||
* there is no need to do it again.
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
|
||||
|
||||
@@ -76,22 +76,18 @@ static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread)
|
||||
* @return N/A
|
||||
*/
|
||||
static void _new_thread_internal(char *pStackMem, unsigned stackSize,
|
||||
void *uk_task_ptr, int priority,
|
||||
int priority,
|
||||
unsigned options)
|
||||
{
|
||||
unsigned long *pInitialCtx;
|
||||
/* ptr to the new task's k_thread */
|
||||
struct k_thread *thread = (struct k_thread *)pStackMem;
|
||||
|
||||
thread->base.prio = priority;
|
||||
#if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
|
||||
thread->arch.excNestCount = 0;
|
||||
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
|
||||
|
||||
/* k_q_node initialized upon first insertion in a list */
|
||||
|
||||
thread->base.flags = options | K_PRESTART;
|
||||
thread->base.sched_locked = 0;
|
||||
_init_thread_base(&thread->base, priority, K_PRESTART, options);
|
||||
|
||||
/* static threads overwrite it afterwards with real value */
|
||||
thread->init_data = NULL;
|
||||
@@ -103,8 +99,6 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
|
||||
thread->custom_data = NULL;
|
||||
#endif
|
||||
|
||||
ARG_UNUSED(uk_task_ptr);
|
||||
|
||||
/*
|
||||
* The creation of the initial stack for the task has already been done.
|
||||
* Now all that is needed is to set the ESP. However, we have been passed
|
||||
@@ -139,8 +133,6 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
|
||||
PRINTK("\nstruct thread * = 0x%x", thread);
|
||||
|
||||
thread_monitor_init(thread);
|
||||
|
||||
_nano_timeout_thread_init(thread);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
|
||||
@@ -246,7 +238,7 @@ __asm__("\t.globl _thread_entry\n"
|
||||
* @return opaque pointer to initialized k_thread structure
|
||||
*/
|
||||
void _new_thread(char *pStackMem, size_t stackSize,
|
||||
void *uk_task_ptr, _thread_entry_t pEntry,
|
||||
_thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned options)
|
||||
{
|
||||
@@ -308,5 +300,5 @@ void _new_thread(char *pStackMem, size_t stackSize,
|
||||
* aside for the thread's stack.
|
||||
*/
|
||||
|
||||
_new_thread_internal(pStackMem, stackSize, uk_task_ptr, priority, options);
|
||||
_new_thread_internal(pStackMem, stackSize, priority, options);
|
||||
}
|
||||
|
||||
@@ -52,36 +52,21 @@
|
||||
|
||||
#define STACK_ALIGN_SIZE 4
|
||||
|
||||
/*
|
||||
* Bitmask definitions for the struct k_thread->flags bit field
|
||||
*/
|
||||
/* x86 Bitmask definitions for the struct k_thread->flags bit field */
|
||||
|
||||
#define K_STATIC 0x00000800
|
||||
/* executing context is interrupt handler */
|
||||
#define INT_ACTIVE (1 << 1)
|
||||
|
||||
#define K_READY 0x00000000 /* Thread is ready to run */
|
||||
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
|
||||
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
|
||||
#define K_PRESTART 0x00004000 /* Thread has not yet started */
|
||||
#define K_DEAD 0x00008000 /* Thread has terminated */
|
||||
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
|
||||
#define K_DUMMY 0x00020000 /* Not a real thread */
|
||||
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
|
||||
K_DEAD | K_SUSPENDED | K_DUMMY)
|
||||
|
||||
#define INT_ACTIVE 0x2 /* 1 = executing context is interrupt handler */
|
||||
#define EXC_ACTIVE 0x4 /* 1 = executing context is exception handler */
|
||||
#if defined(CONFIG_FP_SHARING)
|
||||
#define K_FP_REGS 0x10 /* 1 = thread uses floating point registers */
|
||||
#endif
|
||||
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
|
||||
#define K_SSE_REGS 0x20 /* 1 = thread uses SSEx (and also FP) registers */
|
||||
#endif
|
||||
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
|
||||
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
|
||||
#define NO_METRICS_BIT_OFFSET 0xa /* Bit position of NO_METRICS */
|
||||
/* executing context is exception handler */
|
||||
#define EXC_ACTIVE (1 << 2)
|
||||
|
||||
#define INT_OR_EXC_MASK (INT_ACTIVE | EXC_ACTIVE)
|
||||
|
||||
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
|
||||
/* thread uses SSEx (and also FP) registers */
|
||||
#define K_SSE_REGS (1 << 5)
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
|
||||
#define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
|
||||
#elif defined(CONFIG_FP_SHARING)
|
||||
|
||||
@@ -30,15 +30,15 @@ uint64_t _pm_save_gdtr;
|
||||
uint64_t _pm_save_idtr;
|
||||
uint32_t _pm_save_esp;
|
||||
|
||||
extern void _power_soc_sleep(void);
|
||||
extern void _power_restore_cpu_context(void);
|
||||
extern void _power_soc_deep_sleep(void);
|
||||
|
||||
#if (defined(CONFIG_SYS_POWER_DEEP_SLEEP))
|
||||
static uint32_t *__x86_restore_info = (uint32_t *)CONFIG_BSP_SHARED_RAM_ADDR;
|
||||
|
||||
static void _deep_sleep(enum power_states state)
|
||||
{
|
||||
int restore;
|
||||
|
||||
__asm__ volatile ("wbinvd");
|
||||
|
||||
/*
|
||||
* Setting resume vector inside the restore_cpu_context
|
||||
* function since we have nothing to do before cpu context
|
||||
@@ -47,22 +47,20 @@ static void _deep_sleep(enum power_states state)
|
||||
* can be done before cpu context is restored and control
|
||||
* transferred to _sys_soc_suspend.
|
||||
*/
|
||||
qm_x86_set_resume_vector(_sys_soc_restore_cpu_context,
|
||||
qm_x86_set_resume_vector(_power_restore_cpu_context,
|
||||
*__x86_restore_info);
|
||||
|
||||
restore = _sys_soc_save_cpu_context();
|
||||
power_soc_set_x86_restore_flag();
|
||||
|
||||
if (!restore) {
|
||||
power_soc_set_x86_restore_flag();
|
||||
|
||||
switch (state) {
|
||||
case SYS_POWER_STATE_DEEP_SLEEP_1:
|
||||
power_soc_sleep();
|
||||
case SYS_POWER_STATE_DEEP_SLEEP:
|
||||
power_soc_deep_sleep();
|
||||
default:
|
||||
break;
|
||||
}
|
||||
switch (state) {
|
||||
case SYS_POWER_STATE_DEEP_SLEEP_1:
|
||||
_power_soc_sleep();
|
||||
break;
|
||||
case SYS_POWER_STATE_DEEP_SLEEP:
|
||||
_power_soc_deep_sleep();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -23,30 +23,24 @@ GDATA(_pm_save_gdtr)
|
||||
GDATA(_pm_save_idtr)
|
||||
GDATA(_pm_save_esp)
|
||||
|
||||
GTEXT(_sys_soc_save_cpu_context)
|
||||
GTEXT(_sys_soc_restore_cpu_context)
|
||||
GTEXT(_sys_soc_resume_from_deep_sleep)
|
||||
GTEXT(_power_restore_cpu_context)
|
||||
GTEXT(_power_soc_sleep)
|
||||
GTEXT(_power_soc_deep_sleep)
|
||||
|
||||
SECTION_FUNC(TEXT, save_cpu_context)
|
||||
movl %esp, %eax /* save ptr to return address */
|
||||
|
||||
SECTION_FUNC(TEXT, _sys_soc_save_cpu_context)
|
||||
movl %esp, %eax /* save ptr to return address */
|
||||
pushf /* save flags */
|
||||
pusha /* save GPRs */
|
||||
|
||||
movl %esp, _pm_save_esp /* save stack ptr */
|
||||
sidtl _pm_save_idtr /* save idtr */
|
||||
sgdtl _pm_save_gdtr /* save gdtr */
|
||||
|
||||
pushl (%eax) /* push return address */
|
||||
|
||||
xorl %eax, %eax /* 0 indicates saved context */
|
||||
pushl (%eax) /* push return address */
|
||||
ret
|
||||
|
||||
SECTION_FUNC(TEXT, _sys_soc_restore_cpu_context)
|
||||
/*
|
||||
* Will transfer control to _sys_power_save_cpu_context,
|
||||
* from where it will return 1 indicating the function
|
||||
* is exiting after a context switch.
|
||||
*/
|
||||
SECTION_FUNC(TEXT, _power_restore_cpu_context)
|
||||
lgdtl _pm_save_gdtr /* restore gdtr */
|
||||
lidtl _pm_save_idtr /* restore idtr */
|
||||
movl _pm_save_esp, %esp /* restore saved stack ptr */
|
||||
@@ -54,18 +48,32 @@ SECTION_FUNC(TEXT, _sys_soc_restore_cpu_context)
|
||||
popf /* restore saved flags */
|
||||
|
||||
/*
|
||||
* At this point context is restored as it was saved
|
||||
* in _sys_soc_save_cpu_context. The following ret
|
||||
* will emulate a return from that function. Move 1
|
||||
* to eax to emulate a return 1. The caller of
|
||||
* _sys_soc_save_cpu_context will identify it is
|
||||
* returning from a context restore based on the
|
||||
* return value = 1.
|
||||
* At this point the stack contents will be as follows:
|
||||
*
|
||||
* Saved context
|
||||
* ESP ---> Return address of save_cpu_context
|
||||
* Return address of _power_soc_sleep/deep_sleep
|
||||
*
|
||||
* We just popped the saved context. Next we pop out the address
|
||||
* of the caller of save_cpu_context.Then the ret would return
|
||||
* to caller of _power_soc_sleep or _power_soc_deep_sleep.
|
||||
*
|
||||
*/
|
||||
xorl %eax, %eax
|
||||
incl %eax
|
||||
addl $4, %esp
|
||||
ret
|
||||
|
||||
SECTION_FUNC(TEXT, _power_soc_sleep)
|
||||
call save_cpu_context
|
||||
wbinvd
|
||||
call power_soc_sleep
|
||||
/* Does not return */
|
||||
|
||||
SECTION_FUNC(TEXT, _power_soc_deep_sleep)
|
||||
call save_cpu_context
|
||||
wbinvd
|
||||
call power_soc_deep_sleep
|
||||
/* Does not return */
|
||||
|
||||
/*
|
||||
* This is an example function to handle the deep sleep resume notification
|
||||
* in the absence of bootloader context restore support.
|
||||
@@ -78,8 +86,8 @@ SECTION_FUNC(TEXT, _sys_soc_restore_cpu_context)
|
||||
*/
|
||||
SECTION_FUNC(TEXT, _sys_soc_resume_from_deep_sleep)
|
||||
movl $CONFIG_BSP_SHARED_RAM_ADDR, %eax
|
||||
cmpl $_sys_soc_restore_cpu_context, (%eax)
|
||||
je _sys_soc_restore_cpu_context
|
||||
cmpl $_power_restore_cpu_context, (%eax)
|
||||
je _power_restore_cpu_context
|
||||
ret
|
||||
|
||||
#endif
|
||||
|
||||
@@ -30,35 +30,6 @@ enum power_states {
|
||||
SYS_POWER_STATE_MAX
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Save CPU context
|
||||
*
|
||||
* This function would save the CPU context in the stack. It
|
||||
* would also save the idtr and gdtr registers. When context is
|
||||
* restored by _sys_soc_restore_cpu_context(), control will be
|
||||
* transferred into this function where the context was originally
|
||||
* saved. The return values would indicate whether it is returning
|
||||
* after saving context or after a context restore transferred
|
||||
* control to it.
|
||||
*
|
||||
* @retval 0 Indicates it is returning after saving cpu context
|
||||
* @retval 1 Indicates cpu context restore transferred control to it.
|
||||
*/
|
||||
int _sys_soc_save_cpu_context(void);
|
||||
|
||||
/**
|
||||
* @brief Restore CPU context
|
||||
*
|
||||
* This function would restore the CPU context that was saved in
|
||||
* the stack by _sys_soc_save_cpu_context(). It would also restore
|
||||
* the idtr and gdtr registers.
|
||||
*
|
||||
* After context is restored, control will be transferred into
|
||||
* _sys_soc_save_cpu_context() function where the context was originally
|
||||
* saved.
|
||||
*/
|
||||
FUNC_NORETURN void _sys_soc_restore_cpu_context(void);
|
||||
|
||||
/**
|
||||
* @brief Put processor into low power state
|
||||
*
|
||||
|
||||
@@ -15,3 +15,4 @@ CONFIG_UART_QMSI=y
|
||||
CONFIG_CONSOLE=y
|
||||
CONFIG_UART_CONSOLE=y
|
||||
CONFIG_SERIAL=y
|
||||
CONFIG_OMIT_FRAME_POINTER=y
|
||||
|
||||
@@ -18,3 +18,4 @@ CONFIG_UART_NS16550_PORT_1=y
|
||||
CONFIG_UART_NS16550_PORT_0=n
|
||||
CONFIG_UART_INTERRUPT_DRIVEN=y
|
||||
CONFIG_GPIO=y
|
||||
CONFIG_OMIT_FRAME_POINTER=y
|
||||
|
||||
@@ -15,3 +15,4 @@ CONFIG_UART_CONSOLE=y
|
||||
CONFIG_UART_QMSI=y
|
||||
CONFIG_UART_CONSOLE=y
|
||||
CONFIG_SERIAL=y
|
||||
CONFIG_OMIT_FRAME_POINTER=y
|
||||
|
||||
@@ -83,7 +83,7 @@ is capable of holding 64 words of data and metadata information.
|
||||
#define MY_RING_BUF_SIZE 64
|
||||
|
||||
struct my_struct {
|
||||
struct ring_buffer rb;
|
||||
struct ring_buf rb;
|
||||
uint32_t buffer[MY_RING_BUF_SIZE];
|
||||
...
|
||||
};
|
||||
|
||||
@@ -3,175 +3,98 @@
|
||||
Power Management
|
||||
################
|
||||
|
||||
The power management infrastructure consists of interfaces exported by the
|
||||
power management subsystem. This subsystem exports interfaces that a
|
||||
:abbr:`Power Management Application (PMA)` uses to implement power management
|
||||
policies.
|
||||
Zephyr RTOS power management subsystem provides several means for a system
|
||||
integrator to implement power management support that can take full
|
||||
advantage of the power saving features of SOCs.
|
||||
|
||||
|
||||
Terminology
|
||||
***********
|
||||
|
||||
:dfn:`PMA`
|
||||
:dfn:`SOC interface`
|
||||
This is a general term for the components that have knowledge of the
|
||||
SOC and provide interfaces to the hardware features. It will abstract
|
||||
the SOC specific implementations to the applications and the OS.
|
||||
|
||||
The system integrator provides the :abbr:`PMA (Power Manager
|
||||
Application)`. The PMA maintains any power management policies and
|
||||
executes the power management actions based on those policies.
|
||||
The PMA must be integrated into the main Zephyr application.
|
||||
:dfn:`CPU LPS (Low Power State)`
|
||||
Refers to any one of the low power states supported by the CPU. The CPU is
|
||||
usually powered on while the clocks are power gated.
|
||||
|
||||
:dfn:`LPS`
|
||||
:dfn:`Active State`
|
||||
The CPU and clocks are powered on. This is the normal operating state when
|
||||
the system is running.
|
||||
|
||||
:abbr:`LPS (Low Power States)` refers to any one of the low power states supported by the CPU.
|
||||
:dfn:`Deep Sleep State`
|
||||
The CPU is power gated and loses context. Most peripherals would also be
|
||||
power gated. RAM is selectively retained.
|
||||
|
||||
:dfn:`SoC Power State`
|
||||
:dfn:`SOC Power State`
|
||||
SOC Power State describes processor and device power states implemented at
|
||||
the SOC level. Deep Sleep State is an example of SOC Power State.
|
||||
|
||||
An SoC Power State describes processor and device power statuses
|
||||
implemented at the SoC level.
|
||||
:dfn:`Idle Thread`
|
||||
A system thread that runs when there are no other threads ready to run.
|
||||
|
||||
:dfn:`Hook function`
|
||||
|
||||
A Hook function is a callback function that one component implements and
|
||||
another component calls. For example, the PMA implements functions that
|
||||
the kernel calls.
|
||||
|
||||
Architecture and SoC dependent Power States:
|
||||
============================================
|
||||
|
||||
On x86:
|
||||
-------
|
||||
|
||||
`Active`
|
||||
The CPU is active and running in the hardware defined C0 C-state.
|
||||
|
||||
`Idle`
|
||||
The CPU is not active but continues to be powered.
|
||||
The CPU may be in one of any lower C-states: C1, C2, etc.
|
||||
|
||||
`Deep Sleep`
|
||||
The Power is off to the processor and system clock. RAM is retained.
|
||||
|
||||
On ARM
|
||||
------
|
||||
|
||||
`Active`
|
||||
The CPU is active and running.
|
||||
|
||||
`Idle`
|
||||
Stops the processor clock. The ARM documentation describes
|
||||
this as *Sleep*.
|
||||
|
||||
`Deep Sleep`
|
||||
Stops the system clock and switches off the PLL and flash
|
||||
memory. RAM is retained.
|
||||
|
||||
On ARC
|
||||
------
|
||||
|
||||
`Active`
|
||||
The CPU is currently active and running in the SS0 state.
|
||||
|
||||
`Idle`
|
||||
Defined as the SS1 and SS2 states.
|
||||
|
||||
The power states described here are generic terms that map to the power
|
||||
states commonly supported by processors and SoCs based on the three
|
||||
architectures. When coding a PMA, please refer to the data sheet of the SoC
|
||||
to get details on each power state.
|
||||
:dfn:`Power gating`
|
||||
Power gating reduces power consumption by shutting off current to blocks of
|
||||
the integrated circuit that are not in use.
|
||||
|
||||
Overview
|
||||
********
|
||||
|
||||
The Zephyr power management subsystem provides interfaces that a system
|
||||
integrator can use to create a PMA. The PMA then enforces any policies
|
||||
needed. The design is based on the philosophy of not enforcing any policies
|
||||
in the kernel giving full flexibility to the PMA.
|
||||
The interfaces and APIs provided by the power management subsystem
|
||||
are designed to be architecture and SOC independent. This enables power
|
||||
management implementations to be easily adapted to different SOCs and
|
||||
architectures. The kernel does not implement any power schemes of its own, giving
|
||||
the system integrator the flexibility of implementing custom power schemes.
|
||||
|
||||
The provided infrastructure has an architecture independent interface.
|
||||
The kernel notifies the PMA when it is about to
|
||||
enter or exit a system idle state. The PMA can perform the power management
|
||||
policy operations during these notifications.
|
||||
The architecture and SOC independence is achieved by separating the core
|
||||
infrastructure and the SOC specific implementations. The SOC specific
|
||||
implementations are abstracted to the application and the OS using hardware
|
||||
abstraction layers.
|
||||
|
||||
Policies
|
||||
********
|
||||
The power management features are classified into the following categories.
|
||||
|
||||
When the power management subsystem notifies the PMA that the kernel is about
|
||||
to enter a system idle state, it specifies the period of time the system
|
||||
intends to stay idle. The PMA performs any power management operations during
|
||||
this time. The PMA can perform various operations. For example, put the
|
||||
processor or the SoC in a low power state, turn off some or all of the
|
||||
peripherals, and gate device clocks. Using combinations of these operations,
|
||||
the PMA can create fine grain custom power management policies.
|
||||
* Tickless Idle
|
||||
* System Power Management
|
||||
* Device Power Management
|
||||
|
||||
Different levels of power savings and different wake latencies characterize
|
||||
these fine grain policies. In general, operations that save more power have a
|
||||
higher wake latency. When making policy decisions, the PMA chooses the
|
||||
policy that saves the most power. At the same time, the policy's total
|
||||
execution time must fit well within the idle time allotted by the power
|
||||
management subsystem.
|
||||
Tickless Idle
|
||||
*************
|
||||
|
||||
The Zephyr power management subsystem classifies policies into categories
|
||||
based on relative power savings and the corresponding wake latencies. These
|
||||
policies also loosely map to common processor and SoC power states in the
|
||||
supported architectures. The PMA should map the fine grain custom policies to
|
||||
the policy categories of the power management subsystem. The power management
|
||||
subsystem defines three categories:
|
||||
This is the name used to identify the event-based idling mechanism of the
|
||||
Zephyr RTOS kernel scheduler. The kernel scheduler can run in two modes. During
|
||||
normal operation, when at least one thread is active, it sets up the system
|
||||
timer in periodic mode and runs in an interval-based scheduling mode. The
|
||||
interval-based mode allows it to time slice between tasks. Many times, the
|
||||
threads would be waiting on semaphores, timeouts or for events. When there
|
||||
are no threads running, it is inefficient for the kernel scheduler to run
|
||||
in interval-based mode. This is because, in this mode the timer would trigger
|
||||
an interrupt at fixed intervals causing the scheduler to be invoked at each
|
||||
interval. The scheduler checks if any thread is ready to run. If no thread
|
||||
is ready to run then it is a waste of power because of the unnecessary CPU
|
||||
processing. This is avoided by the kernel switching to event-based idling
|
||||
mode whenever there is no thread ready to run.
|
||||
|
||||
* SYS_PM_LOW_POWER_STATE
|
||||
* SYS_PM_DEEP_SLEEP
|
||||
* SYS_PM_DEVICE_SUSPEND_ONLY
|
||||
The kernel holds an ordered list of thread timeouts in the system. These are
|
||||
the amount of time each thread has requested to wait. When the last active
|
||||
thread goes to wait, the idle thread is scheduled. The idle thread programs
|
||||
the timer to one-shot mode and programs the count to the earliest timeout
|
||||
from the ordered thread timeout list. When the timer expires, a timer event
|
||||
is generated. The ISR of this event will invoke the scheduler, which would
|
||||
schedule the thread associated with the timeout. Before scheduling the
|
||||
thread, the scheduler would switch the timer again to periodic mode. This
|
||||
method saves power because the CPU is removed from the wait only when there
|
||||
is a thread ready to run or if an external event occurred.
|
||||
|
||||
SYS_PM_LOW_POWER_STATE
|
||||
======================
|
||||
System Power Management
|
||||
***********************
|
||||
|
||||
In this policy category, the PMA performs power management operations on some
|
||||
or all devices and puts the processor into a low power state. The device
|
||||
power management operations can involve turning off peripherals and gating
|
||||
device clocks. When any of those operations causes the device registers to
|
||||
lose their state, then those states must be saved and restored. The PMA
|
||||
should map fine grain policies with relatively less wake latency to this
|
||||
category. Policies with larger wake latency should be mapped to the
|
||||
`SYS_PM_DEEP_SLEEP`_ category. Policies in this category exit from an
|
||||
external interrupt, a wake up event set by the PMA, or when the idle time
|
||||
alloted by the power management subsystem expires.
|
||||
|
||||
SYS_PM_DEEP_SLEEP
|
||||
=================
|
||||
|
||||
In this policy category, the PMA puts the system into the deep sleep power
|
||||
states supported by SoCs. In this state, the system clock is turned off. The
|
||||
processor is turned off and loses its state. RAM is expected to be retained
|
||||
and can save and restore processor states. Only the devices necessary to wake
|
||||
up the system from the deep sleep power state stay on. The SoC turns off the
|
||||
power to all other devices. Since this causes device registers to lose their
|
||||
state, they must be saved and restored. The PMA should map fine grain
|
||||
policies with the highest wake latency to this policy category. Policies in
|
||||
this category exit from SoC dependent wake events.
|
||||
|
||||
SYS_PM_DEVICE_SUSPEND_ONLY
|
||||
==========================
|
||||
|
||||
In this policy category, the PMA performs power management operations on some
|
||||
devices but none that result in a processor or SoC power state transition.
|
||||
The PMA should map its fine grain policies that have the lowest wake latency
|
||||
to this policy category. Policies in this category exit from an external
|
||||
interrupt or when the idle time alloted by the power management subsystem
|
||||
expires.
|
||||
|
||||
Some policy categories names are similar to the power states of processors or
|
||||
SoCs, for example, :code:`SYS_PM_DEEP_SLEEP`. However, they must be seen
|
||||
as policy categories and do not indicate any specific processor or SoC power
|
||||
state by themselves.
|
||||
|
||||
.. _pm_hook_infra:
|
||||
|
||||
Power Management Hook Infrastructure
|
||||
************************************
|
||||
|
||||
This infrastructure consists of the hook functions that the PMA implemented.
|
||||
The power management subsystem calls these hook functions when the kernel
|
||||
enters and exits the idle state, in other words, when the kernel has nothing
|
||||
to schedule. This section provides a general overview and general concepts of
|
||||
the hook functions. Refer to :ref:`power_management_api` for the detailed
|
||||
description of the APIs.
|
||||
This consists of the hook functions that the power management subsystem calls
|
||||
when the kernel enters and exits the idle state, in other words, when the kernel
|
||||
has nothing to schedule. This section provides a general overview of the hook
|
||||
functions. Refer to :ref:`power_management_api` for the detailed description of
|
||||
the APIs.
|
||||
|
||||
Suspend Hook function
|
||||
=====================
|
||||
@@ -181,39 +104,31 @@ Suspend Hook function
|
||||
int _sys_soc_suspend(int32_t ticks);
|
||||
|
||||
When the kernel is about to go idle, the power management subsystem calls the
|
||||
:code:`_sys_soc_suspend()` function, notifying the PMA that the kernel is
|
||||
ready to enter the idle state.
|
||||
:code:`_sys_soc_suspend()` function, notifying the SOC interface that the kernel
|
||||
is ready to enter the idle state.
|
||||
|
||||
At this point, the kernel has disabled interrupts and computed the maximum
|
||||
number of ticks the system can remain idle. The function passes the time that
|
||||
the system can remain idle to the PMA along with the notification. When
|
||||
notified, the PMA selects and executes one of the fine grain power policies
|
||||
that can be executed within the allotted time.
|
||||
time the system can remain idle. The function passes the time that
|
||||
the system can remain idle. The SOC interface performs power operations that
|
||||
can be done in the available time. The power management operation must halt
|
||||
execution on a CPU or SOC low power state. Before entering the low power state,
|
||||
the SOC interface must setup a wake event.
|
||||
|
||||
The power management subsystem expects the :code:`_sys_soc_suspend()` to
|
||||
return one of the following values based on the power management operations
|
||||
the PMA executed:
|
||||
the SOC interface executed:
|
||||
|
||||
:code:`SYS_PM_NOT_HANDLED`
|
||||
|
||||
No power management operations. Indicates that the PMA could not
|
||||
accomplish any actions in the time allotted by the kernel.
|
||||
|
||||
:code:`SYS_PM_DEVICE_SUSPEND_ONLY`
|
||||
|
||||
Only devices are suspended. Indicates that the PMA could accomplish any
|
||||
device suspend operations. These operations do not include any processor
|
||||
or SOC power operations.
|
||||
Indicates that no power management operations were performed.
|
||||
|
||||
:code:`SYS_PM_LOW_POWER_STATE`
|
||||
|
||||
Entered a LPS. Indicates that the PMA could put the processor into a low
|
||||
power state.
|
||||
Indicates that the CPU was put in a low power state.
|
||||
|
||||
:code:`SYS_PM_DEEP_SLEEP`
|
||||
|
||||
Entered deep sleep. Indicates that the PMA could put the SoC in a deep
|
||||
sleep state.
|
||||
Indicates that the SOC was put in a deep sleep state.
|
||||
|
||||
Resume Hook function
|
||||
====================
|
||||
@@ -222,29 +137,126 @@ Resume Hook function
|
||||
|
||||
void _sys_soc_resume(void);
|
||||
|
||||
The kernel calls this hook function when exiting from an idle state or a low
|
||||
power state. Based on which policy the PMA executed in the
|
||||
:code:`_sys_soc_suspend()` function, the PMA performs the necessary recovery
|
||||
operations in this hook function.
|
||||
The power management subsystem optionally calls this hook function when exiting
|
||||
kernel idling if power management operations were performed in
|
||||
:code:`_sys_soc_suspend()`. Any necessary recovery operations can be performed
|
||||
in this function before the kernel scheduler schedules another thread. Some
|
||||
power states may not need this notification. It can be disabled by calling
|
||||
:code:`_sys_soc_pm_idle_exit_notification_disable()` from
|
||||
:code:`_sys_soc_suspend()`.
|
||||
|
||||
Since the hook functions are called with the interrupts disabled, the PMA
|
||||
should ensure that its operations are completed quickly. Thus, the PMA
|
||||
ensures that the kernel's scheduling performance is not disrupted.
|
||||
Resume From Deep Sleep Hook function
|
||||
====================================
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
void _sys_soc_resume_from_deep_sleep(void);
|
||||
|
||||
This function is optionally called when exiting from deep sleep if the SOC
|
||||
interface does not have bootloader support to handle resume from deep sleep.
|
||||
This function should restore context to the point where system entered
|
||||
the deep sleep state.
|
||||
|
||||
.. note::
|
||||
|
||||
Since the hook functions are called with the interrupts disabled, the SOC
|
||||
interface should ensure that its operations are completed quickly. Thus, the
|
||||
SOC interface ensures that the kernel's scheduling performance is not
|
||||
disrupted.
|
||||
|
||||
Power Schemes
|
||||
*************
|
||||
|
||||
When the power management subsystem notifies the SOC interface that the kernel
|
||||
is about to enter a system idle state, it specifies the period of time the
|
||||
system intends to stay idle. The SOC interface can perform various power
|
||||
management operations during this time. For example, put the processor or the
|
||||
SOC in a low power state, turn off some or all of the peripherals or power gate
|
||||
device clocks.
|
||||
|
||||
Different levels of power savings and different wake latencies characterize
|
||||
these power schemes. In general, operations that save more power have a
|
||||
higher wake latency. When making decisions, the SOC interface chooses the
|
||||
scheme that saves the most power. At the same time, the scheme's total
|
||||
execution time must fit within the idle time allotted by the power management
|
||||
subsystem.
|
||||
|
||||
The power management subsystem classifies power management schemes
|
||||
into two categories based on whether the CPU loses execution context during the
|
||||
power state transition.
|
||||
|
||||
* SYS_PM_LOW_POWER_STATE
|
||||
* SYS_PM_DEEP_SLEEP
|
||||
|
||||
SYS_PM_LOW_POWER_STATE
|
||||
======================
|
||||
|
||||
CPU does not lose execution context. Devices also do not lose power while
|
||||
entering power states in this category. The wake latencies of power states
|
||||
in this category are relatively low.
|
||||
|
||||
SYS_PM_DEEP_SLEEP
|
||||
=================
|
||||
|
||||
CPU is power gated and loses execution context. Execution will resume at
|
||||
OS startup code or at a resume point determined by a bootloader that supports
|
||||
deep sleep resume. Depending on the SOC's implementation of the power saving
|
||||
feature, it may turn off power to most devices. RAM may be retained by some
|
||||
implementations, while others may remove power from RAM saving considerable
|
||||
power. Power states in this category save more power than
|
||||
`SYS_PM_LOW_POWER_STATE`_ and would have higher wake latencies.
|
||||
|
||||
Device Power Management Infrastructure
|
||||
**************************************
|
||||
|
||||
The device power management infrastructure consists of interfaces to the Zephyr
|
||||
device model. These APIs send control commands to the device driver
|
||||
The device power management infrastructure consists of interfaces to the
|
||||
Zephyr RTOS device model. These APIs send control commands to the device driver
|
||||
to update its power state or to get its current power state.
|
||||
Refer to :ref:`power_management_api` for detailed descriptions of the APIs.
|
||||
|
||||
Zephyr RTOS supports two methods of doing device power management.
|
||||
|
||||
* Distributed method
|
||||
* Central method
|
||||
|
||||
Distributed method
|
||||
==================
|
||||
|
||||
In this method, the application or any component that deals with devices directly
|
||||
and has the best knowledge of their use does the device power management. This
|
||||
saves power if some devices that are not in use can be turned off or put
|
||||
in power saving mode. This method allows saving power even when the CPU is
|
||||
active. The components that use the devices need to be power aware and should
|
||||
be able to make decisions related to managing device power. In this method, the
|
||||
SOC interface can enter CPU or SOC low power states quickly when
|
||||
:code:`_sys_soc_suspend()` gets called. This is because it does not need to
|
||||
spend time doing device power management if the devices are already put in
|
||||
the appropriate low power state by the application or component managing the
|
||||
devices.
|
||||
|
||||
Central method
|
||||
==============
|
||||
|
||||
In this method device power management is mostly done inside
|
||||
:code:`_sys_soc_suspend()` along with entering a CPU or SOC low power state.
|
||||
|
||||
If a decision to enter deep sleep is made, the implementation would enter it
|
||||
only after checking if the devices are not in the middle of a hardware
|
||||
transaction that cannot be interrupted. This method can be used in
|
||||
implementations where the applications and components using devices are not
|
||||
expected to be power aware and do not implement device power management.
|
||||
|
||||
This method can also be used to emulate a hardware feature supported by some
|
||||
SOCs which cause automatic entry to deep sleep when all devices are idle.
|
||||
Refer to `Busy Status Indication`_ to see how to indicate whether a device is busy
|
||||
or idle.
|
||||
|
||||
Device Power Management States
|
||||
==============================
|
||||
The Zephyr OS power management subsystem defines four device states.
|
||||
These states are classified based on the degree of context that gets lost in
|
||||
those states, kind of operations done to save power and the impact on the device
|
||||
behavior due to the state transition. Device context include device hardware
|
||||
The Zephyr RTOS power management subsystem defines four device states.
|
||||
These states are classified based on the degree of device context that gets lost
|
||||
in those states, kind of operations done to save power, and the impact on the
|
||||
device behavior due to the state transition. Device context includes device
|
||||
registers, clocks, memory etc.
|
||||
|
||||
The four device power states:
|
||||
@@ -271,15 +283,13 @@ The four device power states:
|
||||
Device Power Management Operations
|
||||
==================================
|
||||
|
||||
Zephyr OS provides a generic API function to send control commands to the driver.
|
||||
Currently the supported control commands are:
|
||||
Zephyr RTOS power management subsystem provides a control function interface
|
||||
to device drivers to indicate power management operations to perform.
|
||||
The supported PM control commands are:
|
||||
|
||||
* DEVICE_PM_SET_POWER_STATE
|
||||
* DEVICE_PM_GET_POWER_STATE
|
||||
|
||||
In the future Zephyr OS may support additional control commands.
|
||||
Drivers can implement the control command handler to support the device driver's
|
||||
power management functionality.
|
||||
Each device driver defines:
|
||||
|
||||
* The device's supported power states.
|
||||
@@ -299,20 +309,20 @@ Device Model with Power Management Support
|
||||
|
||||
Drivers initialize the devices using macros. See :ref:`device_drivers` for
|
||||
details on how these macros are used. Use the DEVICE_DEFINE macro to initialize
|
||||
drivers providing power management support via the control function.
|
||||
One of the macro parameters is the pointer to the device_control handler function.
|
||||
drivers providing power management support via the PM control function.
|
||||
One of the macro parameters is the pointer to the device_pm_control handler function.
|
||||
|
||||
Default Initializer Function
|
||||
----------------------------
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int device_control_nop(struct device *unused_device, uint32_t unused_ctrl_command, void *unused_context);
|
||||
int device_pm_control_nop(struct device *unused_device, uint32_t unused_ctrl_command, void *unused_context);
|
||||
|
||||
|
||||
If the driver doesn't implement any power control operations, the driver can
|
||||
initialize the corresponding pointer with this default nop function. This
|
||||
default initializer function does nothing and should be used instead of
|
||||
default nop function does nothing and should be used instead of
|
||||
implementing a dummy function to avoid wasting code memory in the driver.
|
||||
|
||||
|
||||
@@ -329,18 +339,14 @@ Get Device List
|
||||
|
||||
void device_list_get(struct device **device_list, int *device_count);
|
||||
|
||||
The Zephyr kernel internally maintains a list of all devices in the system.
|
||||
The PMA uses this API to get the device list. The PMA can use the list to
|
||||
The Zephyr RTOS kernel internally maintains a list of all devices in the system.
|
||||
The SOC interface uses this API to get the device list. The SOC interface can use the list to
|
||||
identify the devices on which to execute power management operations.
|
||||
|
||||
The PMA can use this list to create a sorted order list based on device
|
||||
dependencies. The PMA creates device groups to execute different policies
|
||||
on each device group.
|
||||
|
||||
.. note::
|
||||
|
||||
Ensure that the PMA does not alter the original list. Since the kernel
|
||||
uses the original list, it should remain unchanged.
|
||||
Ensure that the SOC interface does not alter the original list. Since the kernel
|
||||
uses the original list, it must remain unchanged.
|
||||
|
||||
Device Set Power State
|
||||
----------------------
|
||||
@@ -349,7 +355,7 @@ Device Set Power State
|
||||
|
||||
int device_set_power_state(struct device *device, uint32_t device_power_state);
|
||||
|
||||
Calls the :c:func:`device_control()` handler function implemented by the
|
||||
Calls the :c:func:`device_pm_control()` handler function implemented by the
|
||||
device driver with DEVICE_PM_SET_POWER_STATE command.
|
||||
|
||||
Device Get Power State
|
||||
@@ -359,28 +365,37 @@ Device Get Power State
|
||||
|
||||
int device_get_power_state(struct device *device, uint32_t * device_power_state);
|
||||
|
||||
Calls the :c:func:`device_control()` handler function implemented by the
|
||||
Calls the :c:func:`device_pm_control()` handler function implemented by the
|
||||
device driver with DEVICE_PM_GET_POWER_STATE command.
|
||||
|
||||
Busy Status Indication
|
||||
======================
|
||||
|
||||
The PMA executes some power policies that can turn off power to devices,
|
||||
The SOC interface executes some power policies that can turn off power to devices,
|
||||
causing them to lose their state. If the devices are in the middle of some
|
||||
hardware transaction, like writing to flash memory when the power is turned
|
||||
off, then such transactions would be left in an inconsistent state. This
|
||||
infrastructure guards such transactions by indicating to the PMA that
|
||||
infrastructure guards such transactions by indicating to the SOC interface that
|
||||
the device is in the middle of a hardware transaction.
|
||||
|
||||
When the :code:`_sys_soc_suspend()` is called, the PMA checks if any device
|
||||
is busy. The PMA can then decide to execute a policy other than deep sleep or
|
||||
When the :code:`_sys_soc_suspend()` is called, the SOC interface checks if any device
|
||||
is busy. The SOC interface can then decide to execute a power management scheme other than deep sleep or
|
||||
to defer power management operations until the next call of
|
||||
:code:`_sys_soc_suspend()`.
|
||||
|
||||
If other recovery or retrieval methods are in place, the driver can avoid
|
||||
guarding the transactions. Not all hardware transactions must be guarded. The
|
||||
Zephyr kernel provides the following APIs for the device drivers and the PMA
|
||||
to decide whether a particular transaction must be guarded.
|
||||
An alternative to using the busy status mechanism is to use the
|
||||
`distributed method`_ of device power management. In such a method, where
device power management is handled in a distributed manner rather than centrally in
|
||||
:code:`_sys_soc_suspend()`, the decision to enter deep sleep can be made based
|
||||
on whether all devices are already turned off.
|
||||
|
||||
This feature can also be used to emulate a hardware feature found in some SOCs
that causes the system to automatically enter deep sleep when all devices are idle.
In such a usage, the busy status can be set by default and cleared as each
|
||||
device becomes idle. When :code:`_sys_soc_suspend()` is called, deep sleep can
|
||||
be entered if no device is found to be busy.
|
||||
|
||||
Here are the APIs used to set, clear, and check the busy status of devices.
|
||||
|
||||
Indicate Busy Status API
|
||||
------------------------
|
||||
@@ -422,8 +437,6 @@ Check Busy Status of All Devices API
|
||||
|
||||
Checks if any device is busy. The API returns 0 if no device in the system is busy.
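
The sketch below shows both sides of the mechanism: a driver guarding a hardware
transaction and the SOC interface consulting the aggregate busy status.
:code:`device_busy_set()` and :code:`device_busy_clear()` are assumed to be the
set/clear APIs referred to in this section, and :code:`flash_write_page()` is a
hypothetical helper.

.. code-block:: c

   /* Driver side: keep power off-limits while the transaction runs. */
   void my_flash_write(struct device *dev, off_t offset,
                       const void *data, size_t len)
   {
           device_busy_set(dev);
           flash_write_page(dev, offset, data, len);
           device_busy_clear(dev);
   }

   /* SOC interface side: only consider deep sleep when nothing is busy. */
   static int deep_sleep_allowed(void)
   {
           return device_any_busy_check() == 0;
   }
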
|
||||
|
||||
.. _pm_config_flags:
|
||||
|
||||
Power Management Configuration Flags
|
||||
************************************
|
||||
|
||||
@@ -434,9 +447,13 @@ the following configuration flags.
|
||||
|
||||
This flag enables the power management subsystem.
|
||||
|
||||
:code:`CONFIG_TICKLESS_IDLE`
|
||||
|
||||
This flag enables the tickless idle power saving feature.
|
||||
|
||||
:code:`CONFIG_SYS_POWER_LOW_POWER_STATE`
|
||||
|
||||
The PMA enables this flag to use the :code:`SYS_PM_LOW_POWER_STATE` policy.
|
||||
The SOC interface enables this flag to use the :code:`SYS_PM_LOW_POWER_STATE` policy.
|
||||
|
||||
:code:`CONFIG_SYS_POWER_DEEP_SLEEP`
|
||||
|
||||
@@ -444,155 +461,6 @@ the following configuration flags.
|
||||
|
||||
:code:`CONFIG_DEVICE_POWER_MANAGEMENT`
|
||||
|
||||
This flag is enabled if the PMA and the devices support device power
|
||||
This flag is enabled if the SOC interface and the devices support device power
|
||||
management.
|
||||
|
||||
Writing a Power Management Application
|
||||
**************************************
|
||||
|
||||
A typical PMA executes policies through power management APIs. This section
|
||||
details various scenarios that can be used to help developers write their own
|
||||
custom PMAs.
|
||||
|
||||
The PMA is part of a larger application doing more than just power
|
||||
management. This section focuses on the power management aspects of the
|
||||
application.
|
||||
|
||||
Initial Setup
|
||||
=============
|
||||
|
||||
To enable the power management support, the application must do the following:
|
||||
|
||||
#. Enable the :code:`CONFIG_SYS_POWER_MANAGEMENT` flag
|
||||
|
||||
#. Enable other required config flags described in :ref:`pm_config_flags`.
|
||||
|
||||
#. Implement the hook functions described in :ref:`pm_hook_infra`.
|
||||
|
||||
Device List and Policies
|
||||
========================
|
||||
|
||||
The PMA retrieves the list of enabled devices in the system using the
|
||||
:c:func:`device_list_get()` function. Since the PMA is part of the
|
||||
application, the PMA starts after all devices in the system have been
|
||||
initialized. Thus, the list of devices will not change once the application
|
||||
has begun.
|
||||
|
||||
Once the device list has been retrieved and stored, the PMA can form device
|
||||
groups and sorted lists based on device dependencies. The PMA uses the device
|
||||
lists and the known aggregate wake latency of the combination of power
|
||||
operations to create fine-grained custom power policies. Finally, the PMA
|
||||
maps these custom policies to the policy categories defined by the power
|
||||
management subsystem as described in `Policies`_.
|
||||
|
||||
Scenarios During Suspend
|
||||
========================
|
||||
|
||||
When the power management subsystem calls the :code:`_sys_soc_suspend()`
|
||||
function, the PMA can select between multiple scenarios.
|
||||
|
||||
Scenario 1
|
||||
----------
|
||||
|
||||
The time allotted is too short for any power management.
|
||||
|
||||
In this case, the PMA leaves the interrupts disabled and returns the code
:code:`SYS_PM_NOT_HANDLED`. These actions allow the Zephyr kernel to continue
with its normal idling process.
|
||||
|
||||
Scenario 2
|
||||
----------
|
||||
|
||||
The time allotted allows the suspension of some devices.
|
||||
|
||||
The PMA scans through the devices that meet the criteria and calls the
|
||||
:c:func:`device_set_power_state()` function with DEVICE_PM_SUSPEND_STATE state
|
||||
for each device.
|
||||
|
||||
After all devices are suspended properly, the PMA executes the following
|
||||
operations:
|
||||
|
||||
* If the time allotted is enough for the :code:`SYS_PM_LOW_POWER_STATE`
|
||||
policy:
|
||||
|
||||
#. The PMA sets up the wake event, puts the CPU in an LPS, and re-enables
the interrupts at the same time.
|
||||
|
||||
#. The PMA returns the :code:`SYS_PM_LOW_POWER_STATE` code.
|
||||
|
||||
* If the time allotted is not enough for the :code:`SYS_PM_LOW_POWER_STATE`
|
||||
policy, the PMA returns the :code:`SYS_PM_DEVICE_SUSPEND_ONLY` code.
|
||||
|
||||
When a device fails to suspend, the PMA executes the following operations:
|
||||
|
||||
* If the system integrator determined that the device is not essential to the
|
||||
suspend process, the PMA can ignore the failure.
|
||||
|
||||
* If the system integrator determined that the device is essential to the
|
||||
suspend process, the PMA takes any necessary recovery actions and
|
||||
returns the :code:`SYS_PM_NOT_HANDLED` code.
|
||||
|
||||
Scenario 3
|
||||
----------
|
||||
|
||||
The time allotted is enough for all devices to be suspended.
|
||||
|
||||
The PMA calls the :c:func:`device_set_power_state()` function with
|
||||
DEVICE_PM_SUSPEND_STATE state for each device.
|
||||
|
||||
After all devices are suspended properly and the time allotted is enough for
|
||||
the :code:`SYS_PM_DEEP_SLEEP` policy, the PMA executes the following
|
||||
operations:
|
||||
|
||||
#. Calls the :c:func:`device_any_busy_check()` function to get device busy
|
||||
status. If any device is busy, the PMA must choose a policy other than
|
||||
:code:`SYS_PM_DEEP_SLEEP`.
|
||||
#. Sets up wake event.
|
||||
#. Puts the SOC in the deep sleep state.
|
||||
#. Re-enables interrupts.
|
||||
#. Returns the :code:`SYS_PM_DEEP_SLEEP` code.
|
||||
|
||||
If, on the other hand, the time allotted is only enough for the
|
||||
:code:`SYS_PM_LOW_POWER_STATE` policy, the PMA executes the following
|
||||
operations:
|
||||
|
||||
#. Sets up wake event.
|
||||
#. Puts the CPU in an LPS, re-enabling interrupts at the same time.
|
||||
#. Returns the :code:`SYS_PM_LOW_POWER_STATE` code.
|
||||
|
||||
If the time allotted is not enough for any CPU or SOC power management
|
||||
operations, the PMA returns the :code:`SYS_PM_DEVICE_SUSPEND_ONLY` code.
|
||||
|
||||
When a device fails to suspend, the PMA executes the following operations:
|
||||
|
||||
* If the system integrator determined that the device is not essential to the
|
||||
suspend process, the PMA can ignore the failure.
|
||||
|
||||
* If the system integrator determined that the device is essential to the
|
||||
suspend process, the PMA takes any necessary recovery actions and
|
||||
returns the :code:`SYS_PM_NOT_HANDLED` code.
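
The three scenarios can be folded into one skeleton of the suspend hook. The
threshold constants and the helpers :code:`suspend_devices()`,
:code:`enter_low_power_state()`, and :code:`enter_deep_sleep()` are hypothetical
placeholders; only the return codes and :c:func:`device_any_busy_check()` come from
the sections above, and the :code:`int32_t ticks` parameter is assumed to carry the
time allotted.

.. code-block:: c

   int _sys_soc_suspend(int32_t ticks)
   {
           if (ticks < MIN_TICKS_FOR_DEVICE_PM) {
                   return SYS_PM_NOT_HANDLED; /* Scenario 1 */
           }

           if (suspend_devices() != 0) {
                   /* essential device failed: recover, then bail out */
                   return SYS_PM_NOT_HANDLED;
           }

           if (ticks >= MIN_TICKS_FOR_DEEP_SLEEP &&
               device_any_busy_check() == 0) {
                   enter_deep_sleep(); /* sets wake event, re-enables IRQs */
                   return SYS_PM_DEEP_SLEEP;
           }

           if (ticks >= MIN_TICKS_FOR_LPS) {
                   enter_low_power_state();
                   return SYS_PM_LOW_POWER_STATE;
           }

           return SYS_PM_DEVICE_SUSPEND_ONLY; /* devices only, no CPU/SoC PM */
   }
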
|
||||
|
||||
Policy Decision Summary
|
||||
=======================
|
||||
|
||||
+---------------------------------+---------------------------------------+
|
||||
| PM operations | Policy and Return Code |
|
||||
+=================================+=======================================+
|
||||
| Suspend some devices and | :code:`SYS_PM_LOW_POWER_STATE` |
|
||||
| | |
|
||||
| Enter Low Power State | |
|
||||
+---------------------------------+---------------------------------------+
|
||||
| Suspend all devices and | :code:`SYS_PM_LOW_POWER_STATE` |
|
||||
| | |
|
||||
| Enter Low Power State | |
|
||||
+---------------------------------+---------------------------------------+
|
||||
| Suspend all devices and | :code:`SYS_PM_DEEP_SLEEP` |
|
||||
| | |
|
||||
| Enter Deep Sleep | |
|
||||
+---------------------------------+---------------------------------------+
|
||||
| Suspend some or all devices and | :code:`SYS_PM_DEVICE_SUSPEND_ONLY` |
|
||||
| | |
|
||||
| No CPU/SoC PM Operation | |
|
||||
+---------------------------------+---------------------------------------+
|
||||
| No PM operation | :code:`SYS_PM_NOT_HANDLED` |
|
||||
+---------------------------------+---------------------------------------+
|
||||
|
||||
@@ -62,6 +62,8 @@ static int aio_qmsi_cmp_disable(struct device *dev, uint8_t index)
|
||||
/* Disable comparator according to index */
|
||||
config.int_en &= ~(1 << index);
|
||||
config.power &= ~(1 << index);
|
||||
config.reference &= ~(1 << index);
|
||||
config.polarity &= ~(1 << index);
|
||||
|
||||
if (qm_ac_set_config(&config) != 0) {
|
||||
return -EINVAL;
|
||||
|
||||
@@ -47,6 +47,7 @@ config CLOCK_CONTROL_NRF5_K32SRC_DRV_NAME
|
||||
choice
|
||||
prompt "32KHz clock source"
|
||||
default CLOCK_CONTROL_NRF5_K32SRC_XTAL
|
||||
depends on CLOCK_CONTROL_NRF5
|
||||
|
||||
config CLOCK_CONTROL_NRF5_K32SRC_RC
|
||||
bool
|
||||
@@ -61,6 +62,7 @@ endchoice
|
||||
choice
|
||||
prompt "32KHz clock accuracy"
|
||||
default CLOCK_CONTROL_NRF5_K32SRC_20PPM
|
||||
depends on CLOCK_CONTROL_NRF5
|
||||
|
||||
config CLOCK_CONTROL_NRF5_K32SRC_500PPM
|
||||
bool
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
ifdef CONFIG_QMSI
|
||||
|
||||
KBUILD_CPPFLAGS +=-DENABLE_EXTERNAL_ISR_HANDLING
|
||||
|
||||
ifdef CONFIG_QMSI_LIBRARY
|
||||
ZEPHYRINCLUDE += -I$(CONFIG_QMSI_INSTALL_PATH)/include
|
||||
LIB_INCLUDE_DIR += -L$(CONFIG_QMSI_INSTALL_PATH:"%"=%)/lib
|
||||
@@ -21,4 +24,6 @@ SOC_WATCH_ENABLE ?= 0
|
||||
ifeq ($(CONFIG_SOC_WATCH),y)
|
||||
SOC_WATCH_ENABLE := 1
|
||||
CFLAGS += -DSOC_WATCH_ENABLE
|
||||
endif
|
||||
endif
|
||||
|
||||
endif
|
||||
|
||||
@@ -96,8 +96,7 @@ int32_t tc_hmac_set_key(TCHmacState_t ctx,
|
||||
int32_t tc_hmac_init(TCHmacState_t ctx)
|
||||
{
|
||||
/* input sanity check: */
|
||||
if (ctx == (TCHmacState_t) 0 ||
|
||||
ctx->key == (uint8_t *) 0) {
|
||||
if (ctx == (TCHmacState_t) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
}
|
||||
|
||||
@@ -114,7 +113,7 @@ int32_t tc_hmac_update(TCHmacState_t ctx,
|
||||
uint32_t data_length)
|
||||
{
|
||||
/* input sanity check: */
|
||||
if (ctx == (TCHmacState_t) 0 || ctx->key == (uint8_t *) 0) {
|
||||
if (ctx == (TCHmacState_t) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
}
|
||||
|
||||
@@ -128,8 +127,7 @@ int32_t tc_hmac_final(uint8_t *tag, uint32_t taglen, TCHmacState_t ctx)
|
||||
/* input sanity check: */
|
||||
if (tag == (uint8_t *) 0 ||
|
||||
taglen != TC_SHA256_DIGEST_SIZE ||
|
||||
ctx == (TCHmacState_t) 0 ||
|
||||
ctx->key == (uint8_t *) 0) {
|
||||
ctx == (TCHmacState_t) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
}
|
||||
|
||||
|
||||
@@ -66,7 +66,6 @@ int32_t tc_sha256_update(TCSha256State_t s, const uint8_t *data, size_t datalen)
|
||||
{
|
||||
/* input sanity check: */
|
||||
if (s == (TCSha256State_t) 0 ||
|
||||
s->iv == (uint32_t *) 0 ||
|
||||
data == (void *) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
} else if (datalen == 0) {
|
||||
@@ -91,8 +90,7 @@ int32_t tc_sha256_final(uint8_t *digest, TCSha256State_t s)
|
||||
|
||||
/* input sanity check: */
|
||||
if (digest == (uint8_t *) 0 ||
|
||||
s == (TCSha256State_t) 0 ||
|
||||
s->iv == (uint32_t *) 0) {
|
||||
s == (TCSha256State_t) 0) {
|
||||
return TC_CRYPTO_FAIL;
|
||||
}
|
||||
|
||||
|
||||
@@ -23,11 +23,13 @@
|
||||
#ifndef __CORTEX_M_NMI_H
|
||||
#define __CORTEX_M_NMI_H
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
#ifdef CONFIG_RUNTIME_NMI
|
||||
extern void _NmiInit(void);
|
||||
#define NMI_INIT() _NmiInit()
|
||||
#else
|
||||
#define NMI_INIT()
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif /* __CORTEX_M_NMI_H */
|
||||
|
||||
@@ -50,8 +50,9 @@ extern "C" {
|
||||
/** GPIO pin to be output. */
|
||||
#define GPIO_DIR_OUT (1 << 0)
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_DIR_MASK 0x1
|
||||
/** @endcond */
|
||||
|
||||
/** GPIO pin to trigger interrupt. */
|
||||
#define GPIO_INT (1 << 1)
|
||||
@@ -81,8 +82,9 @@ extern "C" {
|
||||
* GPIO_POL_* define the polarity of the GPIO (1 bit).
|
||||
*/
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_POL_POS 7
|
||||
/** @endcond */
|
||||
|
||||
/** GPIO pin polarity is normal. */
|
||||
#define GPIO_POL_NORMAL (0 << GPIO_POL_POS)
|
||||
@@ -90,15 +92,17 @@ extern "C" {
|
||||
/** GPIO pin polarity is inverted. */
|
||||
#define GPIO_POL_INV (1 << GPIO_POL_POS)
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_POL_MASK (1 << GPIO_POL_POS)
|
||||
/** @endcond */
|
||||
|
||||
/*
|
||||
* GPIO_PUD_* are related to pull-up/pull-down.
|
||||
*/
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_PUD_POS 8
|
||||
/** @endcond */
|
||||
|
||||
/** GPIO pin to have no pull-up or pull-down. */
|
||||
#define GPIO_PUD_NORMAL (0 << GPIO_PUD_POS)
|
||||
@@ -109,8 +113,9 @@ extern "C" {
|
||||
/** Enable GPIO pin pull-down. */
|
||||
#define GPIO_PUD_PULL_DOWN (2 << GPIO_PUD_POS)
|
||||
|
||||
/** For internal use. */
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
#define GPIO_PUD_MASK (3 << GPIO_PUD_POS)
|
||||
/** @endcond */
|
||||
|
||||
/*
|
||||
* GPIO_PIN_(EN-/DIS-)ABLE are for pin enable / disable.
|
||||
@@ -214,6 +219,7 @@ struct gpio_driver_api {
|
||||
* @param port Pointer to device structure for the driver instance.
|
||||
* @param pin Pin number to configure.
|
||||
* @param flags Flags for pin configuration. IN/OUT, interrupt ...
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_pin_configure(struct device *port, uint8_t pin,
|
||||
int flags)
|
||||
@@ -228,6 +234,7 @@ static inline int gpio_pin_configure(struct device *port, uint8_t pin,
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param pin Pin number where the data is written.
|
||||
* @param value Value set on the pin.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_pin_write(struct device *port, uint32_t pin,
|
||||
uint32_t value)
|
||||
@@ -242,6 +249,7 @@ static inline int gpio_pin_write(struct device *port, uint32_t pin,
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param pin Pin number where data is read.
|
||||
* @param value Integer pointer to receive the data values from the pin.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_pin_read(struct device *port, uint32_t pin,
|
||||
uint32_t *value)
|
||||
@@ -272,6 +280,7 @@ static inline void gpio_init_callback(struct gpio_callback *callback,
|
||||
* @brief Add an application callback.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param callback A valid Application's callback structure pointer.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*
|
||||
* Note: enables to add as many callback as needed on the same port.
|
||||
*/
|
||||
@@ -289,6 +298,7 @@ static inline int gpio_add_callback(struct device *port,
|
||||
* @brief Remove an application callback.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param callback A valid application's callback structure pointer.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*
|
||||
* Note: enables to remove as many callbacks as added through
|
||||
* gpio_add_callback().
|
||||
@@ -307,6 +317,7 @@ static inline int gpio_remove_callback(struct device *port,
|
||||
* @brief Enable callback(s) for a single pin.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param pin Pin number where the callback function is enabled.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*
|
||||
* Note: Depending on the driver implementation, this function will enable
|
||||
* the pin to trigger an interruption. So as a semantic detail, if no
|
||||
@@ -323,6 +334,7 @@ static inline int gpio_pin_enable_callback(struct device *port, uint32_t pin)
|
||||
* @brief Disable callback(s) for a single pin.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param pin Pin number where the callback function is disabled.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_pin_disable_callback(struct device *port, uint32_t pin)
|
||||
{
|
||||
@@ -337,6 +349,7 @@ static inline int gpio_pin_disable_callback(struct device *port, uint32_t pin)
|
||||
*
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param flags Flags for the port configuration. IN/OUT, interrupt ...
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_port_configure(struct device *port, int flags)
|
||||
{
|
||||
@@ -349,6 +362,7 @@ static inline int gpio_port_configure(struct device *port, int flags)
|
||||
* @brief Write a data value to the port.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param value Value to set on the port.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_port_write(struct device *port, uint32_t value)
|
||||
{
|
||||
@@ -361,6 +375,7 @@ static inline int gpio_port_write(struct device *port, uint32_t value)
|
||||
* @brief Read data value from the port.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @param value Integer pointer to receive the data value from the port.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_port_read(struct device *port, uint32_t *value)
|
||||
{
|
||||
@@ -372,6 +387,7 @@ static inline int gpio_port_read(struct device *port, uint32_t *value)
|
||||
/**
|
||||
* @brief Enable callback(s) for the port.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*
|
||||
* Note: Depending on the driver implementation, this function will enable
|
||||
* the port to trigger an interruption on all pins, as long as these
|
||||
@@ -388,6 +404,7 @@ static inline int gpio_port_enable_callback(struct device *port)
|
||||
/**
|
||||
* @brief Disable callback(s) for the port.
|
||||
* @param port Pointer to the device structure for the driver instance.
|
||||
* @return 0 if successful, negative errno code on failure.
|
||||
*/
|
||||
static inline int gpio_port_disable_callback(struct device *port)
|
||||
{
|
||||
|
||||
@@ -1694,6 +1694,12 @@ extern void k_sem_init(struct k_sem *sem, unsigned int initial_count,
|
||||
* @param timeout Waiting period to take the semaphore (in milliseconds),
|
||||
* or one of the special values K_NO_WAIT and K_FOREVER.
|
||||
*
|
||||
* @note When porting code from the nanokernel legacy API to the new API, be
|
||||
* careful with the return value of this function. The return value is the
|
||||
* reverse of the one of nano_sem_take family of APIs: 0 means success, and
|
||||
* non-zero means failure, while the nano_sem_take family returns 1 for success
|
||||
* and 0 for failure.
|
||||
*
|
||||
* @retval 0 Semaphore taken.
|
||||
* @retval -EBUSY Returned without waiting.
|
||||
* @retval -EAGAIN Waiting period timed out.
|
||||
|
||||
@@ -1060,8 +1060,9 @@ static inline __deprecated void nano_sem_give(struct nano_sem *sem)
|
||||
static inline __deprecated int nano_sem_take(struct nano_sem *sem,
|
||||
int32_t timeout_in_ticks)
|
||||
{
|
||||
return k_sem_take((struct k_sem *)sem, _ticks_to_ms(timeout_in_ticks))
|
||||
== 0 ? 1 : 0;
|
||||
int32_t ms = _ticks_to_ms(timeout_in_ticks);
|
||||
|
||||
return k_sem_take((struct k_sem *)sem, ms) == 0 ? 1 : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -354,3 +354,12 @@ config HEAP_MEM_POOL_SIZE
|
||||
heap memory pool is defined.
|
||||
|
||||
endmenu
|
||||
|
||||
config ARCH_HAS_CUSTOM_SWAP_TO_MAIN
|
||||
bool
|
||||
# hidden
|
||||
default n
|
||||
help
|
||||
It's possible that an architecture port cannot use _Swap() to swap to
|
||||
the _main() thread, but instead must do something custom. It must
|
||||
enable this option in that case.
|
||||
|
||||
@@ -23,6 +23,39 @@
|
||||
#include <misc/dlist.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Common bitmask definitions for the struct tcs->flags bit field.
|
||||
*
|
||||
* Must be before kerneL_arch_data.h because it might need them to be already
|
||||
* defined.
|
||||
*/
|
||||
|
||||
/* thread is defined statically */
|
||||
#define K_STATIC (1 << 8)
|
||||
|
||||
/* Thread is waiting on an object */
|
||||
#define K_PENDING (1 << 13)
|
||||
|
||||
/* Thread has not yet started */
|
||||
#define K_PRESTART (1 << 14)
|
||||
|
||||
/* Thread has terminated */
|
||||
#define K_DEAD (1 << 15)
|
||||
|
||||
/* Thread is suspended */
|
||||
#define K_SUSPENDED (1 << 16)
|
||||
|
||||
/* Not a real thread */
|
||||
#define K_DUMMY (1 << 17)
|
||||
|
||||
#if defined(CONFIG_FP_SHARING)
|
||||
/* thread uses floating point registers */
|
||||
#define K_FP_REGS (1 << 4)
|
||||
#endif
|
||||
|
||||
/* system thread that must not abort */
|
||||
#define K_ESSENTIAL (1 << 9)
|
||||
|
||||
#include <kernel_arch_data.h>
|
||||
|
||||
#if !defined(_ASMLANGUAGE)
|
||||
@@ -181,6 +214,10 @@ _set_thread_return_value_with_data(struct k_thread *thread,
|
||||
thread->base.swap_data = data;
|
||||
}
|
||||
|
||||
extern void _init_thread_base(struct _thread_base *thread_base,
|
||||
int priority, uint32_t initial_state,
|
||||
unsigned int options);
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#endif /* _kernel_structs__h_ */
|
||||
|
||||
@@ -258,22 +258,14 @@ static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
|
||||
thread->base.flags &= ~K_SUSPENDED;
|
||||
}
|
||||
|
||||
/* mark a thread as being in the timer queue */
|
||||
static inline void _mark_thread_as_timing(struct k_thread *thread)
|
||||
/* check if a thread is on the timeout queue */
|
||||
static inline int _is_thread_timeout_active(struct k_thread *thread)
|
||||
{
|
||||
thread->base.flags |= K_TIMING;
|
||||
}
|
||||
|
||||
/* mark a thread as not being in the timer queue */
|
||||
static inline void _mark_thread_as_not_timing(struct k_thread *thread)
|
||||
{
|
||||
thread->base.flags &= ~K_TIMING;
|
||||
}
|
||||
|
||||
/* check if a thread is on the timer queue */
|
||||
static inline int _is_thread_timing(struct k_thread *thread)
|
||||
{
|
||||
return !!(thread->base.flags & K_TIMING);
|
||||
#ifdef CONFIG_SYS_CLOCK_EXISTS
|
||||
return thread->base.timeout.delta_ticks_from_prev != -1;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int _has_thread_started(struct k_thread *thread)
|
||||
@@ -281,10 +273,19 @@ static inline int _has_thread_started(struct k_thread *thread)
|
||||
return !(thread->base.flags & K_PRESTART);
|
||||
}
|
||||
|
||||
static inline int _is_thread_prevented_from_running(struct k_thread *thread)
|
||||
{
|
||||
return thread->base.flags & (K_PENDING | K_PRESTART |
|
||||
K_DEAD | K_DUMMY |
|
||||
K_SUSPENDED);
|
||||
|
||||
}
|
||||
|
||||
/* check if a thread is ready */
|
||||
static inline int _is_thread_ready(struct k_thread *thread)
|
||||
{
|
||||
return (thread->base.flags & K_EXECUTION_MASK) == K_READY;
|
||||
return !(_is_thread_prevented_from_running(thread) ||
|
||||
_is_thread_timeout_active(thread));
|
||||
}
|
||||
|
||||
/* mark a thread as pending in its TCS */
|
||||
@@ -305,11 +306,22 @@ static inline int _is_thread_pending(struct k_thread *thread)
|
||||
return !!(thread->base.flags & K_PENDING);
|
||||
}
|
||||
|
||||
/*
|
||||
* Mark the thread as not being in the timer queue. If this makes it ready,
|
||||
* then add it to the ready queue according to its priority.
|
||||
/**
|
||||
* @brief Mark a thread as started
|
||||
*
|
||||
* This routine must be called with interrupts locked.
|
||||
*/
|
||||
static inline void _mark_thread_as_started(struct k_thread *thread)
|
||||
{
|
||||
thread->base.flags &= ~K_PRESTART;
|
||||
}
|
||||
|
||||
/*
|
||||
* Put the thread in the ready queue according to its priority if it is not
|
||||
* blocked for another reason (eg. suspended).
|
||||
*
|
||||
* Must be called with interrupts locked.
|
||||
*/
|
||||
/* must be called with interrupts locked */
|
||||
static inline void _ready_thread(struct k_thread *thread)
|
||||
{
|
||||
__ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
|
||||
@@ -324,24 +336,14 @@ static inline void _ready_thread(struct k_thread *thread)
|
||||
"thread %p prio too high (id %d, cannot be higher than %d)",
|
||||
thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
|
||||
|
||||
/* K_PRESTART is needed to handle the start-with-delay case */
|
||||
_reset_thread_states(thread, K_TIMING|K_PRESTART);
|
||||
/* needed to handle the start-with-delay case */
|
||||
_mark_thread_as_started(thread);
|
||||
|
||||
if (_is_thread_ready(thread)) {
|
||||
_add_thread_to_ready_q(thread);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Mark a thread as started
|
||||
*
|
||||
* This routine must be called with interrupts locked.
|
||||
*/
|
||||
static inline void _mark_thread_as_started(struct k_thread *thread)
|
||||
{
|
||||
thread->base.flags &= ~K_PRESTART;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Mark thread as dead
|
||||
*
|
||||
|
||||
@@ -57,7 +57,6 @@ extern void _thread_entry(void (*)(void *, void *, void *),
|
||||
void *, void *, void *);
|
||||
|
||||
extern void _new_thread(char *pStack, size_t stackSize,
|
||||
void *uk_task_ptr,
|
||||
void (*pEntry)(void *, void *, void *),
|
||||
void *p1, void *p2, void *p3,
|
||||
int prio, unsigned options);
|
||||
|
||||
@@ -65,18 +65,10 @@ static inline void _init_timeout(struct _timeout *t, _timeout_func_t func)
|
||||
*/
|
||||
}
|
||||
|
||||
static inline void _init_thread_timeout(struct k_thread *thread)
|
||||
static ALWAYS_INLINE void
|
||||
_init_thread_timeout(struct _thread_base *thread_base)
|
||||
{
|
||||
_init_timeout(&thread->base.timeout, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* XXX - backwards compatibility until the arch part is updated to call
|
||||
* _init_thread_timeout()
|
||||
*/
|
||||
static inline void _nano_timeout_thread_init(struct k_thread *thread)
|
||||
{
|
||||
_init_thread_timeout(thread);
|
||||
_init_timeout(&thread_base->timeout, NULL);
|
||||
}
|
||||
|
||||
/* remove a thread timing out from kernel object's wait queue */
|
||||
@@ -106,6 +98,8 @@ static inline struct _timeout *_handle_one_timeout(
|
||||
struct _timeout *t = (void *)sys_dlist_get(timeout_q);
|
||||
struct k_thread *thread = t->thread;
|
||||
|
||||
t->delta_ticks_from_prev = -1;
|
||||
|
||||
K_DEBUG("timeout %p\n", t);
|
||||
if (thread != NULL) {
|
||||
_unpend_thread_timing_out(thread, t);
|
||||
@@ -113,14 +107,6 @@ static inline struct _timeout *_handle_one_timeout(
|
||||
} else if (t->func) {
|
||||
t->func(t);
|
||||
}
|
||||
/*
|
||||
* Note: t->func() may add timeout again. Make sure that
|
||||
* delta_ticks_from_prev is set to -1 only if timeout is
|
||||
* still expired (delta_ticks_from_prev == 0)
|
||||
*/
|
||||
if (t->delta_ticks_from_prev == 0) {
|
||||
t->delta_ticks_from_prev = -1;
|
||||
}
|
||||
|
||||
return (struct _timeout *)sys_dlist_peek_head(timeout_q);
|
||||
}
|
||||
|
||||
@@ -30,10 +30,25 @@ extern "C" {
|
||||
#ifdef CONFIG_SYS_CLOCK_EXISTS
|
||||
#include <timeout_q.h>
|
||||
#else
|
||||
#define _init_thread_timeout(thread) do { } while ((0))
|
||||
#define _nano_timeout_thread_init(thread) _init_thread_timeout(thread)
|
||||
#define _add_thread_timeout(thread, wait_q, timeout) do { } while (0)
|
||||
static inline int _abort_thread_timeout(struct k_thread *thread) { return 0; }
|
||||
static ALWAYS_INLINE void _init_thread_timeout(struct _thread_base *thread_base)
|
||||
{
|
||||
ARG_UNUSED(thread_base);
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
_add_thread_timeout(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
|
||||
{
|
||||
ARG_UNUSED(thread);
|
||||
ARG_UNUSED(wait_q);
|
||||
ARG_UNUSED(timeout);
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE int _abort_thread_timeout(struct k_thread *thread)
|
||||
{
|
||||
ARG_UNUSED(thread);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#define _get_next_timeout_expiry() (K_FOREVER)
|
||||
#endif
|
||||
|
||||
|
||||
@@ -94,11 +94,11 @@ uint64_t __noinit __idle_tsc; /* timestamp when CPU goes idle */
|
||||
#define MAIN_STACK_SIZE CONFIG_MAIN_STACK_SIZE
|
||||
#endif
|
||||
|
||||
static char __noinit __stack main_stack[MAIN_STACK_SIZE];
|
||||
static char __noinit __stack idle_stack[IDLE_STACK_SIZE];
|
||||
char __noinit __stack _main_stack[MAIN_STACK_SIZE];
|
||||
char __noinit __stack _idle_stack[IDLE_STACK_SIZE];
|
||||
|
||||
k_tid_t const _main_thread = (k_tid_t)main_stack;
|
||||
k_tid_t const _idle_thread = (k_tid_t)idle_stack;
|
||||
k_tid_t const _main_thread = (k_tid_t)_main_stack;
|
||||
k_tid_t const _idle_thread = (k_tid_t)_idle_stack;
|
||||
|
||||
/*
|
||||
* storage space for the interrupt stack
|
||||
@@ -241,6 +241,9 @@ void __weak main(void)
|
||||
*/
|
||||
static void prepare_multithreading(struct k_thread *dummy_thread)
|
||||
{
|
||||
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
|
||||
ARG_UNUSED(dummy_thread);
|
||||
#else
|
||||
/*
|
||||
* Initialize the current execution thread to permit a level of
|
||||
* debugging output if an exception should happen during nanokernel
|
||||
@@ -257,6 +260,7 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
|
||||
*/
|
||||
dummy_thread->base.flags = K_ESSENTIAL;
|
||||
dummy_thread->base.prio = K_PRIO_COOP(0);
|
||||
#endif
|
||||
|
||||
/* _kernel.ready_q is all zeroes */
|
||||
|
||||
@@ -277,13 +281,13 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
|
||||
sys_dlist_init(&_ready_q.q[ii]);
|
||||
}
|
||||
|
||||
_new_thread(main_stack, MAIN_STACK_SIZE, NULL,
|
||||
_new_thread(_main_stack, MAIN_STACK_SIZE,
|
||||
_main, NULL, NULL, NULL,
|
||||
CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL);
|
||||
_mark_thread_as_started(_main_thread);
|
||||
_add_thread_to_ready_q(_main_thread);
|
||||
|
||||
_new_thread(idle_stack, IDLE_STACK_SIZE, NULL,
|
||||
_new_thread(_idle_stack, IDLE_STACK_SIZE,
|
||||
idle, NULL, NULL, NULL,
|
||||
K_LOWEST_THREAD_PRIO, K_ESSENTIAL);
|
||||
_mark_thread_as_started(_idle_thread);
|
||||
@@ -298,6 +302,9 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
|
||||
|
||||
static void switch_to_main_thread(void)
|
||||
{
|
||||
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
|
||||
_arch_switch_to_main_thread(_main_stack, MAIN_STACK_SIZE, _main);
|
||||
#else
|
||||
/*
|
||||
* Context switch to main task (entry function is _main()): the
|
||||
* current fake thread is not on a wait queue or ready queue, so it
|
||||
@@ -305,6 +312,7 @@ static void switch_to_main_thread(void)
|
||||
*/
|
||||
|
||||
_Swap(irq_lock());
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_STACK_CANARIES
|
||||
@@ -357,9 +365,14 @@ extern void *__stack_chk_guard;
|
||||
*/
|
||||
FUNC_NORETURN void _Cstart(void)
|
||||
{
|
||||
/* floating point operations are NOT performed during nanokernel init */
|
||||
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
|
||||
void *dummy_thread = NULL;
|
||||
#else
|
||||
/* floating point is NOT used during nanokernel init */
|
||||
|
||||
char __stack dummy_thread[_K_THREAD_NO_FLOAT_SIZEOF];
|
||||
char __stack dummy_stack[_K_THREAD_NO_FLOAT_SIZEOF];
|
||||
void *dummy_thread = dummy_stack;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Initialize nanokernel data structures. This step includes
|
||||
@@ -367,7 +380,7 @@ FUNC_NORETURN void _Cstart(void)
|
||||
* before the hardware initialization phase.
|
||||
*/
|
||||
|
||||
prepare_multithreading((struct k_thread *)&dummy_thread);
|
||||
prepare_multithreading(dummy_thread);
|
||||
|
||||
/* Deprecated */
|
||||
_sys_device_do_config_level(_SYS_INIT_LEVEL_PRIMARY);
|
||||
|
||||
@@ -33,7 +33,6 @@ void _legacy_sleep(int32_t ticks)
|
||||
|
||||
int key = irq_lock();
|
||||
|
||||
_mark_thread_as_timing(_current);
|
||||
_remove_thread_from_ready_q(_current);
|
||||
_add_thread_timeout(_current, NULL, ticks);
|
||||
|
||||
|
||||
@@ -88,7 +88,7 @@ static int init_mbox_module(struct device *dev)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
|
||||
async_msg[i].thread.flags = K_DUMMY;
|
||||
_init_thread_base(&async_msg[i].thread, 0, K_DUMMY, 0);
|
||||
k_stack_push(&async_msg_free, (uint32_t)&async_msg[i]);
|
||||
}
|
||||
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
|
||||
|
||||
@@ -168,9 +168,9 @@ void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
|
||||
_mark_thread_as_pending(thread);
|
||||
|
||||
if (timeout != K_FOREVER) {
|
||||
_mark_thread_as_timing(thread);
|
||||
_add_thread_timeout(thread, wait_q,
|
||||
_TICK_ALIGN + _ms_to_ticks(timeout));
|
||||
int32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);
|
||||
|
||||
_add_thread_timeout(thread, wait_q, ticks);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -306,12 +306,11 @@ void k_sleep(int32_t duration)
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t ticks = _TICK_ALIGN + _ms_to_ticks(duration);
|
||||
int key = irq_lock();
|
||||
|
||||
_mark_thread_as_timing(_current);
|
||||
_remove_thread_from_ready_q(_current);
|
||||
_add_thread_timeout(_current, NULL,
|
||||
_TICK_ALIGN + _ms_to_ticks(duration));
|
||||
_add_thread_timeout(_current, NULL, ticks);
|
||||
|
||||
_Swap(key);
|
||||
}
|
||||
|
||||
@@ -123,10 +123,8 @@ int k_sem_group_take(struct k_sem *sem_array[], struct k_sem **sem,
|
||||
_current->base.swap_data = &list;
|
||||
|
||||
for (int i = 0; i < num; i++) {
|
||||
wait_objects[i].dummy.flags = K_DUMMY;
|
||||
wait_objects[i].dummy.prio = priority;
|
||||
|
||||
_init_thread_timeout((struct k_thread *)&wait_objects[i].dummy);
|
||||
_init_thread_base(&wait_objects[i].dummy, priority, K_DUMMY, 0);
|
||||
|
||||
sys_dlist_append(&list, &wait_objects[i].desc.semg_node);
|
||||
wait_objects[i].desc.thread = _current;
|
||||
@@ -224,8 +222,8 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
|
||||
*/
|
||||
|
||||
if (!_is_thread_ready(desc->thread)) {
|
||||
_reset_thread_states(desc->thread, K_PENDING | K_TIMING);
|
||||
_abort_thread_timeout(desc->thread);
|
||||
_mark_thread_as_not_pending(desc->thread);
|
||||
if (_is_thread_ready(desc->thread)) {
|
||||
_add_thread_to_ready_q(desc->thread);
|
||||
}
|
||||
|
||||
@@ -220,9 +220,9 @@ static void schedule_new_thread(struct k_thread *thread, int32_t delay)
|
||||
if (delay == 0) {
|
||||
start_thread(thread);
|
||||
} else {
|
||||
_mark_thread_as_timing(thread);
|
||||
_add_thread_timeout(thread, NULL,
|
||||
_TICK_ALIGN + _ms_to_ticks(delay));
|
||||
int32_t ticks = _TICK_ALIGN + _ms_to_ticks(delay);
|
||||
|
||||
_add_thread_timeout(thread, NULL, ticks);
|
||||
}
|
||||
#else
|
||||
ARG_UNUSED(delay);
|
||||
@@ -239,7 +239,7 @@ k_tid_t k_thread_spawn(char *stack, size_t stack_size,
|
||||
|
||||
struct k_thread *new_thread = (struct k_thread *)stack;
|
||||
|
||||
_new_thread(stack, stack_size, NULL, entry, p1, p2, p3, prio, options);
|
||||
_new_thread(stack, stack_size, entry, p1, p2, p3, prio, options);
|
||||
|
||||
schedule_new_thread(new_thread, delay);
|
||||
|
||||
@@ -252,7 +252,8 @@ int k_thread_cancel(k_tid_t tid)
|
||||
|
||||
int key = irq_lock();
|
||||
|
||||
if (_has_thread_started(thread) || !_is_thread_timing(thread)) {
|
||||
if (_has_thread_started(thread) ||
|
||||
!_is_thread_timeout_active(thread)) {
|
||||
irq_unlock(key);
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -365,9 +366,8 @@ void _k_thread_single_abort(struct k_thread *thread)
|
||||
if (_is_thread_pending(thread)) {
|
||||
_unpend_thread(thread);
|
||||
}
|
||||
if (_is_thread_timing(thread)) {
|
||||
if (_is_thread_timeout_active(thread)) {
|
||||
_abort_thread_timeout(thread);
|
||||
_mark_thread_as_not_timing(thread);
|
||||
}
|
||||
}
|
||||
_mark_thread_as_dead(thread);
|
||||
@@ -382,7 +382,6 @@ void _init_static_threads(void)
|
||||
_new_thread(
|
||||
thread_data->init_stack,
|
||||
thread_data->init_stack_size,
|
||||
NULL,
|
||||
thread_data->init_entry,
|
||||
thread_data->init_p1,
|
||||
thread_data->init_p2,
|
||||
@@ -417,6 +416,22 @@ void _init_static_threads(void)
|
||||
k_sched_unlock();
|
||||
}
|
||||
|
||||
void _init_thread_base(struct _thread_base *thread_base, int priority,
|
||||
uint32_t initial_state, unsigned int options)
|
||||
{
|
||||
/* k_q_node is initialized upon first insertion in a list */
|
||||
|
||||
thread_base->flags = options | initial_state;
|
||||
|
||||
thread_base->prio = priority;
|
||||
|
||||
thread_base->sched_locked = 0;
|
||||
|
||||
/* swap_data does not need to be initialized */
|
||||
|
||||
_init_thread_timeout(thread_base);
|
||||
}
|
||||
|
||||
uint32_t _k_thread_group_mask_get(struct k_thread *thread)
|
||||
{
|
||||
struct _static_thread_data *thread_data = thread->init_data;
|
||||
|
||||
@@ -520,28 +520,15 @@ int _prf(int (*func)(), void *dest, char *format, va_list vargs)
|
||||
if (strchr("hlLz", c) != NULL) {
|
||||
i = c;
|
||||
c = *format++;
|
||||
switch (i) {
|
||||
case 'h':
|
||||
if (strchr("diouxX", c) == NULL)
|
||||
break;
|
||||
break;
|
||||
|
||||
case 'l':
|
||||
if (strchr("diouxX", c) == NULL)
|
||||
break;
|
||||
break;
|
||||
|
||||
case 'L':
|
||||
if (strchr("eEfgG", c) == NULL)
|
||||
break;
|
||||
break;
|
||||
|
||||
case 'z':
|
||||
if (strchr("diouxX", c) == NULL)
|
||||
break;
|
||||
break;
|
||||
|
||||
}
|
||||
/*
|
||||
* Here there was a switch() block
|
||||
* which was doing nothing useful, I
|
||||
* am still puzzled at why it was left
|
||||
* over. Maybe before it contained
|
||||
* stuff that was needed, but in its
|
||||
* current form, it was being
|
||||
* optimized out.
|
||||
*/
|
||||
}
|
||||
|
||||
need_justifying = false;
|
||||
|
||||
@@ -45,12 +45,8 @@ int snprintf(char *_Restrict s, size_t len, const char *_Restrict format, ...)
|
||||
int r;
|
||||
char dummy;
|
||||
|
||||
if ((int) len <= 0) {
|
||||
if (len == 0) {
|
||||
s = &dummy; /* write final NUL to dummy, since can't change *s */
|
||||
} else {
|
||||
len = 0x7fffffff; /* allow up to "maxint" characters */
|
||||
}
|
||||
if (len == 0) {
|
||||
s = &dummy; /* write final NUL to dummy, can't change *s */
|
||||
}
|
||||
|
||||
p.ptr = s;
|
||||
@@ -88,12 +84,8 @@ int vsnprintf(char *_Restrict s, size_t len, const char *_Restrict format, va_li
|
||||
int r;
|
||||
char dummy;
|
||||
|
||||
if ((int) len <= 0) {
|
||||
if (len == 0) {
|
||||
s = &dummy; /* write final NUL to dummy, since can't change *s */
|
||||
} else {
|
||||
len = 0x7fffffff; /* allow up to "maxint" characters */
|
||||
}
|
||||
if (len == 0) {
|
||||
s = &dummy; /* write final NUL to dummy, can't change * *s */
|
||||
}
|
||||
|
||||
p.ptr = s;
|
||||
|
||||
328
release-notes.rst
Normal file
@@ -0,0 +1,328 @@
|
||||
Zephyr Kernel 1.6.0 Release Notes
|
||||
#################################
|
||||
|
||||
We are pleased to announce the release of Zephyr kernel version 1.6.0. This
|
||||
release introduces a Unified Kernel replacing the separate nano- and
|
||||
micro-kernels, simplifying the overall Zephyr architecture and programming
|
||||
interfaces.
|
||||
In this release we added support for the ARM Cortex-M0/M0+ family and expanded
|
||||
board support for Cortex-M.
|
||||
Additionally, this release adds many improvements for documentation, build
|
||||
infrastructure, and testing.
|
||||
|
||||
Major enhancements included with the release:
|
||||
|
||||
* Introduced the Unified Kernel; the nano and micro kernel were removed.
|
||||
* The legacy API is still supported but deprecated. All legacy tests were moved
|
||||
to tests/legacy.
|
||||
* Added Unified Kernel documentation.
|
||||
* Added support for several ARM Cortex-M boards.
|
||||
* Added support for USB mass storage and access to the filesystem.
|
||||
* Added native Bluetooth Controller support. Currently nRF51 & nRF52 are supported.
|
||||
|
||||
A detailed list of changes since v1.5.0 by component follows:
|
||||
|
||||
Kernel
|
||||
******
|
||||
|
||||
* Introduced Unified kernel.
|
||||
* Removed deprecated Tasks IRQs.
|
||||
* Removed deprecated dynamic interrupt API.
|
||||
* Added DLIST to operate in all elements of a doubly-linked list.
|
||||
* SLIST: Added sys_slist_get() to fetch and remove the head; also added
append_list and merge_slist.
|
||||
* Added nano_work_pending to check whether a work item is pending execution.
|
||||
* Unified: Added support for k_malloc and k_free.
|
||||
* Renamed kernel objects event to alert and memory map to memory slab.
|
||||
* Changed memory pool, memory maps, message queues and event handling APIs.
|
||||
|
||||
Architectures
|
||||
*************
|
||||
|
||||
* ARC: Removed CONFIG_TIMER0_CLOCK_FREQ.
|
||||
* ARC: Unified linker scripts.
|
||||
* ARC: Removed dynamic interrupts.
|
||||
* ARM: Added choice to use floating point ABI.
|
||||
* ARM: Added NXP Kinetis kconfig options to configure clocks.
|
||||
* ARM: Removed dynamic interrupts and exceptions.
|
||||
* ARM: Atmel: Added constants and structures for watchdog registers.
|
||||
* ARM: Added support for ARM Cortex-M0/M0+.
|
||||
* x86: Removed dynamic interrupts and exceptions.
|
||||
* x86: Declared internal API for interrupt controllers.
|
||||
* x86: Changed IRQ controller to return -1 if cannot determine source vector.
|
||||
* x86: Grouped Quark SoC's under intel_quark family.
|
||||
* x86: Optimized and simplified IRQ and exception stubs.
|
||||
|
||||
Boards
|
||||
******
|
||||
|
||||
* Renamed board Quark SE devboard to Quark SE C1000 devboard.
|
||||
* Renamed board Quark SE SSS devboard to Quark SE C1000 SS devboard.
|
||||
* Quark SE C1000: Disabled IPM and enabled UART0 on the Sensor Subsystem.
|
||||
* Removed basic_cortex_m3 and basic_minuteia boards.
|
||||
* Arduino 101: Removed backup/restore scripts. To restore original bootloader
|
||||
use flashpack utility instead.
|
||||
* Renamed nRF52 Nitrogen to 96Boards Nitrogen.
|
||||
* Added ARM LTD Beetle SoC and V2M Beetle board.
|
||||
* Added Texas Instruments CC3200 LaunchXL support.
|
||||
* Added support for Nordic Semiconductor nRF51822.
|
||||
* Added support for NXP Hexiwear board.
|
||||
|
||||
Drivers and Sensors
|
||||
*******************
|
||||
|
||||
* SPI: Fixed typos in SPI port numbers.
|
||||
* Pinmux: Removed Quark dev unused file.
|
||||
* I2C: Added KSDK shim driver.
|
||||
* Ethernet: Added KSDK shim driver.
|
||||
* Flash: Added KSDK shim driver.
|
||||
* I2C: Changed config parameters to SoC specific.
|
||||
* QMSI: Implemented suspend and resume functions in QMSI shim drivers.
|
||||
* Added HP206C sensor.
|
||||
* Changed config_info pointers to const.
|
||||
* Added support for SoCWatch driver.
|
||||
* Added FXOS8700 accelerometer / magnetometer sensor driver.
|
||||
|
||||
Networking
|
||||
**********
|
||||
|
||||
* Minor fixes to uIP networking stack (This will be deprecated in 1.7)
|
||||
|
||||
Bluetooth
|
||||
*********
|
||||
|
||||
* Added native Bluetooth Controller support. Currently nRF51 & nRF52 are supported.
|
||||
* New location for Controller & Host implementations: subsys/bluetooth/
|
||||
* Added raw HCI API to enable physical HCI transport for a Controller-only build.
|
||||
* Added sample raw HCI apps for USB and UART.
|
||||
* Added cross-transport pairing support for the Security Manager Protocol.
|
||||
* Added RFCOMM support (for Bluetooth Classic).
* Added basic persistent storage support (filesystem-backed).
|
||||
* Renamed bt_driver API to bt_hci_driver, in anticipation of Bluetooth radio drivers.
|
||||
|
||||
Build Infrastructure
|
||||
********************
|
||||
|
||||
* Makefile: Changed outdir into board-specific directory to avoid build collisions.
|
||||
* Makefile: Changed to use HOST_OS environment variable.
|
||||
* Makefile: Added support for third party build systems.
|
||||
* printk: Added support for modifiers.
|
||||
* Sanity: Added support to filter using environment variables.
|
||||
* Sanity: Added support for multiple toolchains.
|
||||
* Sanity: Added ISSM and ARM GCC embedded toolchains to the supported toolchains.
|
||||
* Sanity: Added extra arguments to be passed to the build.
|
||||
* Sanity: Removed linker VMA/LMA offset check.
|
||||
* Sysgen: Added --kernel_type argument.
|
||||
* Modified build infrastructure to support unified kernel.
|
||||
* SDK: Zephyr: Added check for minimum required version.
|
||||
* Imported get_maintainer.pl from Linux kernel.
|
||||
|
||||
Libraries
|
||||
*********
|
||||
|
||||
* libc: Added subset of standard types in inttypes.h.
|
||||
* libc: Added support for 'z' length specifier.
|
||||
* libc: Removed stddef.h which is provided by the compiler.
|
||||
* libc: printf: Improved code for printing.
|
||||
* Added CoAP implementation for Zephyr.
|
||||
* File system: Added API to grow or shrink a file.
|
||||
* File system: Added API to get volume statistics.
|
||||
* File system: Added API to flush cache of an opened file.
|
||||
|
||||
HALs
|
||||
****
|
||||
|
||||
* QMSI: Updated to version 1.3.1.
|
||||
* HAL: Imported CC3200 SDK.
|
||||
* Imported Nordic MDK nRF51 files.
|
||||
* Imported Kinetis SDK Ethernet phy driver.
|
||||
* Imported SDK RNGA driver.
|
||||
|
||||
Documentation
|
||||
*************
|
||||
|
||||
* Drivers: Improved Zephyr Driver model.
|
||||
* Updated device power management API.
|
||||
* Unified Kernel primer.
|
||||
* Moved supported board information to the wiki.zephyrproject.org site.
|
||||
* Revised documentation for Kernel Event logger and Timing.
|
||||
|
||||
Test and Samples
|
||||
****************
|
||||
|
||||
* Fixed incorrect printk usage.
|
||||
* Removed test for dynamic exceptions.
|
||||
* Added USB sample.
|
||||
* Added tests and samples for CoAP client and server.
|
||||
* Added philosophers unified sample.
|
||||
* Removed printf/printk wrappers.
|
||||
* Added Unified kernel API samples.
|
||||
* Imported tinycrypt test cases for CTR, ECC DSA and ECC DH algorithm.
|
||||
|
||||
Deprecations
|
||||
************
|
||||
|
||||
* Deprecated microkernel and nanokernel APIs.
|
||||
* Removed dynamic IRQs and exceptions.
|
||||
* Removed Tasks IRQs.
|
||||
|
||||
JIRA Related Items
|
||||
******************
|
||||
|
||||
Epic
|
||||
====
|
||||
|
||||
* [ZEP-308] - Build System cleanup and Kernel / Application build separation
|
||||
* [ZEP-334] - Unified Kernel
|
||||
* [ZEP-760] - Clean up samples and sanitise them.
|
||||
* [ZEP-766] - USB Mass Storage access to internal filesystem
|
||||
* [ZEP-1090] - CPU x86 save/restore using new QMSI bootloader flow
|
||||
|
||||
Story
|
||||
=====
|
||||
|
||||
* [ZEP-48] - define API for interrupt controllers
|
||||
* [ZEP-233] - Support USB mass storage device class
|
||||
* [ZEP-237] - Support pre-built host tools
|
||||
* [ZEP-240] - printk/printf usage in samples
|
||||
* [ZEP-248] - Add a BOARD/SOC porting guide
|
||||
* [ZEP-328] - HW Encryption Abstraction
|
||||
* [ZEP-342] - USB DFU
|
||||
* [ZEP-451] - Quark SE output by default redirected to IPM
|
||||
* [ZEP-521] - ARM - add choice to floating point ABI selection
|
||||
* [ZEP-546] - UART interrupts not triggered on ARC
|
||||
* [ZEP-584] - warn user if SDK is out of date
|
||||
* [ZEP-614] - Port tinycrypt 2.0 test cases to Zephyr
|
||||
* [ZEP-622] - Add FS API to truncate/shrink a file
|
||||
* [ZEP-627] - Port Trickle support from Contiki into current stack
|
||||
* [ZEP-635] - Add FS API to grow a file
|
||||
* [ZEP-636] - Add FS API to get volume total and free space
|
||||
* [ZEP-640] - Remove dynamic IRQs/exceptions from Zephyr
|
||||
* [ZEP-653] - QMSI shim driver: Watchdog: Implement suspend and resume callbacks
|
||||
* [ZEP-654] - QMSI shim driver: I2C: Implement suspend and resume callbacks
|
||||
* [ZEP-657] - QMSI shim driver: AONPT: Implement suspend and resume callbacks
|
||||
* [ZEP-661] - QMSI shim driver: SPI: Implement suspend and resume callbacks
|
||||
* [ZEP-688] - unify duplicated sections of arch linker scripts
|
||||
* [ZEP-715] - Add K64F clock configurations
|
||||
* [ZEP-716] - Add Hexiwear board support
|
||||
* [ZEP-717] - Add ksdk I2C shim driver
|
||||
* [ZEP-718] - Add ksdk ethernet shim driver
|
||||
* [ZEP-721] - Add FXOS8700 accelerometer/magnetometer sensor driver
|
||||
* [ZEP-737] - Update host tools from upstream: fixdep.c
|
||||
* [ZEP-745] - Revisit design of PWM Driver API
|
||||
* [ZEP-750] - Arduino 101 board should support one configuration using original bootloader
|
||||
* [ZEP-758] - Rename Quark SE Devboard to its official name: Quark SE C1000
|
||||
* [ZEP-767] - Add FS API to flush cache of an open file
|
||||
* [ZEP-775] - Enable USB CDC by default on Arduino 101 and redirect serial to USB
|
||||
* [ZEP-783] - ARM Cortex-M0/M0+ support
|
||||
* [ZEP-784] - Add support for Nordic Semiconductor nRF51822 SoC
|
||||
* [ZEP-850] - remove obsolete boards basic_minuteia and basic_cortex_m3
|
||||
* [ZEP-906] - [unified] Add scheduler time slicing support
|
||||
* [ZEP-907] - Test memory pool support (with mailboxes)
|
||||
* [ZEP-908] - Add task offload to fiber support
|
||||
* [ZEP-909] - Adapt tickless idle + power management for ARM
|
||||
* [ZEP-910] - Adapt tickless idle for x86
|
||||
* [ZEP-911] - Refine thread priorities & locking
|
||||
* [ZEP-912] - Finish renaming kernel object types
|
||||
* [ZEP-916] - Eliminate kernel object API anomalies
|
||||
* [ZEP-920] - Investigate malloc/free support
|
||||
* [ZEP-921] - Miscellaneous documentation work
|
||||
* [ZEP-922] - Revise documentation for Kernel Event Logger
|
||||
* [ZEP-923] - Revise documentation for Timing
|
||||
* [ZEP-924] - Revise documentation for Interrupts
|
||||
* [ZEP-925] - API changes to message queues
|
||||
* [ZEP-926] - API changes to memory pools
|
||||
* [ZEP-927] - API changes to memory maps
|
||||
* [ZEP-928] - API changes to event handling
|
||||
* [ZEP-930] - Cutover to unified kernel
|
||||
* [ZEP-933] - Unified kernel ARC port
|
||||
* [ZEP-934] - NIOS_II port
|
||||
* [ZEP-935] - Kernel logger support (validation)
|
||||
* [ZEP-954] - Update device PM API to allow setting additional power states
|
||||
* [ZEP-957] - Create example sample for new unified kernel API usage
|
||||
* [ZEP-959] - sync checkpatch.pl with upstream Linux
|
||||
* [ZEP-981] - Add doxygen documentation to both include/kernel.h and include/legacy.h
|
||||
* [ZEP-989] - Cache next ready thread instead of finding out the long way
|
||||
* [ZEP-993] - Quark SE (x86): Refactor save/restore execution context feature
|
||||
* [ZEP-994] - Quark SE (ARC): Add PMA sample
|
||||
* [ZEP-996] - Refactor save/restore feature from i2c_qmsi driver
|
||||
* [ZEP-997] - Refactor save/restore feature from spi_qmsi driver
|
||||
* [ZEP-998] - Refactor save/restore feature from uart_qmsi driver
|
||||
* [ZEP-999] - Refactor save/restore feature from gpio_qmsi driver
|
||||
* [ZEP-1000] - Refactor save/restore feature from rtc_qmsi driver
|
||||
* [ZEP-1001] - Refactor save/restore feature from wdt_qmsi driver
|
||||
* [ZEP-1002] - Refactor save/restore feature from counter_qmsi_aonpt driver
|
||||
* [ZEP-1004] - Extend counter_qmsi_aon driver to support save/restore peripheral context
|
||||
* [ZEP-1005] - Extend dma_qmsi driver to support save/restore peripheral context
|
||||
* [ZEP-1006] - Extend soc_flash_qmsi driver to support save/restore peripheral context
|
||||
* [ZEP-1008] - Extend pwm_qmsi driver to support save/restore peripheral context
|
||||
* [ZEP-1030] - Enable QMSI shim drivers of SoC peripherals on the sensor subsystem
|
||||
* [ZEP-1045] - Add/Enhance shim layer to wrap SOC specific PM implementations
|
||||
* [ZEP-1046] - Implement RAM sharing between bootloader and Zephyr
|
||||
* [ZEP-1047] - Adapt to new PM related boot flow changes in QMSI boot loader
|
||||
* [ZEP-1106] - Fix all test failures from TCF
|
||||
* [ZEP-1107] - Update QMSI to 1.3
|
||||
* [ZEP-1109] - Texas Instruments CC3200 LaunchXL Support
|
||||
* [ZEP-1119] - move top level usb/ to sys/usb
|
||||
* [ZEP-1120] - move top level fs/ to sys/fs
|
||||
* [ZEP-1121] - Add config support for enabling SoCWatch in Zephyr
|
||||
* [ZEP-1188] - Add an API to retrieve pending interrupts for wake events
|
||||
* [ZEP-1191] - Create wiki page for Hexiwear board
|
||||
* [ZEP-1245] - ARM LTD V2M Beetle Support
|
||||
* [ZEP-1313] - porting and user guides must include a security section
|
||||
|
||||
Task
|
||||
====
|
||||
|
||||
* [ZEP-592] - Sanitycheck support for multiple toolchains
|
||||
* [ZEP-740] - PWM API: Check if 'flags' argument is really required
|
||||
|
||||
Bug
|
||||
===
|
||||
|
||||
* [ZEP-145] - no 'make flash' for Arduino Due
* [ZEP-199] - Zephyr driver model is undocumented
* [ZEP-471] - Ethernet packet with multicast address is not working
* [ZEP-472] - Ethernet packets are getting missed if sent in quick succession.
* [ZEP-517] - build on windows failed "zephyr/Makefile:869: \*\*\* multiple target patterns"
* [ZEP-528] - ARC has 2 almost identical copies of the linker script
* [ZEP-577] - Sample application source does not compile on Windows
* [ZEP-601] - enable CONFIG_DEBUG_INFO
* [ZEP-602] - unhandled CPU exceptions/interrupts report wrong faulting vector if triggered by CPU
* [ZEP-615] - Un-supported flash erase size listed in SPI flash w25qxxdv driver header file
* [ZEP-639] - device_pm_ops structure should be defined as static
* [ZEP-686] - docs: Info in "Application Development Primer" and "Developing an Application and the Build System" is largely duplicated
* [ZEP-698] - samples/task_profiler issues
* [ZEP-707] - mem_safe test stomps on top of .data and bottom of .noinit
* [ZEP-724] - build on windows failed: 'make: execvp: uname: File or path name too long'
* [ZEP-733] - Minimal libc shouldn't be providing stddef.h
* [ZEP-762] - unexpected "abspath" and "notdir" from mingw make system
* [ZEP-777] - samples/driver/i2c_stts751: kconfig build warning from "select DMA_QMSI"
* [ZEP-778] - Samples/drivers/i2c_lsm9ds0: kconfig build warning from "select DMA_QMSI"
* [ZEP-779] - Using current MinGW gcc version 5.3.0 breaks Zephyr build on Windows
* [ZEP-905] - hello_world compilation for arduino_due target fails when using CROSS_COMPILE
* [ZEP-950] - USB: Device is not listed by USB20CV test suite
* [ZEP-961] - samples: other cases cannot execute after run aon_counter case
* [ZEP-1025] - Unified kernel build sometimes breaks on a missing .d dependency file.
* [ZEP-1027] - Documentation for GCC ARM is not accurate
* [ZEP-1048] - grove_lcd sample: sample does not work if you disable serial
* [ZEP-1100] - Current master still identifies itself as 1.5.0
* [ZEP-1101] - SYS_KERNEL_VER_PATCHLEVEL() and friends artificially limit version numbers to 4 bits
* [ZEP-1130] - region 'RAM' overflowed occurs while building test_hmac_prng
* [ZEP-1141] - Tinycrypt SHA256 test fails with system crash using unified kernel type
* [ZEP-1144] - Tinycrypt AES128 fixed-key with variable-text test fails using unified kernel type
* [ZEP-1145] - system hang after tinycrypt HMAC test
* [ZEP-1146] - zephyrproject.org home page needs technical scrub for 1.6 release
* [ZEP-1149] - port ztest framework to unified kernel
* [ZEP-1155] - Fix filesystem API namespace
* [ZEP-1163] - LIB_INCLUDE_DIR is clobbered in Makefile second pass
* [ZEP-1164] - ztest skips waiting for the test case to finish its execution
* [ZEP-1179] - Build issues when compiling with LLVM from ISSM (icx)
* [ZEP-1226] - cortex M7 port assembler error
* [ZEP-1287] - ARC SPI 1 Port is not working
* [ZEP-1297] - test/legacy/kernel/test_mail: failure on ARC platforms
* [ZEP-1299] - System can't resume completely with DMA suspend and resume operation
* [ZEP-1303] - Configuration talks about >32 thread prios, but the kernel does not support it
* [ZEP-1309] - ARM uses the end of memory for its init stack
* [ZEP-1310] - ARC uses the end of memory for its init stack
* [ZEP-1319] - Zephyr is unable to compile when CONFIG_RUNTIME_NMI is enabled on ARM platforms

@@ -369,11 +369,14 @@ void main(void)
k_fifo_init(&tx_queue);
k_fifo_init(&rx_queue);

/* Enable the raw interface, this will in turn open the HCI driver */
bt_enable_raw(&rx_queue);
/* Spawn the TX thread and start feeding commands and data to the
 * controller
 */
k_thread_spawn(tx_thread_stack, STACK_SIZE, tx_thread, NULL, NULL,
	       NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);

bt_enable_raw(&rx_queue);

while (1) {
struct net_buf *buf;

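For reference, a minimal sketch of the 1.6-era k_thread_spawn() call pattern used in the hunk above, with a statically allocated stack; the stack size, priority, thread and function names here are illustrative only, not part of the commit:

#include <zephyr.h>
#include <misc/printk.h>

#define MY_STACK_SIZE 512
static char __stack my_stack[MY_STACK_SIZE];	/* statically allocated thread stack */

static void my_thread(void *p1, void *p2, void *p3)
{
	printk("worker thread running\n");
}

void start_worker(void)
{
	/* spawn a cooperative thread and start it immediately (K_NO_WAIT) */
	k_thread_spawn(my_stack, MY_STACK_SIZE, my_thread,
		       NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
}
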
@@ -110,7 +110,7 @@ void register_context_switch_data(uint32_t timestamp, uint32_t thread_id)

void register_interrupt_event_data(uint32_t timestamp, uint32_t irq)
{
if ((irq >= 0) && (irq < 255)) {
if (irq < 255) {
interrupt_counters[irq] += 1;
}
}

@@ -600,10 +600,22 @@ int main(int ac, char **av)
if (!name)
break;
if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) {
if (conf_read_simple(name, S_DEF_USER)) {
/*
 * "640kb ought to be enough for anybody" sic
 *
 * Limit the _name variable, as environment
 * wise it is not limited and this way we
 * ensure there can be no attacks through it.
 *
 * Coverity made me do it.
 */
char _name[256];

strncpy(_name, name, sizeof(_name));
if (conf_read_simple(_name, S_DEF_USER)) {
fprintf(stderr,
_("*** Can't read seed configuration \"%s\"!\n"),
name);
_name);
exit(1);
}
break;

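The replacement hunk above bounds the environment-supplied name with strncpy(); note that strncpy() by itself does not NUL-terminate when the source fills the buffer. A minimal sketch of a bounded copy that also guarantees termination (hypothetical helper, not part of the commit):

#include <stdio.h>
#include <string.h>

/* copy at most dst_size-1 bytes and always NUL-terminate */
static void copy_bounded(char *dst, size_t dst_size, const char *src)
{
	if (dst_size == 0)
		return;
	strncpy(dst, src, dst_size - 1);
	dst[dst_size - 1] = '\0';
}

int main(void)
{
	char name[256];

	copy_bounded(name, sizeof(name), "some/long/seed/config/path");
	printf("%s\n", name);
	return 0;
}
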
@@ -17,7 +17,6 @@

menuconfig BLUETOOTH
bool "Bluetooth support"
select NANO_TIMEOUTS
select NET_BUF
help
This option enables Bluetooth support.

@@ -246,7 +246,6 @@ endif # BLUETOOTH_CONN
config BLUETOOTH_TINYCRYPT_ECC
bool "Use TinyCrypt library for ECDH"
select TINYCRYPT_ECC_DH
depends on MICROKERNEL
help
If this option is set TinyCrypt library is used for emulating the
ECDH HCI commands and events needed by e.g. LE Secure Connections.

@@ -838,6 +838,7 @@ static void att_find_type_rsp(struct bt_conn *conn, uint8_t err,
for (i = 0; length >= sizeof(rsp->list[i]);
     i++, length -= sizeof(rsp->list[i])) {
struct bt_gatt_attr attr = {};
struct bt_gatt_service value;

start_handle = sys_le16_to_cpu(rsp->list[i].start_handle);
end_handle = sys_le16_to_cpu(rsp->list[i].end_handle);
@@ -851,7 +852,11 @@ static void att_find_type_rsp(struct bt_conn *conn, uint8_t err,
attr.uuid = BT_UUID_GATT_SECONDARY;
}

value.end_handle = end_handle;
value.uuid = params->uuid;

attr.handle = start_handle;
attr.user_data = &value;

if (params->func(conn, &attr, params) == BT_GATT_ITER_STOP) {
return;

@@ -1251,7 +1251,7 @@ static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
{
uint16_t sdu_len;

if (!k_sem_take(&chan->rx.credits, K_NO_WAIT)) {
if (k_sem_take(&chan->rx.credits, K_NO_WAIT)) {
BT_ERR("No credits to receive packet");
bt_l2cap_chan_disconnect(&chan->chan);
return;

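The one-line fix above follows the unified-kernel convention that k_sem_take() returns 0 on success and a non-zero (negative) value when the semaphore is unavailable, so the failure path is the truthy case. A minimal sketch of that convention; the semaphore and function names below are illustrative only:

#include <zephyr.h>
#include <misc/printk.h>

K_SEM_DEFINE(rx_credits, 1, 1);	/* one credit available initially */

void consume_credit(void)
{
	if (k_sem_take(&rx_credits, K_NO_WAIT)) {
		/* non-zero return: no credit could be taken */
		printk("No credits to receive packet\n");
		return;
	}

	/* zero return: credit taken successfully */
	printk("Credit taken\n");
}
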
@@ -1593,7 +1593,9 @@ segment:
}

/* Don't send more than TX MPS including SDU length */
len = min(buf->len, ch->tx.mps - sdu_hdr_len);
len = min(net_buf_tailroom(seg), ch->tx.mps - sdu_hdr_len);
/* Limit if original buffer is smaller than the segment */
len = min(buf->len, len);
memcpy(net_buf_add(seg, len), buf->data, len);
net_buf_pull(buf, len);

@@ -1,17 +1,26 @@
/*
 * Certain structures and defines in this file are from mbed's implementation.
 *
 * Copyright (c) 2010-2011 mbed.org, MIT License
 * Copyright (c) 2016 Intel Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**

@@ -1,4 +1,4 @@
# Makefile - Bluetooth shell Makefile for microkernel
# Makefile - Bluetooth shell

#
# Copyright (c) 2015-2016 Intel Corporation

@@ -1,4 +1,4 @@
# Makefile - Bluetooth tester Makefile for microkernel
# Makefile - Bluetooth tester

#
# Copyright (c) 2015-2016 Intel Corporation

@@ -1,5 +1,9 @@
BOARD ?= qemu_x86
MDEF_FILE = prj.mdef
# In QEMU x86, we have to force CONFIG_PCI being defined, as the defconfig does not have it.
ifeq ($(BOARD), qemu_x86)
CONF_FILE = prj_$(BOARD).conf
else
CONF_FILE = prj.conf
endif

include ${ZEPHYR_BASE}/Makefile.inc

@@ -1,3 +1,4 @@
CONFIG_STDOUT_CONSOLE=y
CONFIG_PCI_ENUMERATION=y
CONFIG_PCI_DEBUG=y
CONFIG_ZTEST=y

@@ -1,5 +0,0 @@
% Application : PCI enumeration application

% TASK NAME PRIO ENTRY STACK GROUPS
% ===========================================
TASK TASKA 7 task_enum_pci 1024 [EXE]

tests/drivers/pci_enum/prj_qemu_x86.conf (new file, 5 lines)
@@ -0,0 +1,5 @@
CONFIG_PCI=y
CONFIG_STDOUT_CONSOLE=y
CONFIG_PCI_ENUMERATION=y
CONFIG_PCI_DEBUG=y
CONFIG_ZTEST=y

@@ -1,5 +0,0 @@
# @testcase dynamic
# @targets \
# board:(galileo|qemu_x86|qemu_cortex_m3)
#
# @eval console-rx %(console)s:20 Enumeration complete on %(bsp)s

@@ -1,3 +1,3 @@
include $(ZEPHYR_BASE)/tests/Makefile.test
ccflags-y += -I${ZEPHYR_BASE}/include/drivers

obj-y = pci_enum.o

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <zephyr.h>
#include <stdint.h>
#include <misc/printk.h>
#include <ztest.h>
#include <pci/pci.h>

void pci_enumerate(void)
static void pci_enumerate(void)
{
struct pci_dev_info info = {
.function = PCI_FUNCTION_ANY,
@@ -39,28 +37,8 @@ void pci_enumerate(void)
}
}

#ifdef CONFIG_MICROKERNEL

static int done;

void task_enum_pci(void)
void test_main(void)
{
if (done) {
task_yield();
}

pci_enumerate();
printk("Enumeration complete on %s", CONFIG_ARCH);
done = 1;
ztest_test_suite(pci_test, ztest_unit_test(pci_enumerate));
ztest_run_test_suite(pci_test);
}

#else /* CONFIG_NANOKERNEL */

void main(void)
{
pci_enumerate();
printk("Enumeration complete on %s", CONFIG_ARCH);
}

#endif /* CONFIG_MICROKERNEL */

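The hunk above replaces the legacy microkernel/nanokernel entry points with a single ztest entry point. For reference, a minimal ztest skeleton of the same shape; the suite name, test name, and assertion below are illustrative only:

#include <ztest.h>

static void my_unit_test(void)
{
	/* a trivial assertion just to exercise the framework */
	zassert_true(1 + 1 == 2, "arithmetic is broken");
}

void test_main(void)
{
	ztest_test_suite(sample_suite,
			 ztest_unit_test(my_unit_test));
	ztest_run_test_suite(sample_suite);
}
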
@@ -1,4 +1,4 @@
[test]
build_only = true
tags = samples
filter = CONFIG_PCI
platform_whitelist = qemu_x86 galileo

@@ -235,27 +235,6 @@ int vsnprintfTest(void)
int status = TC_PASS;
char buffer[100];

/*
 * The string size may be handled in a non-standard manner.
 * If a negative value is supplied for the string size, it is converted
 * to 0x7fffffff--maximum integer size. Since there is insufficient
 * memory to test a string of that length, we just check that the string
 * was fully written so that we can exercise the code path.
 */
buffer[0] = '\0';
len = tvsnprintf(buffer, (size_t)(-4), "%x", DEADBEEF);
if (len != strlen(DEADBEEF_LHEX_STR)) {
TC_ERROR("vsnprintf(%%x). Expected return value %d, not %d\n",
strlen(DEADBEEF_LHEX_STR), len);
status = TC_FAIL;
}

if (strcmp(buffer, DEADBEEF_LHEX_STR) != 0) {
TC_ERROR("vsnprintf(%%x). Expected '%s', got '%s'\n",
DEADBEEF_LHEX_STR, buffer);
status = TC_FAIL;
}

/*******************/
buffer[0] = '\0';
len = tvsnprintf(buffer, 0, "%x", DEADBEEF);
@@ -356,26 +335,6 @@ int snprintfTest(void)
int status = TC_PASS;
char buffer[100];

/*
 * The string size may be handled in a non-standard manner.
 * If a negative value is supplied for the string size, it is converted
 * to 0x7fffffff--maximum integer size. Since there is insufficient
 * memory to test a string of that length, we just check that the string
 * was fully written so that we can exercise the code path.
 */
buffer[0] = '\0';
len = snprintf(buffer, (size_t)(-4), "%x", DEADBEEF);
if (len != strlen(DEADBEEF_LHEX_STR)) {
TC_ERROR("snprintf(%%x). Expected return value %d, not %d\n",
strlen(DEADBEEF_LHEX_STR), len);
status = TC_FAIL;
}

if (strcmp(buffer, DEADBEEF_LHEX_STR) != 0) {
TC_ERROR("snprintf(%%x). Expected '%s', got '%s'\n",
DEADBEEF_LHEX_STR, buffer);
status = TC_FAIL;
}

/*******************/
buffer[0] = '\0';

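The removed test blocks above exercised a non-standard interpretation of a negative buffer size. For contrast, a small host-C illustration of the standard snprintf() contract, where the return value is the length that would have been written and truncation is detected by comparing it against the buffer size:

#include <stdio.h>

int main(void)
{
	char buf[8];
	int needed = snprintf(buf, sizeof(buf), "%x", 0xdeadbeef);

	if (needed >= (int)sizeof(buf)) {
		/* "deadbeef" needs 8 characters plus the terminator */
		printf("output truncated: needed %d bytes\n", needed);
	}
	printf("buffer holds \"%s\"\n", buf);	/* prints "deadbee" */
	return 0;
}
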
@@ -305,7 +305,7 @@ int _sys_soc_suspend(int32_t ticks)
if (!post_ops_done) {
post_ops_done = 1;
printk("Exiting %s state\n", state_to_string(state));
_sys_soc_power_state_post_ops(current_state);
_sys_soc_power_state_post_ops(state);
}
}

@@ -323,7 +323,7 @@ void _sys_soc_resume(void)
if (!post_ops_done) {
post_ops_done = 1;
printk("Exiting %s state\n", state_to_string(state));
_sys_soc_power_state_post_ops(current_state);
_sys_soc_power_state_post_ops(state);
}
break;
case SYS_POWER_STATE_DEEP_SLEEP:

@@ -55,37 +55,75 @@ void _init_mock(void)

#else

static struct parameter params[CONFIG_ZTEST_PARAMETER_COUNT];
static struct k_fifo *fifo;
/*
 * FIXME: move to sys_io.h once the argument signature for bitmap has
 * been fixed to void* or similar ZEP-1347
 */
#define BITS_PER_UL (8 * sizeof(unsigned long int))
#define DEFINE_BITFIELD(name, bits) \
unsigned long int (name)[((bits) + BITS_PER_UL - 1) / BITS_PER_UL]

static void free_parameter(struct parameter *param)
static inline
int sys_bitfield_find_first_clear(const unsigned long *bitmap,
unsigned int bits)
{
if (param) {
k_fifo_put(fifo, param);
unsigned int words = (bits + BITS_PER_UL - 1) / BITS_PER_UL;
unsigned int cnt;
unsigned int long neg_bitmap;

/*
 * By bitwise negating the bitmap, we are actually implementing
 * ffc (find first clear) using ffs (find first set).
 */
for (cnt = 0; cnt < words; cnt++) {
neg_bitmap = ~bitmap[cnt];
if (neg_bitmap == 0) /* all full */
continue;
else if (neg_bitmap == ~0UL) /* first bit */
return cnt * BITS_PER_UL;
else
return cnt * BITS_PER_UL + __builtin_ffsl(neg_bitmap);
}
return -1;
}
static struct parameter *alloc_parameter(void)

static DEFINE_BITFIELD(params_allocation, CONFIG_ZTEST_PARAMETER_COUNT);
static struct parameter params[CONFIG_ZTEST_PARAMETER_COUNT];

static
void free_parameter(struct parameter *param)
{
unsigned int allocation_index = param - params;

if (param == NULL)
return;
__ASSERT(allocation_index < CONFIG_ZTEST_PARAMETER_COUNT,
"param %p given to free is not in the static buffer %p:%u",
param, params, CONFIG_ZTEST_PARAMETER_COUNT);
sys_bitfield_clear_bit((mem_addr_t) params_allocation,
allocation_index);
}

static
struct parameter *alloc_parameter(void)
{
int allocation_index;
struct parameter *param;

param = k_fifo_get(fifo, K_NO_WAIT);
if (!param) {
PRINT("Failed to allocate mock parameter\n");
allocation_index = sys_bitfield_find_first_clear(
params_allocation, CONFIG_ZTEST_PARAMETER_COUNT);
if (allocation_index == -1) {
printk("No more mock parameters available for allocation\n");
ztest_test_fail();
}

sys_bitfield_set_bit((mem_addr_t) params_allocation, allocation_index);
param = params + allocation_index;
memset(param, 0, sizeof(*param));
return param;
}

void _init_mock(void)
{
int i;

k_fifo_init(fifo);
for (i = 0; i < CONFIG_ZTEST_PARAMETER_COUNT; i++) {

k_fifo_put(fifo, &params[i]);
}
}

#endif

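The helper above implements find-first-clear by negating each bitmap word and applying find-first-set. A standalone host-C sketch of the same idea; note that __builtin_ffsl() returns a 1-based position (0 when no bit is set), so this sketch converts the result back to a 0-based index:

#include <stdio.h>

/* return the 0-based index of the first clear bit in a word, or -1 */
static int find_first_clear_ul(unsigned long word)
{
	unsigned long neg = ~word;	/* clear bits become set bits */

	if (neg == 0)
		return -1;		/* every bit is already set */
	return __builtin_ffsl(neg) - 1;
}

int main(void)
{
	/* bits 0..2 are set, so the first clear bit is index 3 */
	printf("%d\n", find_first_clear_ul(0x7UL));
	return 0;
}
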
@@ -157,8 +195,7 @@ void _ztest_check_expected_value(const char *fn, const char *name,
 * provide inttypes.h
 */
PRINT("%s received wrong value: Got %lu, expected %lu\n",
fn, (unsigned long)val,
(unsigned long)expected);
fn, (unsigned long)val, (unsigned long)expected);
ztest_test_fail();
}
}

@@ -1,6 +1,7 @@
[test]
tags = test_framework
arch_whitelist = x86 arc
# sys_bitfield_*() still not implemented for ARM, ZEP-82
arch_exclude = arm

[test_unit]
type = unit