Compare commits
71 commits: main...v2.5-branch
Commit SHA1s:
52e06334fb, 8be935fe5c, 37139f93d9, 3b369ee0c1, 82db759ce0, 5a430eea41, 79d575bf35, dabf237584,
4e8b382e03, b02636053a, 66f77f9d92, 9e3c7b995f, f642feba06, f9dd4c24e5, fc47320d93, ec0aa8331a,
92e005122a, bd0fe17fa0, fe3f71bdad, 14bd22db75, d72a1078b2, 8da72d16b7, 1e320a352f, 6eb998bd78,
26e335cc42, 871ab25c2f, cd0ed04627, 2f580e1930, 651afd7c39, ef7cbbfb60, d87cc6f51a, 860bb75a69,
ff3659ca51, c8d190e318, 0dfa18307e, 6c2ae13e9a, c418c9aecf, 61682bd6d9, 0249a62662, 550939e441,
56c712d2ca, ab2476d88e, 1626b87d92, 500c4f2cd3, 0fa47361c8, 6cf2fdc5d1, 343a26d1ae, 7179e1d6a9,
664df1e75c, cb97dd13ff, 0e64b47a5d, 18ddabc335, c6921d2026, 4c46e0072d, dd0f451c23, eb3d194475,
b4b2903e5a, dfabe3ed34, acb8a1f0cf, cdfd0a9a8d, a36d098117, 73a3eba539, 8ff077a65a, 116425ccbe,
ca83ca0065, 213d348b92, 205c16afa6, af67c16b9f, 46d95a23f0, 3de4ba01e4, 6031f04519
.github/workflows/manifest.yml (vendored): 1 change
@@ -15,6 +15,7 @@ jobs:
 path: zephyrproject/zephyr
 ref: ${{ github.event.pull_request.head.sha }}
 fetch-depth: 0
+persist-credentials: false

 - name: Manifest
   uses: zephyrproject-rtos/action-manifest@main
@@ -573,6 +573,24 @@ add_custom_command(
|
||||
)
|
||||
|
||||
add_custom_target(${SYSCALL_LIST_H_TARGET} DEPENDS ${syscall_list_h})
|
||||
|
||||
# This only works for CMake version >=3.15, but as the property is ignored on
|
||||
# older CMake versions there is no reason to check for version.
|
||||
set_property(TARGET ${SYSCALL_LIST_H_TARGET}
|
||||
APPEND PROPERTY
|
||||
ADDITIONAL_CLEAN_FILES
|
||||
${CMAKE_CURRENT_BINARY_DIR}/include/generated/syscalls
|
||||
)
|
||||
|
||||
# Only works with make.
|
||||
if(${CMAKE_VERSION} VERSION_LESS 3.15)
|
||||
set_property(DIRECTORY
|
||||
APPEND PROPERTY
|
||||
ADDITIONAL_MAKE_CLEAN_FILES
|
||||
${CMAKE_CURRENT_BINARY_DIR}/include/generated/syscalls
|
||||
)
|
||||
endif()
|
||||
|
||||
add_custom_target(${PARSE_SYSCALLS_TARGET}
|
||||
DEPENDS
|
||||
${syscalls_json}
|
||||
@@ -725,7 +743,7 @@ if(CONFIG_GEN_ISR_TABLES)
|
||||
# isr_tables.c is generated from ${ZEPHYR_PREBUILT_EXECUTABLE} by
|
||||
# gen_isr_tables.py
|
||||
add_custom_command(
|
||||
OUTPUT isr_tables.c
|
||||
OUTPUT isr_tables.c isrList.bin
|
||||
COMMAND $<TARGET_PROPERTY:bintools,elfconvert_command>
|
||||
$<TARGET_PROPERTY:bintools,elfconvert_flag>
|
||||
$<TARGET_PROPERTY:bintools,elfconvert_flag_intarget>${OUTPUT_FORMAT}
|
||||
@@ -981,6 +999,9 @@ if(CONFIG_USERSPACE)
|
||||
LIBRARIES_POST_SCRIPT ""
|
||||
DEPENDENCIES ${CODE_RELOCATION_DEP}
|
||||
)
|
||||
target_byproducts(TARGET app_smem_unaligned_prebuilt
|
||||
BYPRODUCTS ${PROJECT_BINARY_DIR}/app_smem_unaligned_prebuilt.map
|
||||
)
|
||||
set_property(TARGET app_smem_unaligned_prebuilt PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker_app_smem_unaligned.cmd)
|
||||
add_dependencies( app_smem_unaligned_prebuilt linker_app_smem_unaligned_script ${OFFSETS_LIB})
|
||||
|
||||
@@ -1012,6 +1033,9 @@ toolchain_ld_link_elf(
|
||||
LINKER_SCRIPT ${PROJECT_BINARY_DIR}/linker.cmd
|
||||
DEPENDENCIES ${CODE_RELOCATION_DEP}
|
||||
)
|
||||
target_byproducts(TARGET ${ZEPHYR_PREBUILT_EXECUTABLE}
|
||||
BYPRODUCTS ${PROJECT_BINARY_DIR}/${ZEPHYR_PREBUILT_EXECUTABLE}.map
|
||||
)
|
||||
set_property(TARGET ${ZEPHYR_PREBUILT_EXECUTABLE} PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker.cmd)
|
||||
add_dependencies( ${ZEPHYR_PREBUILT_EXECUTABLE} ${LINKER_SCRIPT_TARGET} ${OFFSETS_LIB})
|
||||
|
||||
@@ -1054,6 +1078,9 @@ else()
|
||||
LIBRARIES_POST_SCRIPT ""
|
||||
DEPENDENCIES ${CODE_RELOCATION_DEP}
|
||||
)
|
||||
target_byproducts(TARGET ${ZEPHYR_FINAL_EXECUTABLE}
|
||||
BYPRODUCTS ${PROJECT_BINARY_DIR}/${ZEPHYR_FINAL_EXECUTABLE}.map
|
||||
)
|
||||
set_property(TARGET ${ZEPHYR_FINAL_EXECUTABLE} PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker_pass_final.cmd)
|
||||
add_dependencies( ${ZEPHYR_FINAL_EXECUTABLE} ${LINKER_PASS_FINAL_SCRIPT_TARGET})
|
||||
|
||||
@@ -1079,6 +1106,7 @@ list(APPEND
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E rename ${logical_target_for_zephyr_elf}.map ${KERNEL_MAP_NAME}
|
||||
)
|
||||
list(APPEND post_build_byproducts ${KERNEL_MAP_NAME})
|
||||
|
||||
if(NOT CONFIG_BUILD_NO_GAP_FILL)
|
||||
# Use ';' as separator to get proper space in resulting command.
|
||||
|
||||
VERSION: 4 changes

@@ -1,5 +1,5 @@
 VERSION_MAJOR = 2
 VERSION_MINOR = 5
-PATCHLEVEL = 0
+PATCHLEVEL = 1
 VERSION_TWEAK = 0
-EXTRAVERSION =
+EXTRAVERSION = rc1
@@ -6,6 +6,7 @@
|
||||
|
||||
/dts-v1/;
|
||||
|
||||
#include <mem.h>
|
||||
#include <arm/armv8-m.dtsi>
|
||||
#include <dt-bindings/i2c/i2c.h>
|
||||
|
||||
@@ -25,8 +26,8 @@
|
||||
chosen {
|
||||
zephyr,console = &uart0;
|
||||
zephyr,shell-uart = &uart0;
|
||||
zephyr,sram = &sram0;
|
||||
zephyr,flash = &flash0;
|
||||
zephyr,sram = &sram2_3;
|
||||
zephyr,flash = &sram1;
|
||||
};
|
||||
|
||||
leds {
|
||||
@@ -72,13 +73,25 @@
|
||||
};
|
||||
};
|
||||
|
||||
sram0: memory@30000000 {
|
||||
/*
|
||||
* The memory regions defined below are according to AN521:
|
||||
* https://documentation-service.arm.com/static/5fa12fe9b1a7c5445f29017f
|
||||
* Please see tables from 3-1 to 3-4.
|
||||
*/
|
||||
|
||||
sram1: memory@10000000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x30000000 0x1000000>;
|
||||
reg = <0x10000000 DT_SIZE_M(4)>;
|
||||
};
|
||||
|
||||
flash0: flash@10000000 {
|
||||
reg = <0x10000000 0xE000000>;
|
||||
sram2_3: memory@38000000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x38000000 DT_SIZE_M(4)>;
|
||||
};
|
||||
|
||||
psram: memory@80000000 {
|
||||
device_type = "memory";
|
||||
reg = <0x80000000 DT_SIZE_M(16)>;
|
||||
};
|
||||
|
||||
soc {
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
|
||||
/dts-v1/;
|
||||
|
||||
#include <mem.h>
|
||||
#include <arm/armv8-m.dtsi>
|
||||
#include <dt-bindings/i2c/i2c.h>
|
||||
|
||||
@@ -25,8 +26,8 @@
|
||||
chosen {
|
||||
zephyr,console = &uart0;
|
||||
zephyr,shell-uart = &uart0;
|
||||
zephyr,sram = &sram0;
|
||||
zephyr,flash = &flash0;
|
||||
zephyr,sram = &ram;
|
||||
zephyr,flash = &code;
|
||||
};
|
||||
|
||||
leds {
|
||||
@@ -72,13 +73,45 @@
|
||||
};
|
||||
};
|
||||
|
||||
sram0: memory@28100000 {
|
||||
/*
|
||||
* The memory regions defined below are according to AN521:
|
||||
* https://documentation-service.arm.com/static/5fa12fe9b1a7c5445f29017f
|
||||
* Please see tables from 3-1 to 3-4.
|
||||
*/
|
||||
|
||||
sram1: memory@0 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x28100000 0x100000>;
|
||||
reg = <0x0 DT_SIZE_M(4)>;
|
||||
};
|
||||
|
||||
flash0: flash@100000 {
|
||||
reg = <0x100000 0xDF00000>;
|
||||
sram2_3: memory@28000000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x28000000 DT_SIZE_M(4)>;
|
||||
};
|
||||
|
||||
psram: memory@80000000 {
|
||||
device_type = "memory";
|
||||
reg = <0x80000000 DT_SIZE_M(16)>;
|
||||
};
|
||||
|
||||
reserved-memory {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
/* The memory regions defined below must match what the TF-M
|
||||
* project has defined for that board - a single image boot is
|
||||
* assumed. Please see the memory layout in:
|
||||
* https://git.trustedfirmware.org/TF-M/trusted-firmware-m.git/tree/platform/ext/target/mps2/an521/partition/flash_layout.h
|
||||
*/
|
||||
|
||||
code: memory@100000 {
|
||||
reg = <0x00100000 DT_SIZE_K(512)>;
|
||||
};
|
||||
|
||||
ram: memory@28100000 {
|
||||
reg = <0x28100000 DT_SIZE_M(1)>;
|
||||
};
|
||||
};
|
||||
|
||||
soc {
|
||||
|
||||
@@ -2136,3 +2136,27 @@ function(zephyr_get_targets directory types targets)
   endforeach()
   set(${targets} ${${targets}} PARENT_SCOPE)
 endfunction()
+
+# Usage:
+#   target_byproducts(TARGET <target> BYPRODUCTS <file> [<file>...])
+#
+# Specify additional BYPRODUCTS that this target produces.
+#
+# This function allows the build system to specify additional byproducts to a
+# target created with `add_executable()`. When linking an executable the linker
+# may produce additional files, like map files. Those files are not known to the
+# build system. This function makes it possible to describe such additional
+# byproducts in an easy manner.
+function(target_byproducts)
+  cmake_parse_arguments(TB "" "TARGET" "BYPRODUCTS" ${ARGN})
+
+  if(NOT DEFINED TB_TARGET)
+    message(FATAL_ERROR "target_byproducts() missing parameter: TARGET <target>")
+  endif()
+
+  add_custom_command(TARGET ${TB_TARGET}
+                     POST_BUILD COMMAND ${CMAKE_COMMAND} -E echo ""
+                     BYPRODUCTS ${TB_BYPRODUCTS}
+                     COMMENT "Logical command for additional byproducts on target: ${TB_TARGET}"
+  )
+endfunction()
@@ -14,7 +14,7 @@ function(gen_kobj gen_dir_out)
   file(MAKE_DIRECTORY ${gen_dir})

   add_custom_command(
-    OUTPUT ${KOBJ_TYPES} ${KOBJ_OTYPE}
+    OUTPUT ${KOBJ_TYPES} ${KOBJ_OTYPE} ${KOBJ_SIZE}
     COMMAND
     ${PYTHON_EXECUTABLE}
     ${ZEPHYR_BASE}/scripts/gen_kobject_list.py
@@ -2,6 +2,26 @@

.. _zephyr_2.5:

.. _zephyr_2.1.1:

Zephyr 2.5.1
#############

This is a maintenance release with the following fixes.

Security Vulnerability Related
******************************

The following security vulnerabilities (CVEs) were addressed in this
release:

More detailed information can be found in:
https://docs.zephyrproject.org/latest/security/vulnerabilities.html

Issues Fixed

.. _zephyr_2.5.0:

Zephyr 2.5.0
#############
@@ -175,7 +175,7 @@ static int update_sampling_pnt(uint32_t ts, uint32_t sp, struct can_timing *res,
 res->phase_seg1 = ts1 - res->prop_seg;
 res->phase_seg2 = ts2;

-sp_calc = (CAN_SYNC_SEG + ts1 * 1000) / ts;
+sp_calc = (CAN_SYNC_SEG + ts1) * 1000 / ts;

 return sp_calc > sp ? sp_calc - sp : sp - sp_calc;
 }
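To see why this one-character move of the scaling factor matters, here is a small standalone sketch of the sample-point arithmetic; the segment lengths and the CAN_SYNC_SEG value of 1 are illustrative assumptions, not values taken from the driver:

```c
#include <stdint.h>
#include <stdio.h>

#define CAN_SYNC_SEG 1U	/* assumed: the sync segment is one time quantum */

int main(void)
{
	uint32_t ts1 = 13U, ts2 = 2U;		/* hypothetical segment lengths in time quanta */
	uint32_t ts = CAN_SYNC_SEG + ts1 + ts2;	/* 16 time quanta per bit */

	/* Old expression: only ts1 is multiplied by 1000, so the quotient is
	 * not a sample point in permille of the bit time.
	 */
	uint32_t sp_old = (CAN_SYNC_SEG + ts1 * 1000U) / ts;	/* 13001 / 16 = 812 */

	/* Fixed expression: scale the whole numerator before dividing, which
	 * yields the sample point in permille of the bit time.
	 */
	uint32_t sp_new = (CAN_SYNC_SEG + ts1) * 1000U / ts;	/* 14000 / 16 = 875 */

	printf("old: %u, fixed: %u permille\n", sp_old, sp_new);
	return 0;
}
```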
@@ -5,6 +5,12 @@
|
||||
|
||||
if SOC_SERIES_STM32F1X
|
||||
|
||||
config CLOCK_STM32_PLL_XTPRE
|
||||
bool "HSE to PLL /2 prescaler"
|
||||
depends on SOC_STM32F10X_DENSITY_DEVICE && CLOCK_STM32_PLL_SRC_HSE
|
||||
help
|
||||
Enable this option to enable /2 prescaler on HSE to PLL clock signal
|
||||
|
||||
config CLOCK_STM32_PLL_MULTIPLIER
|
||||
int "PLL multiplier"
|
||||
depends on CLOCK_STM32_SYSCLK_SRC_PLL
|
||||
@@ -18,7 +24,7 @@ config CLOCK_STM32_PLL_MULTIPLIER
|
||||
|
||||
config CLOCK_STM32_PLL_PREDIV1
|
||||
int "PREDIV1 Prescaler"
|
||||
depends on CLOCK_STM32_SYSCLK_SRC_PLL
|
||||
depends on SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE && CLOCK_STM32_SYSCLK_SRC_PLL
|
||||
default 1
|
||||
range 1 16
|
||||
help
|
||||
|
||||
@@ -36,16 +36,40 @@
|
||||
void config_pll_init(LL_UTILS_PLLInitTypeDef *pllinit)
|
||||
{
|
||||
/*
|
||||
* PLLMUL on SOC_STM32F10X_DENSITY_DEVICE
|
||||
* 2 -> LL_RCC_PLL_MUL_2 -> 0x00000000
|
||||
* 3 -> LL_RCC_PLL_MUL_3 -> 0x00040000
|
||||
* 4 -> LL_RCC_PLL_MUL_4 -> 0x00080000
|
||||
* ...
|
||||
* 16 -> LL_RCC_PLL_MUL_16 -> 0x00380000
|
||||
*
|
||||
* PLLMUL on SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
|
||||
* 4 -> LL_RCC_PLL_MUL_4 -> 0x00080000
|
||||
* ...
|
||||
* 9 -> LL_RCC_PLL_MUL_9 -> 0x001C0000
|
||||
* 13 -> LL_RCC_PLL_MUL_6_5 -> 0x00340000
|
||||
*/
|
||||
pllinit->PLLMul = ((CONFIG_CLOCK_STM32_PLL_MULTIPLIER - 2)
|
||||
<< RCC_CFGR_PLLMULL_Pos);
|
||||
|
||||
#ifdef CONFIG_SOC_STM32F10X_DENSITY_DEVICE
|
||||
/* PLL prediv */
|
||||
#ifdef CONFIG_CLOCK_STM32_PLL_XTPRE
|
||||
/*
|
||||
* SOC_STM32F10X_DENSITY_DEVICE:
|
||||
* PLLXPTRE (depends on PLL source HSE)
|
||||
* HSE/2 used as PLL source
|
||||
*/
|
||||
pllinit->Prediv = LL_RCC_PREDIV_DIV_2;
|
||||
#else
|
||||
/*
|
||||
* SOC_STM32F10X_DENSITY_DEVICE:
|
||||
* PLLXPTRE (depends on PLL source HSE)
|
||||
* HSE used as direct PLL source
|
||||
*/
|
||||
pllinit->Prediv = LL_RCC_PREDIV_DIV_1;
|
||||
#endif /* CONFIG_CLOCK_STM32_PLL_XTPRE */
|
||||
#else
|
||||
/*
|
||||
* SOC_STM32F10X_CONNECTIVITY_LINE_DEVICE
|
||||
* 1 -> LL_RCC_PREDIV_DIV_1 -> 0x00000000
|
||||
@@ -55,6 +79,7 @@ void config_pll_init(LL_UTILS_PLLInitTypeDef *pllinit)
|
||||
* 16 -> LL_RCC_PREDIV_DIV_16 -> 0x0000000F
|
||||
*/
|
||||
pllinit->Prediv = CONFIG_CLOCK_STM32_PLL_PREDIV1 - 1;
|
||||
#endif /* CONFIG_SOC_STM32F10X_DENSITY_DEVICE */
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CLOCK_STM32_SYSCLK_SRC_PLL */
|
||||
|
||||
@@ -165,12 +165,14 @@ static int rtc_stm32_set_alarm(const struct device *dev, uint8_t chan_id,
|
||||
* that tick+1 event occurs before alarm setting is finished.
|
||||
*/
|
||||
ticks += now + 1;
|
||||
alarm_val = (time_t)(counter_ticks_to_us(dev, ticks) / USEC_PER_SEC)
|
||||
+ T_TIME_OFFSET;
|
||||
} else {
|
||||
alarm_val = (time_t)(counter_ticks_to_us(dev, ticks) / USEC_PER_SEC);
|
||||
}
|
||||
|
||||
LOG_DBG("Set Alarm: %d\n", ticks);
|
||||
|
||||
alarm_val = (time_t)(counter_ticks_to_us(dev, ticks) / USEC_PER_SEC);
|
||||
|
||||
gmtime_r(&alarm_val, &alarm_tm);
|
||||
|
||||
/* Apply ALARM_A */
|
||||
|
||||
@@ -167,4 +167,11 @@ config ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM

 endif # ESPI_PERIPHERAL_CHANNEL

+config ESPI_OOB_CHANNEL_RX_ASYNC
+	bool "OOB host-initiated traffic handling"
+	depends on ESPI_OOB_CHANNEL
+	help
+	  Enables asynchronous handling for host-initiated OOB traffic.
+	  Otherwise OOB traffic is assumed to be always client-initiated.
+
 endif # ESPI
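As a rough illustration of how an application consumes this option, the sketch below registers a callback for host-initiated OOB traffic; it condenses what the eSPI sample added later in this compare does, and the function and variable names here are illustrative:

```c
#include <device.h>
#include <drivers/espi.h>

static struct espi_callback oob_cb;

/* Invoked by the driver when CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC is enabled and a
 * host-initiated OOB packet arrives. Only take note here and defer the actual
 * espi_receive_oob() call to thread context.
 */
static void oob_rx_handler(const struct device *dev, struct espi_callback *cb,
			   struct espi_event event)
{
	/* event.evt_details carries the received length */
}

void register_oob_rx(const struct device *espi_dev)
{
	espi_init_callback(&oob_cb, oob_rx_handler, ESPI_BUS_EVENT_OOB_RECEIVED);
	espi_add_callback(espi_dev, &oob_cb);
}
```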
@@ -21,8 +21,8 @@
  */
 #define ESPI_XEC_VWIRE_SEND_TIMEOUT 100ul

-/* 100ms */
-#define MAX_OOB_TIMEOUT 100ul
+/* 200ms */
+#define MAX_OOB_TIMEOUT 200ul
 /* 1s */
 #define MAX_FLASH_TIMEOUT 1000ul
@@ -529,25 +529,23 @@ static int espi_xec_send_oob(const struct device *dev,
|
||||
static int espi_xec_receive_oob(const struct device *dev,
|
||||
struct espi_oob_packet *pckt)
|
||||
{
|
||||
int ret;
|
||||
uint8_t err_mask = MCHP_ESPI_OOB_RX_STS_IBERR |
|
||||
MCHP_ESPI_OOB_RX_STS_OVRUN;
|
||||
struct espi_xec_data *data = (struct espi_xec_data *)(dev->data);
|
||||
|
||||
if (ESPI_OOB_REGS->TX_STS & err_mask) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Enable Rx only when we want to receive data */
|
||||
ESPI_OOB_REGS->RX_IEN |= MCHP_ESPI_OOB_RX_IEN;
|
||||
ESPI_OOB_REGS->RX_CTRL |= MCHP_ESPI_OOB_RX_CTRL_AVAIL;
|
||||
#ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
int ret;
|
||||
struct espi_xec_data *data = (struct espi_xec_data *)(dev->data);
|
||||
|
||||
/* Wait until ISR or timeout */
|
||||
ret = k_sem_take(&data->rx_lock, K_MSEC(MAX_OOB_TIMEOUT));
|
||||
if (ret == -EAGAIN) {
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
#endif
|
||||
/* Check if buffer passed to driver can fit the received buffer */
|
||||
uint32_t rcvd_len = ESPI_OOB_REGS->RX_LEN & MCHP_ESPI_OOB_RX_LEN_MASK;
|
||||
|
||||
@@ -558,6 +556,12 @@ static int espi_xec_receive_oob(const struct device *dev,
|
||||
|
||||
pckt->len = rcvd_len;
|
||||
memcpy(pckt->buf, slave_rx_mem, pckt->len);
|
||||
memset(slave_rx_mem, 0, pckt->len);
|
||||
|
||||
/* Only after data has been copied from SRAM, indicate channel
|
||||
* is available for next packet
|
||||
*/
|
||||
ESPI_OOB_REGS->RX_CTRL |= MCHP_ESPI_OOB_RX_CTRL_AVAIL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -656,8 +660,6 @@ static int espi_xec_flash_write(const struct device *dev,
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
memcpy(pckt->buf, slave_rx_mem, pckt->len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -754,6 +756,12 @@ static void espi_init_oob(const struct device *dev)
|
||||
/* Enable OOB Tx channel enable change status interrupt */
|
||||
ESPI_OOB_REGS->TX_IEN |= MCHP_ESPI_OOB_TX_IEN_CHG_EN |
|
||||
MCHP_ESPI_OOB_TX_IEN_DONE;
|
||||
|
||||
/* Enable Rx channel to receive data any time
|
||||
* there are case where OOB is not initiated by a previous OOB Tx
|
||||
*/
|
||||
ESPI_OOB_REGS->RX_IEN |= MCHP_ESPI_OOB_RX_IEN;
|
||||
ESPI_OOB_REGS->RX_CTRL |= MCHP_ESPI_OOB_RX_CTRL_AVAIL;
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -950,6 +958,11 @@ static void espi_oob_down_isr(const struct device *dev)
|
||||
{
|
||||
uint32_t status;
|
||||
struct espi_xec_data *data = (struct espi_xec_data *)(dev->data);
|
||||
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_OOB_RECEIVED,
|
||||
.evt_details = 0,
|
||||
.evt_data = 0 };
|
||||
#endif
|
||||
|
||||
status = ESPI_OOB_REGS->RX_STS;
|
||||
|
||||
@@ -958,10 +971,12 @@ static void espi_oob_down_isr(const struct device *dev)
|
||||
/* Register is write-on-clear, ensure only 1 bit is affected */
|
||||
ESPI_OOB_REGS->RX_STS = MCHP_ESPI_OOB_RX_STS_DONE;
|
||||
|
||||
/* Disable Rx interrupt */
|
||||
ESPI_OOB_REGS->RX_IEN &= ~MCHP_ESPI_OOB_RX_IEN;
|
||||
|
||||
#ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
k_sem_give(&data->rx_lock);
|
||||
#else
|
||||
evt.evt_details = ESPI_OOB_REGS->RX_LEN & MCHP_ESPI_OOB_RX_LEN_MASK;
|
||||
espi_send_callbacks(&data->callbacks, dev, evt);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1378,7 +1393,9 @@ static int espi_xec_init(const struct device *dev)
|
||||
ESPI_CAP_REGS->OOB_CAP |= MCHP_ESPI_OOB_CAP_MAX_PLD_SZ_73;
|
||||
|
||||
k_sem_init(&data->tx_lock, 0, 1);
|
||||
#ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
k_sem_init(&data->rx_lock, 0, 1);
|
||||
#endif /* CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC */
|
||||
#else
|
||||
ESPI_CAP_REGS->GLB_CAP0 &= ~MCHP_ESPI_GBL_CAP0_OOB_SUPP;
|
||||
#endif
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include <drivers/flash.h>
|
||||
#include <string.h>
|
||||
#include <nrfx_nvmc.h>
|
||||
#include <nrf_erratas.h>
|
||||
|
||||
#include "soc_flash_nrf.h"
|
||||
|
||||
@@ -73,6 +74,17 @@ static struct k_sem sem_lock;
|
||||
#define SYNC_UNLOCK()
|
||||
#endif
|
||||
|
||||
#if NRF52_ERRATA_242_PRESENT
|
||||
#include <hal/nrf_power.h>
|
||||
static int suspend_pofwarn(void);
|
||||
static void restore_pofwarn(void);
|
||||
|
||||
#define SUSPEND_POFWARN() suspend_pofwarn()
|
||||
#define RESUME_POFWARN() restore_pofwarn()
|
||||
#else
|
||||
#define SUSPEND_POFWARN() 0
|
||||
#define RESUME_POFWARN()
|
||||
#endif /* NRF52_ERRATA_242_PRESENT */
|
||||
|
||||
static int write(off_t addr, const void *data, size_t len);
|
||||
static int erase(uint32_t addr, uint32_t size);
|
||||
@@ -343,12 +355,20 @@ static int erase_op(void *context)
|
||||
|
||||
#ifdef CONFIG_SOC_FLASH_NRF_UICR
|
||||
if (e_ctx->flash_addr == (off_t)NRF_UICR) {
|
||||
if (SUSPEND_POFWARN()) {
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
(void)nrfx_nvmc_uicr_erase();
|
||||
RESUME_POFWARN();
|
||||
return FLASH_OP_DONE;
|
||||
}
|
||||
#endif
|
||||
|
||||
do {
|
||||
if (SUSPEND_POFWARN()) {
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE)
|
||||
if (e_ctx->flash_addr == e_ctx->flash_addr_next) {
|
||||
@@ -367,6 +387,8 @@ static int erase_op(void *context)
|
||||
e_ctx->flash_addr += pg_size;
|
||||
#endif /* CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE */
|
||||
|
||||
RESUME_POFWARN();
|
||||
|
||||
#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE
|
||||
i++;
|
||||
|
||||
@@ -410,10 +432,15 @@ static int write_op(void *context)
|
||||
count = w_ctx->len;
|
||||
}
|
||||
|
||||
if (SUSPEND_POFWARN()) {
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
nrfx_nvmc_bytes_write(w_ctx->flash_addr,
|
||||
(const void *)w_ctx->data_addr,
|
||||
count);
|
||||
|
||||
RESUME_POFWARN();
|
||||
shift_write_context(count, w_ctx);
|
||||
|
||||
#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE
|
||||
@@ -428,9 +455,13 @@ static int write_op(void *context)
|
||||
#endif /* CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS */
|
||||
/* Write all the 4-byte aligned data */
|
||||
while (w_ctx->len >= sizeof(uint32_t)) {
|
||||
if (SUSPEND_POFWARN()) {
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
nrfx_nvmc_word_write(w_ctx->flash_addr,
|
||||
UNALIGNED_GET((uint32_t *)w_ctx->data_addr));
|
||||
|
||||
RESUME_POFWARN();
|
||||
shift_write_context(sizeof(uint32_t), w_ctx);
|
||||
|
||||
#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE
|
||||
@@ -447,10 +478,14 @@ static int write_op(void *context)
|
||||
#if IS_ENABLED(CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS)
|
||||
/* Write remaining unaligned data */
|
||||
if (w_ctx->len) {
|
||||
if (SUSPEND_POFWARN()) {
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
nrfx_nvmc_bytes_write(w_ctx->flash_addr,
|
||||
(const void *)w_ctx->data_addr,
|
||||
w_ctx->len);
|
||||
|
||||
RESUME_POFWARN();
|
||||
shift_write_context(w_ctx->len, w_ctx);
|
||||
}
|
||||
#endif /* CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS */
|
||||
@@ -488,3 +523,50 @@ static int write(off_t addr, const void *data, size_t len)
|
||||
|
||||
return write_op(&context);
|
||||
}
|
||||
|
||||
#if NRF52_ERRATA_242_PRESENT
|
||||
/* Disable POFWARN by writing POFCON before a write or erase operation.
|
||||
* Do not attempt to write or erase if EVENTS_POFWARN is already asserted.
|
||||
*/
|
||||
static bool pofcon_enabled;
|
||||
|
||||
static int suspend_pofwarn(void)
|
||||
{
|
||||
if (!nrf52_errata_242()) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool enabled;
|
||||
nrf_power_pof_thr_t pof_thr;
|
||||
|
||||
pof_thr = nrf_power_pofcon_get(NRF_POWER, &enabled);
|
||||
|
||||
if (enabled) {
|
||||
nrf_power_pofcon_set(NRF_POWER, false, pof_thr);
|
||||
|
||||
/* This check need to be reworked once POFWARN event will be
|
||||
* served by zephyr.
|
||||
*/
|
||||
if (nrf_power_event_check(NRF_POWER, NRF_POWER_EVENT_POFWARN)) {
|
||||
nrf_power_pofcon_set(NRF_POWER, true, pof_thr);
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
pofcon_enabled = enabled;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void restore_pofwarn(void)
|
||||
{
|
||||
nrf_power_pof_thr_t pof_thr;
|
||||
|
||||
if (pofcon_enabled) {
|
||||
pof_thr = nrf_power_pofcon_get(NRF_POWER, NULL);
|
||||
|
||||
nrf_power_pofcon_set(NRF_POWER, true, pof_thr);
|
||||
pofcon_enabled = false;
|
||||
}
|
||||
}
|
||||
#endif /* NRF52_ERRATA_242_PRESENT */
|
||||
|
||||
@@ -10,7 +10,7 @@
 #include <kernel.h>

 #define FLASH_OP_DONE (0) /* 0 for compliance with the driver API. */
-#define FLASH_OP_ONGOING (-1)
+#define FLASH_OP_ONGOING 1

 struct flash_context {
 	uint32_t data_addr; /* Address of data to write. */
@@ -53,7 +53,8 @@ struct flash_context {
 *
 * @param context pointer to flash_context structure.
 * @retval @ref FLASH_OP_DONE once operation was done, @ref FLASH_OP_ONGOING if
-* operation needs more time for execution.
+* operation needs more time for execution and a negative error code if
+* operation was aborted.
 */
 typedef int (*flash_op_handler_t) (void *context);
@@ -92,12 +93,13 @@ bool nrf_flash_sync_is_required(void);
 * to timing settings requested by nrf_flash_sync_set_context().
 * This routine needs to call the handler as many times as it returns
 * FLASH_OP_ONGOING; however, an operation timeout should be implemented.
-* When the handler() returns FLASH_OP_DONE, no further execution windows are
-* needed so function should return as the handler() finished its operation.
+* When the handler() returns FLASH_OP_DONE or an error code, no further
+* execution windows are needed so function should return as the handler()
+* finished its operation.
 *
-* @retval 0 if op_desc->handler() was executed and
-* finished its operation. Otherwise (timeout, couldn't schedule execution...)
-* a negative error code.
+* @retval 0 if op_desc->handler() was executed and finished its operation
+* successfully. Otherwise (handler returned error, timeout, couldn't schedule
+* execution...) a negative error code.
 *
 * execution window
 * Driver task task
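A minimal sketch of how a synchronization backend could drive a flash_op_handler_t under this amended contract; the wait_for_execution_window() helper is purely hypothetical and stands in for whatever radio-idle scheduling the platform provides, while FLASH_OP_DONE, FLASH_OP_ONGOING and flash_op_handler_t are the definitions from soc_flash_nrf.h above:

```c
#include <errno.h>
#include "soc_flash_nrf.h"	/* FLASH_OP_DONE, FLASH_OP_ONGOING, flash_op_handler_t */

/* Hypothetical helper: blocks until the next radio-idle execution window and
 * returns non-zero if one could not be scheduled in time.
 */
extern int wait_for_execution_window(void);

static int run_flash_op(flash_op_handler_t handler, void *context)
{
	int rc;

	do {
		if (wait_for_execution_window() != 0) {
			return -ETIMEDOUT;	/* no window: report failure */
		}

		rc = handler(context);
	} while (rc == FLASH_OP_ONGOING);

	/* rc is now FLASH_OP_DONE (0) or the negative error code the handler
	 * used to abort, and can be passed straight back to the caller.
	 */
	return rc;
}
```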
@@ -58,18 +58,20 @@ static void time_slot_callback_work(uint32_t ticks_at_expire,
|
||||
struct flash_op_desc *op_desc;
|
||||
uint8_t instance_index;
|
||||
uint8_t ticker_id;
|
||||
int rc;
|
||||
|
||||
__ASSERT(ll_radio_state_is_idle(),
|
||||
"Radio is on during flash operation.\n");
|
||||
|
||||
op_desc = context;
|
||||
if (op_desc->handler(op_desc->context) == FLASH_OP_DONE) {
|
||||
rc = op_desc->handler(op_desc->context);
|
||||
if (rc != FLASH_OP_ONGOING) {
|
||||
ll_timeslice_ticker_id_get(&instance_index, &ticker_id);
|
||||
|
||||
/* Stop the time slot ticker */
|
||||
_ticker_stop(instance_index, 0, ticker_id);
|
||||
|
||||
_ticker_sync_context.result = 0;
|
||||
_ticker_sync_context.result = (rc == FLASH_OP_DONE) ? 0 : rc;
|
||||
|
||||
/* notify thread that data is available */
|
||||
k_sem_give(&sem_sync);
|
||||
@@ -90,7 +92,8 @@ static void time_slot_delay(uint32_t ticks_at_expire, uint32_t ticks_delay,
|
||||
* Radio h/w.
|
||||
*/
|
||||
err = ticker_start(instance_index, /* Radio instance ticker */
|
||||
0, /* user_id */
|
||||
1, /* user id for link layer ULL_HIGH */
|
||||
/* (MAYFLY_CALL_ID_WORKER) */
|
||||
(ticker_id + 1), /* ticker_id */
|
||||
ticks_at_expire, /* current tick */
|
||||
ticks_delay, /* one-shot delayed timeout */
|
||||
|
||||
@@ -617,14 +617,11 @@ end:
|
||||
int32_t stm32_i2c_msg_write(const struct device *dev, struct i2c_msg *msg,
|
||||
uint8_t *next_msg_flags, uint16_t saddr)
|
||||
{
|
||||
const struct i2c_stm32_config *cfg = DEV_CFG(dev);
|
||||
struct i2c_stm32_data *data = DEV_DATA(dev);
|
||||
I2C_TypeDef *i2c = cfg->i2c;
|
||||
|
||||
msg_init(dev, msg, next_msg_flags, saddr, I2C_REQUEST_WRITE);
|
||||
|
||||
stm32_i2c_enable_transfer_interrupts(dev);
|
||||
LL_I2C_EnableIT_TX(i2c);
|
||||
|
||||
if (k_sem_take(&data->device_sync_sem,
|
||||
K_MSEC(STM32_I2C_TRANSFER_TIMEOUT_MSEC)) != 0) {
|
||||
|
||||
@@ -255,7 +255,6 @@ int ioapic_suspend(const struct device *port)
|
||||
store_flags(irq, rte_lo);
|
||||
}
|
||||
}
|
||||
ioapic_device_power_state = DEVICE_PM_SUSPEND_STATE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -285,7 +284,6 @@ int ioapic_resume_from_suspend(const struct device *port)
|
||||
ioApicRedSetHi(irq, DEFAULT_RTE_DEST);
|
||||
ioApicRedSetLo(irq, rteValue);
|
||||
}
|
||||
ioapic_device_power_state = DEVICE_PM_ACTIVE_STATE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -300,10 +298,28 @@ static int ioapic_device_ctrl(const struct device *device,
|
||||
int ret = 0;
|
||||
|
||||
if (ctrl_command == DEVICE_PM_SET_POWER_STATE) {
|
||||
if (*((uint32_t *)context) == DEVICE_PM_SUSPEND_STATE) {
|
||||
uint32_t new_state = *((uint32_t *)context);
|
||||
|
||||
switch (new_state) {
|
||||
case DEVICE_PM_LOW_POWER_STATE:
|
||||
break;
|
||||
case DEVICE_PM_ACTIVE_STATE:
|
||||
if (ioapic_device_power_state !=
|
||||
DEVICE_PM_LOW_POWER_STATE) {
|
||||
ret = ioapic_resume_from_suspend(device);
|
||||
}
|
||||
break;
|
||||
case DEVICE_PM_SUSPEND_STATE:
|
||||
case DEVICE_PM_FORCE_SUSPEND_STATE:
|
||||
case DEVICE_PM_OFF_STATE:
|
||||
ret = ioapic_suspend(device);
|
||||
} else if (*((uint32_t *)context) == DEVICE_PM_ACTIVE_STATE) {
|
||||
ret = ioapic_resume_from_suspend(device);
|
||||
break;
|
||||
default:
|
||||
ret = -ENOTSUP;
|
||||
}
|
||||
|
||||
if (ret == 0) {
|
||||
ioapic_device_power_state = new_state;
|
||||
}
|
||||
} else if (ctrl_command == DEVICE_PM_GET_POWER_STATE) {
|
||||
*((uint32_t *)context) = ioapic_device_power_state;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016 Linaro Limited
|
||||
* Copyright (c) 2021, Linaro Limited.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
@@ -217,8 +217,18 @@ static int uart_cmsdk_apb_fifo_fill(const struct device *dev,
|
||||
{
|
||||
volatile struct uart_cmsdk_apb *uart = UART_STRUCT(dev);
|
||||
|
||||
/* No hardware FIFO present */
|
||||
/*
|
||||
* No hardware FIFO present. Only 1 byte
|
||||
* to write if TX buffer is empty.
|
||||
*/
|
||||
if (len && !(uart->state & UART_TX_BF)) {
|
||||
/*
|
||||
* Clear TX int. pending flag before pushing byte to "FIFO".
|
||||
* If TX interrupt is enabled the UART_TX_IN bit will be set
|
||||
* again automatically by the UART hardware machinery once
|
||||
* the "FIFO" becomes empty again.
|
||||
*/
|
||||
uart->intclear = UART_TX_IN;
|
||||
uart->data = *tx_data;
|
||||
return 1;
|
||||
}
|
||||
@@ -240,8 +250,18 @@ static int uart_cmsdk_apb_fifo_read(const struct device *dev,
|
||||
{
|
||||
volatile struct uart_cmsdk_apb *uart = UART_STRUCT(dev);
|
||||
|
||||
/* No hardware FIFO present */
|
||||
/*
|
||||
* No hardware FIFO present. Only 1 byte
|
||||
* to read if RX buffer is full.
|
||||
*/
|
||||
if (size && uart->state & UART_RX_BF) {
|
||||
/*
|
||||
* Clear RX int. pending flag before popping byte from "FIFO".
|
||||
* If RX interrupt is enabled the UART_RX_IN bit will be set
|
||||
* again automatically by the UART hardware machinery once
|
||||
* the "FIFO" becomes full again.
|
||||
*/
|
||||
uart->intclear = UART_RX_IN;
|
||||
*rx_data = (unsigned char)uart->data;
|
||||
return 1;
|
||||
}
|
||||
@@ -262,10 +282,12 @@ static void uart_cmsdk_apb_irq_tx_enable(const struct device *dev)
|
||||
|
||||
UART_STRUCT(dev)->ctrl |= UART_TX_IN_EN;
|
||||
/* The expectation is that TX is a level interrupt, active for as
|
||||
* long as TX buffer is empty. But in CMSDK UART, it appears to be
|
||||
* edge interrupt, firing on a state change of TX buffer. So, we
|
||||
* need to "prime" it here by calling ISR directly, to get interrupt
|
||||
* processing going.
|
||||
* long as TX buffer is empty. But in CMSDK UART it's an edge
|
||||
* interrupt, firing on a state change of TX buffer from full to
|
||||
* empty. So, we need to "prime" it here by calling ISR directly,
|
||||
* to get interrupt processing going, as there is no previous
|
||||
* full state to allow a transition from full to empty buffer
|
||||
* that will trigger a TX interrupt.
|
||||
*/
|
||||
key = irq_lock();
|
||||
uart_cmsdk_apb_isr(dev);
|
||||
@@ -282,6 +304,8 @@ static void uart_cmsdk_apb_irq_tx_enable(const struct device *dev)
|
||||
static void uart_cmsdk_apb_irq_tx_disable(const struct device *dev)
|
||||
{
|
||||
UART_STRUCT(dev)->ctrl &= ~UART_TX_IN_EN;
|
||||
/* Clear any pending TX interrupt after disabling it */
|
||||
UART_STRUCT(dev)->intclear = UART_TX_IN;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -318,6 +342,8 @@ static void uart_cmsdk_apb_irq_rx_enable(const struct device *dev)
|
||||
static void uart_cmsdk_apb_irq_rx_disable(const struct device *dev)
|
||||
{
|
||||
UART_STRUCT(dev)->ctrl &= ~UART_RX_IN_EN;
|
||||
/* Clear any pending RX interrupt after disabling it */
|
||||
UART_STRUCT(dev)->intclear = UART_RX_IN;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -377,9 +403,7 @@ static void uart_cmsdk_apb_irq_err_disable(const struct device *dev)
|
||||
*/
|
||||
static int uart_cmsdk_apb_irq_is_pending(const struct device *dev)
|
||||
{
|
||||
/* Return true if rx buffer full or tx buffer empty */
|
||||
return (UART_STRUCT(dev)->state & (UART_RX_BF | UART_TX_BF))
|
||||
!= UART_TX_BF;
|
||||
return (UART_STRUCT(dev)->intstatus & (UART_RX_IN | UART_TX_IN));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -421,12 +445,8 @@ static void uart_cmsdk_apb_irq_callback_set(const struct device *dev,
|
||||
*/
|
||||
void uart_cmsdk_apb_isr(const struct device *dev)
|
||||
{
|
||||
volatile struct uart_cmsdk_apb *uart = UART_STRUCT(dev);
|
||||
struct uart_cmsdk_apb_dev_data *data = DEV_DATA(dev);
|
||||
|
||||
/* Clear pending interrupts */
|
||||
uart->intclear = UART_RX_IN | UART_TX_IN;
|
||||
|
||||
/* Verify if the callback has been registered */
|
||||
if (data->irq_cb) {
|
||||
data->irq_cb(dev, data->irq_cb_data);
|
||||
@@ -490,7 +510,7 @@ DEVICE_DT_INST_DEFINE(0,
|
||||
&uart_cmsdk_apb_driver_api);
|
||||
|
||||
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
|
||||
#if DT_INST_IRQ_HAS_CELL(0, irq)
|
||||
#if DT_NUM_IRQS(DT_DRV_INST(0)) == 1
|
||||
static void uart_cmsdk_apb_irq_config_func_0(const struct device *dev)
|
||||
{
|
||||
IRQ_CONNECT(DT_INST_IRQN(0),
|
||||
@@ -555,7 +575,7 @@ DEVICE_DT_INST_DEFINE(1,
|
||||
&uart_cmsdk_apb_driver_api);
|
||||
|
||||
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
|
||||
#if DT_INST_IRQ_HAS_CELL(1, irq)
|
||||
#if DT_NUM_IRQS(DT_DRV_INST(1)) == 1
|
||||
static void uart_cmsdk_apb_irq_config_func_1(const struct device *dev)
|
||||
{
|
||||
IRQ_CONNECT(DT_INST_IRQN(1),
|
||||
@@ -620,15 +640,15 @@ DEVICE_DT_INST_DEFINE(2,
|
||||
&uart_cmsdk_apb_driver_api);
|
||||
|
||||
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
|
||||
#ifdef CMSDK_APB_UART_2_IRQ
|
||||
#if DT_NUM_IRQS(DT_DRV_INST(2)) == 1
|
||||
static void uart_cmsdk_apb_irq_config_func_2(const struct device *dev)
|
||||
{
|
||||
IRQ_CONNECT(CMSDK_APB_UART_2_IRQ,
|
||||
IRQ_CONNECT(DT_INST_IRQN(2),
|
||||
DT_INST_IRQ_BY_NAME(2, priority, irq),
|
||||
uart_cmsdk_apb_isr,
|
||||
DEVICE_DT_INST_GET(2),
|
||||
0);
|
||||
irq_enable(CMSDK_APB_UART_2_IRQ);
|
||||
irq_enable(DT_INST_IRQN(2));
|
||||
}
|
||||
#else
|
||||
static void uart_cmsdk_apb_irq_config_func_2(const struct device *dev)
|
||||
@@ -685,15 +705,15 @@ DEVICE_DT_INST_DEFINE(3,
|
||||
&uart_cmsdk_apb_driver_api);
|
||||
|
||||
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
|
||||
#ifdef CMSDK_APB_UART_3_IRQ
|
||||
#if DT_NUM_IRQS(DT_DRV_INST(3)) == 1
|
||||
static void uart_cmsdk_apb_irq_config_func_3(const struct device *dev)
|
||||
{
|
||||
IRQ_CONNECT(CMSDK_APB_UART_3_IRQ,
|
||||
DT_INST_IRQ_BY_NAME(3, priority, irq),
|
||||
IRQ_CONNECT(DT_INST_IRQN(3),
|
||||
DT_INST_IRQ(3, priority),
|
||||
uart_cmsdk_apb_isr,
|
||||
DEVICE_DT_INST_GET(3),
|
||||
0);
|
||||
irq_enable(CMSDK_APB_UART_3_IRQ);
|
||||
irq_enable(DT_INST_IRQN(3));
|
||||
}
|
||||
#else
|
||||
static void uart_cmsdk_apb_irq_config_func_3(const struct device *dev)
|
||||
@@ -750,15 +770,15 @@ DEVICE_DT_INST_DEFINE(4,
|
||||
&uart_cmsdk_apb_driver_api);
|
||||
|
||||
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
|
||||
#ifdef CMSDK_APB_UART_4_IRQ
|
||||
#if DT_NUM_IRQS(DT_DRV_INST(4)) == 1
|
||||
static void uart_cmsdk_apb_irq_config_func_4(const struct device *dev)
|
||||
{
|
||||
IRQ_CONNECT(CMSDK_APB_UART_4_IRQ,
|
||||
IRQ_CONNECT(DT_INST_IRQN(4),
|
||||
DT_INST_IRQ_BY_NAME(4, priority, irq),
|
||||
uart_cmsdk_apb_isr,
|
||||
DEVICE_DT_INST_GET(4),
|
||||
0);
|
||||
irq_enable(CMSDK_APB_UART_4_IRQ);
|
||||
irq_enable(DT_INST_IRQN(4));
|
||||
}
|
||||
#else
|
||||
static void uart_cmsdk_apb_irq_config_func_4(const struct device *dev)
|
||||
|
||||
@@ -306,48 +306,45 @@ static int uart_stm32_configure(const struct device *dev,
|
||||
const uint32_t flowctrl = uart_stm32_cfg2ll_hwctrl(cfg->flow_ctrl);
|
||||
|
||||
/* Hardware doesn't support mark or space parity */
|
||||
if ((UART_CFG_PARITY_MARK == cfg->parity) ||
|
||||
(UART_CFG_PARITY_SPACE == cfg->parity)) {
|
||||
if ((cfg->parity == UART_CFG_PARITY_MARK) ||
|
||||
(cfg->parity == UART_CFG_PARITY_SPACE)) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
#if defined(LL_USART_STOPBITS_0_5) && HAS_LPUART_1
|
||||
if (IS_LPUART_INSTANCE(UartInstance) &&
|
||||
UART_CFG_STOP_BITS_0_5 == cfg->stop_bits) {
|
||||
(cfg->stop_bits == UART_CFG_STOP_BITS_0_5)) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
#else
|
||||
if (UART_CFG_STOP_BITS_0_5 == cfg->stop_bits) {
|
||||
if (cfg->stop_bits == UART_CFG_STOP_BITS_0_5) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(LL_USART_STOPBITS_1_5) && HAS_LPUART_1
|
||||
if (IS_LPUART_INSTANCE(UartInstance) &&
|
||||
UART_CFG_STOP_BITS_1_5 == cfg->stop_bits) {
|
||||
(cfg->stop_bits == UART_CFG_STOP_BITS_1_5)) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
#else
|
||||
if (UART_CFG_STOP_BITS_1_5 == cfg->stop_bits) {
|
||||
if (cfg->stop_bits == UART_CFG_STOP_BITS_1_5) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Driver doesn't support 5 or 6 databits and potentially 7 or 9 */
|
||||
if ((UART_CFG_DATA_BITS_5 == cfg->data_bits) ||
|
||||
(UART_CFG_DATA_BITS_6 == cfg->data_bits)
|
||||
if ((cfg->data_bits == UART_CFG_DATA_BITS_5) ||
|
||||
(cfg->data_bits == UART_CFG_DATA_BITS_6)
|
||||
#ifndef LL_USART_DATAWIDTH_7B
|
||||
|| (UART_CFG_DATA_BITS_7 == cfg->data_bits)
|
||||
|| (cfg->data_bits == UART_CFG_DATA_BITS_7)
|
||||
#endif /* LL_USART_DATAWIDTH_7B */
|
||||
#ifndef LL_USART_DATAWIDTH_9B
|
||||
|| (UART_CFG_DATA_BITS_9 == cfg->data_bits)
|
||||
#endif /* LL_USART_DATAWIDTH_9B */
|
||||
) {
|
||||
|| (cfg->data_bits == UART_CFG_DATA_BITS_9)) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
/* Driver supports only RTS CTS flow control */
|
||||
if (UART_CFG_FLOW_CTRL_NONE != cfg->flow_ctrl) {
|
||||
if (cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) {
|
||||
if (!IS_UART_HWFLOW_INSTANCE(UartInstance) ||
|
||||
UART_CFG_FLOW_CTRL_RTS_CTS != cfg->flow_ctrl) {
|
||||
return -ENOTSUP;
|
||||
|
||||
@@ -67,6 +67,20 @@ K_KERNEL_STACK_DEFINE(esp_workq_stack,
|
||||
|
||||
struct esp_data esp_driver_data;
|
||||
|
||||
static void esp_configure_hostname(struct esp_data *data)
|
||||
{
|
||||
#if defined(CONFIG_NET_HOSTNAME_ENABLE)
|
||||
char cmd[sizeof("AT+CWHOSTNAME=\"\"") + NET_HOSTNAME_MAX_LEN];
|
||||
|
||||
snprintk(cmd, sizeof(cmd), "AT+CWHOSTNAME=\"%s\"", net_hostname_get());
|
||||
cmd[sizeof(cmd) - 1] = '\0';
|
||||
|
||||
esp_cmd_send(data, NULL, 0, cmd, ESP_CMD_TIMEOUT);
|
||||
#else
|
||||
ARG_UNUSED(data);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline uint8_t esp_mode_from_flags(struct esp_data *data)
|
||||
{
|
||||
uint8_t flags = data->flags;
|
||||
@@ -111,10 +125,25 @@ static int esp_mode_switch(struct esp_data *data, uint8_t mode)
|
||||
static int esp_mode_switch_if_needed(struct esp_data *data)
|
||||
{
|
||||
uint8_t new_mode = esp_mode_from_flags(data);
|
||||
uint8_t old_mode = data->mode;
|
||||
int err;
|
||||
|
||||
if (data->mode != new_mode) {
|
||||
data->mode = new_mode;
|
||||
return esp_mode_switch(data, new_mode);
|
||||
if (old_mode == new_mode) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
data->mode = new_mode;
|
||||
|
||||
err = esp_mode_switch(data, new_mode);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
|
||||
if (!(old_mode & ESP_MODE_STA) && (new_mode & ESP_MODE_STA)) {
|
||||
/*
|
||||
* Hostname change is applied only when STA is enabled.
|
||||
*/
|
||||
esp_configure_hostname(data);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -877,20 +906,6 @@ static int esp_mgmt_ap_disable(const struct device *dev)
|
||||
return esp_mode_flags_clear(data, EDF_AP_ENABLED);
|
||||
}
|
||||
|
||||
static void esp_configure_hostname(struct esp_data *data)
|
||||
{
|
||||
#if defined(CONFIG_NET_HOSTNAME_ENABLE)
|
||||
char cmd[sizeof("AT+CWHOSTNAME=\"\"") + NET_HOSTNAME_MAX_LEN];
|
||||
|
||||
snprintk(cmd, sizeof(cmd), "AT+CWHOSTNAME=\"%s\"", net_hostname_get());
|
||||
cmd[sizeof(cmd) - 1] = '\0';
|
||||
|
||||
esp_cmd_send(data, NULL, 0, cmd, ESP_CMD_TIMEOUT);
|
||||
#else
|
||||
ARG_UNUSED(data);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void esp_init_work(struct k_work *work)
|
||||
{
|
||||
struct esp_data *dev;
|
||||
@@ -981,7 +996,16 @@ static void esp_init_work(struct k_work *work)
|
||||
net_if_set_link_addr(dev->net_iface, dev->mac_addr,
|
||||
sizeof(dev->mac_addr), NET_LINK_ETHERNET);
|
||||
|
||||
esp_configure_hostname(dev);
|
||||
if (IS_ENABLED(CONFIG_WIFI_ESP_AT_VERSION_1_7)) {
|
||||
/* This is the mode entered in above setup commands */
|
||||
dev->mode = ESP_MODE_STA;
|
||||
|
||||
/*
|
||||
* In case of ESP 1.7 this is the first time CWMODE is entered
|
||||
* STA mode, so request hostname change now.
|
||||
*/
|
||||
esp_configure_hostname(dev);
|
||||
}
|
||||
|
||||
LOG_INF("ESP Wi-Fi ready");
|
||||
|
||||
|
||||
@@ -702,7 +702,7 @@ static inline int device_pm_put_sync(const struct device *dev) { return -ENOTSUP
 	COND_CODE_1(DT_NODE_EXISTS(node_id), (), (static)) \
 	const Z_DECL_ALIGN(struct device) \
 	DEVICE_NAME_GET(dev_name) __used \
-	__attribute__((__section__(".device_" #level STRINGIFY(prio)))) = { \
+	__attribute__((__section__(".device_" #level STRINGIFY(prio)"_"))) = { \
 		.name = drv_name, \
 		.config = (cfg_ptr), \
 		.api = (api_ptr), \
@@ -85,7 +85,7 @@ void z_sys_init_run_level(int32_t _level);
 #define Z_INIT_ENTRY_DEFINE(_entry_name, _init_fn, _device, _level, _prio) \
 	static const Z_DECL_ALIGN(struct init_entry) \
 	_CONCAT(__init_, _entry_name) __used \
-	__attribute__((__section__(".init_" #_level STRINGIFY(_prio)))) = { \
+	__attribute__((__section__(".init_" #_level STRINGIFY(_prio)"_"))) = { \
 		.init = (_init_fn), \
 		.dev = (_device), \
 	}
@@ -233,6 +233,17 @@ extern void k_thread_foreach_unlocked(
|
||||
*/
|
||||
#define K_INHERIT_PERMS (BIT(3))
|
||||
|
||||
/**
|
||||
* @brief Callback item state
|
||||
*
|
||||
* @details
|
||||
* This is a single bit of state reserved for "callback manager"
|
||||
* utilities (p4wq initially) who need to track operations invoked
|
||||
* from within a user-provided callback they have been invoked.
|
||||
* Effectively it serves as a tiny bit of zero-overhead TLS data.
|
||||
*/
|
||||
#define K_CALLBACK_STATE (BIT(4))
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
/* x86 Bitmask definitions for threads user options */
|
||||
|
||||
|
||||
@@ -116,8 +116,8 @@
  */
 #define CREATE_OBJ_LEVEL(object, level) \
 	__##object##_##level##_start = .; \
-	KEEP(*(SORT(.object##_##level[0-9]*))); \
-	KEEP(*(SORT(.object##_##level[1-9][0-9]*)));
+	KEEP(*(SORT(.object##_##level[0-9]_*))); \
+	KEEP(*(SORT(.object##_##level[1-9][0-9]_*)));

 /*
  * link in shell initialization objects for all modules that use shell and
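The trailing "_" added here, together with the matching suffix in the .device_*/.init_* section names from the earlier hunks, is what keeps init priorities in numeric order: without a terminator, the single-digit glob .init_POST_KERNEL[0-9]* also swallows two-digit priorities, and SORT() then orders them lexicographically (priority 70 would sort between 7 and 8). A hedged sketch of how one entry's section name is formed under the new scheme; the init function, level and priority are illustrative, and the expansion path through SYS_INIT() is stated as an assumption:

```c
#include <device.h>
#include <init.h>

/* Hypothetical init function, used only to illustrate section naming. */
static int my_driver_setup(const struct device *dev)
{
	(void)dev;
	return 0;
}

/* Assumed to expand through Z_INIT_ENTRY_DEFINE() and place the entry in
 * section ".init_POST_KERNEL7_". The linker glob ".init_POST_KERNEL[0-9]_*"
 * matches it, while priority 70 lands in ".init_POST_KERNEL70_" and is only
 * matched by the two-digit glob, so SORT() can no longer interleave 7 with
 * 70..79.
 */
SYS_INIT(my_driver_setup, POST_KERNEL, 7);
```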
@@ -2431,7 +2431,8 @@ struct net_if_api {
|
||||
*/
|
||||
#define NET_DEVICE_DT_OFFLOAD_DEFINE(node_id, init_fn, pm_control_fn, \
|
||||
data, cfg, prio, api, mtu) \
|
||||
Z_NET_DEVICE_OFFLOAD_INIT(node_id, node_id, DT_LABEL(node_id), \
|
||||
Z_NET_DEVICE_OFFLOAD_INIT(node_id, Z_DEVICE_DT_DEV_NAME(node_id), \
|
||||
DT_PROP_OR(node_id, label, NULL), \
|
||||
init_fn, pm_control_fn, data, cfg, \
|
||||
prio, api, mtu)
|
||||
|
||||
|
||||
@@ -36,6 +36,21 @@ static bool rb_lessthan(struct rbnode *a, struct rbnode *b)
|
||||
return (uintptr_t)a < (uintptr_t)b;
|
||||
}
|
||||
|
||||
static void thread_set_requeued(struct k_thread *th)
|
||||
{
|
||||
th->base.user_options |= K_CALLBACK_STATE;
|
||||
}
|
||||
|
||||
static void thread_clear_requeued(struct k_thread *th)
|
||||
{
|
||||
th->base.user_options &= ~K_CALLBACK_STATE;
|
||||
}
|
||||
|
||||
static bool thread_was_requeued(struct k_thread *th)
|
||||
{
|
||||
return !!(th->base.user_options & K_CALLBACK_STATE);
|
||||
}
|
||||
|
||||
/* Slightly different semantics: rb_lessthan must be perfectly
|
||||
* symmetric (to produce a single tree structure) and will use the
|
||||
* pointer value to break ties where priorities are equal, here we
|
||||
@@ -70,6 +85,7 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2)
|
||||
w->thread = _current;
|
||||
sys_dlist_append(&queue->active, &w->dlnode);
|
||||
set_prio(_current, w);
|
||||
thread_clear_requeued(_current);
|
||||
|
||||
k_spin_unlock(&queue->lock, k);
|
||||
w->handler(w);
|
||||
@@ -78,7 +94,7 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2)
|
||||
/* Remove from the active list only if it
|
||||
* wasn't resubmitted already
|
||||
*/
|
||||
if (w->thread == _current) {
|
||||
if (!thread_was_requeued(_current)) {
|
||||
sys_dlist_remove(&w->dlnode);
|
||||
w->thread = NULL;
|
||||
}
|
||||
@@ -142,6 +158,7 @@ void k_p4wq_submit(struct k_p4wq *queue, struct k_p4wq_work *item)
|
||||
/* Resubmission from within handler? Remove from active list */
|
||||
if (item->thread == _current) {
|
||||
sys_dlist_remove(&item->dlnode);
|
||||
thread_set_requeued(_current);
|
||||
item->thread = NULL;
|
||||
}
|
||||
__ASSERT_NO_MSG(item->thread == NULL);
|
||||
|
||||
@@ -7,3 +7,5 @@ CONFIG_LOG_PROCESS_THREAD_SLEEP_MS=100
|
||||
CONFIG_ESPI_AUTOMATIC_WARNING_ACKNOWLEDGE=n
|
||||
# Sample code doesn't handle ACPI host communication
|
||||
CONFIG_ESPI_PERIPHERAL_HOST_IO=n
|
||||
# This only makes sense for system using this board
|
||||
CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC=y
|
||||
|
||||
samples/drivers/espi/src/espi_oob_handler.c (new file, 214 additions)
@@ -0,0 +1,214 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#include <device.h>
|
||||
#include <soc.h>
|
||||
#include <drivers/gpio.h>
|
||||
#include <drivers/espi.h>
|
||||
#include <logging/log_ctrl.h>
|
||||
#include <logging/log.h>
|
||||
#include "espi_oob_handler.h"
|
||||
|
||||
LOG_MODULE_DECLARE(espi, CONFIG_ESPI_LOG_LEVEL);
|
||||
|
||||
struct oob_header {
|
||||
uint8_t dest_slave_addr;
|
||||
uint8_t oob_cmd_code;
|
||||
uint8_t byte_cnt;
|
||||
uint8_t src_slave_addr;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
|
||||
#define OOB_THREAD_STACK_SIZE 512ul
|
||||
#define OOB_THREAD_PRIORITY K_PRIO_COOP(5)
|
||||
#define OOB_THREAD_WAIT -1
|
||||
|
||||
/* Thread to process asynchronous callbacks */
|
||||
void espihub_thread(void *p1, void *p2, void *p3);
|
||||
|
||||
void temperature_timer(struct k_timer *timer_id);
|
||||
|
||||
K_TIMER_DEFINE(temp_timer, temperature_timer, NULL);
|
||||
K_THREAD_DEFINE(espihub_thrd_id, OOB_THREAD_STACK_SIZE, espihub_thread,
|
||||
NULL, NULL, NULL,
|
||||
OOB_THREAD_PRIORITY, K_INHERIT_PERMS, OOB_THREAD_WAIT);
|
||||
|
||||
K_MSGQ_DEFINE(from_host, sizeof(uint8_t), 8, 4);
|
||||
|
||||
struct thread_context {
|
||||
const struct device *espi_dev;
|
||||
int cycles;
|
||||
};
|
||||
|
||||
static struct thread_context context;
|
||||
#endif
|
||||
|
||||
static struct espi_oob_packet resp_pckt;
|
||||
static uint8_t buf[MAX_ESPI_BUF_LEN];
|
||||
|
||||
static int request_temp(const struct device *dev)
|
||||
{
|
||||
int ret;
|
||||
struct oob_header oob_hdr;
|
||||
struct espi_oob_packet req_pckt;
|
||||
|
||||
LOG_WRN("%s", __func__);
|
||||
|
||||
oob_hdr.dest_slave_addr = PCH_DEST_SLV_ADDR;
|
||||
oob_hdr.oob_cmd_code = OOB_CMDCODE;
|
||||
oob_hdr.byte_cnt = 1;
|
||||
oob_hdr.src_slave_addr = SRC_SLV_ADDR;
|
||||
|
||||
/* Packetize OOB request */
|
||||
req_pckt.buf = (uint8_t *)&oob_hdr;
|
||||
req_pckt.len = sizeof(struct oob_header);
|
||||
|
||||
ret = espi_send_oob(dev, &req_pckt);
|
||||
if (ret) {
|
||||
LOG_ERR("OOB Tx failed %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int retrieve_packet(const struct device *dev, uint8_t *sender)
|
||||
{
|
||||
int ret;
|
||||
|
||||
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
/* Note that no data is in the item */
|
||||
uint8_t response_len;
|
||||
|
||||
if (k_msgq_num_used_get(&from_host) == 0U) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
k_msgq_get(&from_host, &response_len, K_FOREVER);
|
||||
#endif
|
||||
|
||||
resp_pckt.buf = (uint8_t *)&buf;
|
||||
resp_pckt.len = MAX_ESPI_BUF_LEN;
|
||||
|
||||
ret = espi_receive_oob(dev, &resp_pckt);
|
||||
if (ret) {
|
||||
LOG_ERR("OOB Rx failed %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
LOG_INF("OOB transaction completed rcvd: %d bytes", resp_pckt.len);
|
||||
for (int i = 0; i < resp_pckt.len; i++) {
|
||||
LOG_INF("%x ", buf[i]);
|
||||
}
|
||||
|
||||
if (sender) {
|
||||
*sender = buf[OOB_RESPONSE_SENDER_INDEX];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int get_pch_temp_sync(const struct device *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
for (int i = 0; i < MIN_GET_TEMP_CYCLES; i++) {
|
||||
ret = request_temp(dev);
|
||||
if (ret) {
|
||||
LOG_ERR("OOB req failed %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = retrieve_packet(dev, NULL);
|
||||
if (ret) {
|
||||
LOG_ERR("OOB retrieve failed %d", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int get_pch_temp_async(const struct device *dev)
|
||||
{
|
||||
#if !defined(CONFIG_ESPI_OOB_CHANNEL) || \
|
||||
!defined(CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC)
|
||||
return -ENOTSUP;
|
||||
#else
|
||||
context.espi_dev = dev;
|
||||
context.cycles = MIN_GET_TEMP_CYCLES;
|
||||
|
||||
k_thread_start(espihub_thrd_id);
|
||||
k_thread_join(espihub_thrd_id, K_FOREVER);
|
||||
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
|
||||
void oob_rx_handler(const struct device *dev, struct espi_callback *cb,
|
||||
struct espi_event event)
|
||||
{
|
||||
uint8_t last_resp_len = event.evt_details;
|
||||
|
||||
LOG_WRN("%s", __func__);
|
||||
/* Post for post-processing in a thread
|
||||
* Should not attempt to retrieve in callback context
|
||||
*/
|
||||
k_msgq_put(&from_host, &last_resp_len, K_NO_WAIT);
|
||||
}
|
||||
|
||||
|
||||
bool need_temp;
|
||||
|
||||
void temperature_timer(struct k_timer *timer_id)
|
||||
{
|
||||
LOG_WRN("%s", __func__);
|
||||
need_temp = true;
|
||||
}
|
||||
|
||||
void espihub_thread(void *p1, void *p2, void *p3)
|
||||
{
|
||||
int ret;
|
||||
uint8_t temp;
|
||||
uint8_t sender;
|
||||
|
||||
LOG_DBG("%s", __func__);
|
||||
k_timer_start(&temp_timer, K_MSEC(100), K_MSEC(100));
|
||||
while (context.cycles > 0) {
|
||||
k_msleep(50);
|
||||
|
||||
ret = retrieve_packet(context.espi_dev, &sender);
|
||||
if (!ret) {
|
||||
switch (sender) {
|
||||
case PCH_DEST_SLV_ADDR:
|
||||
LOG_INF("PCH response");
|
||||
/* Any other checks */
|
||||
if (resp_pckt.len == OOB_RESPONSE_LEN) {
|
||||
temp = buf[OOB_RESPONSE_DATA_INDEX];
|
||||
LOG_INF("Temp %d", temp);
|
||||
} else {
|
||||
LOG_ERR("Incorrect size response");
|
||||
}
|
||||
|
||||
context.cycles--;
|
||||
break;
|
||||
default:
|
||||
LOG_INF("Other host sender %x", sender);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (need_temp) {
|
||||
request_temp(context.espi_dev);
|
||||
need_temp = false;
|
||||
}
|
||||
}
|
||||
k_timer_stop(&temp_timer);
|
||||
}
|
||||
#endif /* CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC */
|
||||
samples/drivers/espi/src/espi_oob_handler.h (new file, 59 additions)
@@ -0,0 +1,59 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#ifndef __ESPI_OOB_HANDLER_H__
|
||||
#define __ESPI_OOB_HANDLER_H__
|
||||
|
||||
/* eSPI host entity address */
|
||||
#define PCH_DEST_SLV_ADDR 0x02u
|
||||
#define SRC_SLV_ADDR 0x21u
|
||||
|
||||
#define OOB_RESPONSE_SENDER_INDEX 0x02u
|
||||
#define OOB_RESPONSE_DATA_INDEX 0x04u
|
||||
|
||||
|
||||
/* Temperature command opcode */
|
||||
#define OOB_CMDCODE 0x01u
|
||||
#define OOB_RESPONSE_LEN 0x05u
|
||||
|
||||
/* Maximum bytes for OOB transactions */
|
||||
#define MAX_ESPI_BUF_LEN 80u
|
||||
#define MIN_GET_TEMP_CYCLES 5u
|
||||
|
||||
/* 100ms */
|
||||
#define MAX_OOB_TIMEOUT 100ul
|
||||
|
||||
void oob_rx_handler(const struct device *dev, struct espi_callback *cb,
|
||||
struct espi_event event);
|
||||
|
||||
/**
|
||||
* @brief Retrieve PCH temperature over OOB channel.
|
||||
* Assumes OOB Tx and Rx as synchronous operation.
|
||||
*
|
||||
* @param dev eSPI driver handle.
|
||||
*
|
||||
* @retval 0 If successful.
|
||||
* @retval -ENOTSUP returned when OOB channel is not supported.
|
||||
* @retval -EINVAL is returned when OOB parameters are invalid.
|
||||
*
|
||||
*/
|
||||
int get_pch_temp_sync(const struct device *dev);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Retrieve PCH temperature over OOB channel.
|
||||
* Assumes OOB Tx and Rx as synchronous operation.
|
||||
*
|
||||
* @param dev eSPI driver handle.
|
||||
*
|
||||
* @retval 0 If successful.
|
||||
* @retval -ENOTSUP returned when OOB channel is not supported.
|
||||
* @retval -ETIMEOUT OOB operations could not be started.
|
||||
*
|
||||
*/
|
||||
int get_pch_temp_async(const struct device *dev);
|
||||
|
||||
#endif /* __ESPI_OOB_HANDLER_H__ */
|
||||
@@ -12,20 +12,11 @@
|
||||
#include <drivers/espi.h>
|
||||
#include <logging/log_ctrl.h>
|
||||
#include <logging/log.h>
|
||||
#ifdef CONFIG_ESPI_OOB_CHANNEL
|
||||
#include "espi_oob_handler.h"
|
||||
#endif
|
||||
LOG_MODULE_DECLARE(espi, CONFIG_ESPI_LOG_LEVEL);
|
||||
|
||||
/* eSPI host entity address */
|
||||
#define DEST_SLV_ADDR 0x02u
|
||||
#define SRC_SLV_ADDR 0x21u
|
||||
|
||||
/* Temperature command opcode */
|
||||
#define OOB_CMDCODE 0x01u
|
||||
#define OOB_RESPONSE_LEN 0x05u
|
||||
#define OOB_RESPONSE_INDEX 0x03u
|
||||
|
||||
/* Maximum bytes for OOB transactions */
|
||||
#define MAX_RESP_SIZE 20u
|
||||
|
||||
/* eSPI flash parameters */
|
||||
#define MAX_TEST_BUF_SIZE 1024u
|
||||
#define MAX_FLASH_REQUEST 64u
|
||||
@@ -44,13 +35,6 @@ LOG_MODULE_DECLARE(espi, CONFIG_ESPI_LOG_LEVEL);
|
||||
#define EVENT_TYPE(x) (x & EVENT_MASK)
|
||||
#define EVENT_DETAILS(x) ((x & EVENT_DETAILS_MASK) >> EVENT_DETAILS_POS)
|
||||
|
||||
struct oob_header {
|
||||
uint8_t dest_slave_addr;
|
||||
uint8_t oob_cmd_code;
|
||||
uint8_t byte_cnt;
|
||||
uint8_t src_slave_addr;
|
||||
};
|
||||
|
||||
#define PWR_SEQ_TIMEOUT 3000u
|
||||
|
||||
/* The devicetree node identifier for the board power rails pins. */
|
||||
@@ -73,8 +57,10 @@ static struct espi_callback espi_bus_cb;
|
||||
static struct espi_callback vw_rdy_cb;
|
||||
static struct espi_callback vw_cb;
|
||||
static struct espi_callback p80_cb;
|
||||
|
||||
static uint8_t espi_rst_sts;
|
||||
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
static struct espi_callback oob_cb;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ESPI_FLASH_CHANNEL
|
||||
static uint8_t flash_write_buf[MAX_TEST_BUF_SIZE];
|
||||
@@ -225,6 +211,10 @@ int espi_init(void)
|
||||
ESPI_BUS_EVENT_VWIRE_RECEIVED);
|
||||
espi_init_callback(&p80_cb, periph_handler,
|
||||
ESPI_BUS_PERIPHERAL_NOTIFICATION);
|
||||
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
espi_init_callback(&oob_cb, oob_rx_handler,
|
||||
ESPI_BUS_EVENT_OOB_RECEIVED);
|
||||
#endif
|
||||
LOG_INF("complete");
|
||||
|
||||
LOG_INF("eSPI test - callbacks registration... ");
|
||||
@@ -232,6 +222,9 @@ int espi_init(void)
|
||||
espi_add_callback(espi_dev, &vw_rdy_cb);
|
||||
espi_add_callback(espi_dev, &vw_cb);
|
||||
espi_add_callback(espi_dev, &p80_cb);
|
||||
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
espi_add_callback(espi_dev, &oob_cb);
|
||||
#endif
|
||||
LOG_INF("complete");
|
||||
|
||||
return ret;
|
||||
@@ -478,53 +471,6 @@ static int espi_flash_test(uint32_t start_flash_addr, uint8_t blocks)
|
||||
}
|
||||
#endif /* CONFIG_ESPI_FLASH_CHANNEL */
|
||||
|
||||
int get_pch_temp(const struct device *dev, int *temp)
|
||||
{
|
||||
struct espi_oob_packet req_pckt;
|
||||
struct espi_oob_packet resp_pckt;
|
||||
struct oob_header oob_hdr;
|
||||
uint8_t buf[MAX_RESP_SIZE];
|
||||
int ret;
|
||||
|
||||
LOG_INF("%s", __func__);
|
||||
|
||||
oob_hdr.dest_slave_addr = DEST_SLV_ADDR;
|
||||
oob_hdr.oob_cmd_code = OOB_CMDCODE;
|
||||
oob_hdr.byte_cnt = 1;
|
||||
oob_hdr.src_slave_addr = SRC_SLV_ADDR;
|
||||
|
||||
/* Packetize OOB request */
|
||||
req_pckt.buf = (uint8_t *)&oob_hdr;
|
||||
req_pckt.len = sizeof(struct oob_header);
|
||||
resp_pckt.buf = (uint8_t *)&buf;
|
||||
resp_pckt.len = MAX_RESP_SIZE;
|
||||
|
||||
ret = espi_send_oob(dev, &req_pckt);
|
||||
if (ret) {
|
||||
LOG_ERR("OOB Tx failed %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = espi_receive_oob(dev, &resp_pckt);
|
||||
if (ret) {
|
||||
LOG_ERR("OOB Rx failed %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
LOG_INF("OOB transaction completed rcvd: %d bytes", resp_pckt.len);
|
||||
for (int i = 0; i < resp_pckt.len; i++) {
|
||||
LOG_INF("%x ", buf[i]);
|
||||
}
|
||||
|
||||
if (resp_pckt.len == OOB_RESPONSE_LEN) {
|
||||
*temp = buf[OOB_RESPONSE_INDEX];
|
||||
} else {
|
||||
LOG_ERR("Incorrect size response");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_ESPI_AUTOMATIC_BOOT_DONE_ACKNOWLEDGE
|
||||
static void send_slave_bootdone(void)
|
||||
{
|
||||
@@ -673,16 +619,13 @@ int espi_test(void)
|
||||
/* Attempt to use OOB channel to read temperature, regardless of
|
||||
* if is enabled or not.
|
||||
*/
|
||||
for (int i = 0; i < 5; i++) {
|
||||
int temp;
|
||||
|
||||
ret = get_pch_temp(espi_dev, &temp);
|
||||
if (ret) {
|
||||
LOG_ERR("eSPI OOB transaction failed %d", ret);
|
||||
} else {
|
||||
LOG_INF("Temp: %d ", temp);
|
||||
}
|
||||
}
|
||||
#ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
|
||||
/* System without host-initiated OOB Rx traffic */
|
||||
get_pch_temp_sync(espi_dev);
|
||||
#else
|
||||
/* System with host-initiated OOB Rx traffic */
|
||||
get_pch_temp_async(espi_dev);
|
||||
#endif
|
||||
|
||||
/* Cleanup */
|
||||
k_sleep(K_SECONDS(1));
|
||||
|
||||
1
samples/drivers/watchdog/boards/disco_l475_iot1.conf
Normal file
1
samples/drivers/watchdog/boards/disco_l475_iot1.conf
Normal file
@@ -0,0 +1 @@
|
||||
CONFIG_CLOCK_STM32_APB1_PRESCALER=16
|
||||
@@ -28,5 +28,3 @@ tests:
|
||||
tags: drivers watchdog
|
||||
filter: dt_compat_enabled("st,stm32-window-watchdog")
|
||||
depends_on: watchdog
|
||||
extra_configs:
|
||||
- CONFIG_CLOCK_STM32_APB1_PRESCALER=16
|
||||
|
||||
@@ -66,14 +66,23 @@
|
||||
* @param prop_name human-readable string name for 'prop'
|
||||
*/
|
||||
#define NRF_DT_CHECK_GPIO_CTLR_IS_SOC(node_id, prop, prop_name) \
|
||||
BUILD_ASSERT(!DT_NODE_HAS_PROP(node_id, prop) || \
|
||||
DT_NODE_HAS_COMPAT(DT_GPIO_CTLR(node_id, prop), \
|
||||
nordic_nrf_gpio), \
|
||||
"Devicetree node " DT_NODE_PATH(node_id) \
|
||||
" property " prop_name " must refer to a GPIO " \
|
||||
" controller with compatible nordic,nrf-gpio; " \
|
||||
"got " DT_NODE_PATH(DT_GPIO_CTLR(node_id, prop)) \
|
||||
", which does not have this compatible")
|
||||
COND_CODE_1(DT_NODE_HAS_PROP(node_id, prop), \
|
||||
(BUILD_ASSERT(DT_NODE_HAS_COMPAT( \
|
||||
DT_GPIO_CTLR(node_id, prop), \
|
||||
nordic_nrf_gpio), \
|
||||
"Devicetree node " \
|
||||
DT_NODE_PATH(node_id) \
|
||||
" property " prop_name \
|
||||
" must refer to a GPIO controller " \
|
||||
"with compatible nordic,nrf-gpio; " \
|
||||
"got " \
|
||||
DT_NODE_PATH(DT_GPIO_CTLR(node_id, \
|
||||
prop)) \
|
||||
", which does not have this " \
|
||||
"compatible")), \
|
||||
(BUILD_ASSERT(1, \
|
||||
"NRF_DT_CHECK_GPIO_CTLR_IS_SOC: OK")))
|
||||
/* Note: allow a trailing ";" either way */
|
||||
|
||||
#endif /* !_ASMLANGUAGE */
|
||||
|
||||
|
||||
@@ -292,7 +292,8 @@ void ll_rx_dequeue(void);
|
||||
void ll_rx_mem_release(void **node_rx);
|
||||
|
||||
/* External co-operation */
|
||||
void ll_timeslice_ticker_id_get(uint8_t * const instance_index, uint8_t * const user_id);
|
||||
void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
|
||||
uint8_t * const ticker_id);
|
||||
void ll_radio_state_abort(void);
|
||||
uint32_t ll_radio_state_is_idle(void);
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
|
||||
* Copyright (c) 2018-2021 Nordic Semiconductor ASA
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
@@ -30,6 +30,16 @@
|
||||
#define XON_BITMASK BIT(31) /* XTAL has been retained from previous prepare */
|
||||
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
||||
|
||||
#if defined(CONFIG_BT_BROADCASTER)
|
||||
#if defined(CONFIG_BT_CTLR_ADV_SET)
|
||||
#define BT_CTLR_ADV_SET CONFIG_BT_CTLR_ADV_SET
|
||||
#else /* CONFIG_BT_CTLR_ADV_SET */
|
||||
#define BT_CTLR_ADV_SET 1
|
||||
#endif /* CONFIG_BT_CTLR_ADV_SET */
|
||||
#else /* !CONFIG_BT_BROADCASTER */
|
||||
#define BT_CTLR_ADV_SET 0
|
||||
#endif /* !CONFIG_BT_BROADCASTER */
|
||||
|
||||
#if defined(CONFIG_BT_OBSERVER)
|
||||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
|
||||
#if defined(CONFIG_BT_CTLR_PHY_CODED)
|
||||
@@ -49,8 +59,7 @@ enum {
|
||||
TICKER_ID_ADV_STOP,
|
||||
TICKER_ID_ADV_BASE,
|
||||
#if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
|
||||
TICKER_ID_ADV_LAST = ((TICKER_ID_ADV_BASE) +
|
||||
(CONFIG_BT_CTLR_ADV_SET) - 1),
|
||||
TICKER_ID_ADV_LAST = ((TICKER_ID_ADV_BASE) + (BT_CTLR_ADV_SET) - 1),
|
||||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
|
||||
#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
|
||||
TICKER_ID_ADV_AUX_BASE,
|
||||
@@ -138,7 +147,6 @@ struct ull_hdr {
|
||||
|
||||
struct lll_hdr {
|
||||
void *parent;
|
||||
uint8_t is_stop:1;
|
||||
};
|
||||
|
||||
struct lll_prepare_param {
|
||||
@@ -341,17 +349,6 @@ static inline void lll_hdr_init(void *lll, void *parent)
|
||||
struct lll_hdr *hdr = lll;
|
||||
|
||||
hdr->parent = parent;
|
||||
hdr->is_stop = 0U;
|
||||
}
|
||||
|
||||
static inline int lll_stop(void *lll)
|
||||
{
|
||||
struct lll_hdr *hdr = lll;
|
||||
int ret = !!hdr->is_stop;
|
||||
|
||||
hdr->is_stop = 1U;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int lll_init(void);
|
||||
|
||||
@@ -56,17 +56,25 @@ struct lll_conn {
|
||||
uint16_t data_chan_id;
|
||||
};
|
||||
|
||||
union {
|
||||
struct {
|
||||
uint8_t initiated:1;
|
||||
uint8_t cancelled:1;
|
||||
} master;
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
struct {
|
||||
uint8_t latency_enabled:1;
|
||||
uint32_t window_widening_periodic_us;
|
||||
uint32_t window_widening_max_us;
|
||||
uint32_t window_widening_prepare_us;
|
||||
uint32_t window_widening_event_us;
|
||||
uint32_t window_size_prepare_us;
|
||||
uint32_t window_size_event_us;
|
||||
} slave;
|
||||
struct {
|
||||
uint8_t initiated:1;
|
||||
uint8_t latency_enabled:1;
|
||||
|
||||
uint32_t window_widening_periodic_us;
|
||||
uint32_t window_widening_max_us;
|
||||
uint32_t window_widening_prepare_us;
|
||||
uint32_t window_widening_event_us;
|
||||
uint32_t window_size_prepare_us;
|
||||
uint32_t window_size_event_us;
|
||||
} slave;
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
};
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
uint16_t max_tx_octets;
|
||||
|
||||
@@ -8,8 +8,13 @@ struct lll_scan {
|
||||
struct lll_hdr hdr;
|
||||
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
/* NOTE: conn context has to be after lll_hdr */
|
||||
struct lll_conn *conn;
|
||||
/* NOTE: conn context SHALL be after lll_hdr,
|
||||
* check ull_conn_setup how it access the connection LLL
|
||||
* context.
|
||||
*/
|
||||
struct lll_conn *volatile conn;
|
||||
|
||||
uint8_t adv_addr[BDADDR_SIZE];
|
||||
uint32_t conn_win_offset_us;
|
||||
uint16_t conn_timeout;
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
@@ -17,9 +22,11 @@ struct lll_scan {
|
||||
uint8_t state:1;
|
||||
uint8_t chan:2;
|
||||
uint8_t filter_policy:2;
|
||||
uint8_t adv_addr_type:1;
|
||||
uint8_t init_addr_type:1;
|
||||
uint8_t type:1;
|
||||
uint8_t init_addr_type:1;
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
uint8_t adv_addr_type:1;
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
|
||||
uint16_t duration_reload;
|
||||
@@ -35,7 +42,6 @@ struct lll_scan {
|
||||
#endif /* CONFIG_BT_CTLR_PRIVACY */
|
||||
|
||||
uint8_t init_addr[BDADDR_SIZE];
|
||||
uint8_t adv_addr[BDADDR_SIZE];
|
||||
|
||||
uint16_t interval;
|
||||
uint32_t ticks_window;
|
||||
|
||||
@@ -440,10 +440,11 @@ static int prepare_cb(struct lll_prepare_param *p)
|
||||
|
||||
lll = p->param;
|
||||
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
/* Check if stopped (on connection establishment race between LLL and
|
||||
* ULL.
|
||||
*/
|
||||
if (unlikely(lll_is_stop(lll))) {
|
||||
if (unlikely(lll->conn && lll->conn->slave.initiated)) {
|
||||
int err;
|
||||
|
||||
err = lll_hfclock_off();
|
||||
@@ -454,6 +455,7 @@ static int prepare_cb(struct lll_prepare_param *p)
|
||||
DEBUG_RADIO_CLOSE_A(0);
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
|
||||
radio_reset();
|
||||
|
||||
@@ -1032,7 +1034,6 @@ static inline int isr_rx_pdu(struct lll_adv *lll,
|
||||
lll->conn) {
|
||||
struct node_rx_ftr *ftr;
|
||||
struct node_rx_pdu *rx;
|
||||
int ret;
|
||||
|
||||
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
|
||||
rx = ull_pdu_rx_alloc_peek(4);
|
||||
@@ -1061,8 +1062,7 @@ static inline int isr_rx_pdu(struct lll_adv *lll,
|
||||
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
|
||||
|
||||
/* Stop further LLL radio events */
|
||||
ret = lll_stop(lll);
|
||||
LL_ASSERT(!ret);
|
||||
lll->conn->slave.initiated = 1;
|
||||
|
||||
rx = ull_pdu_rx_alloc();
|
||||
|
||||
|
||||
@@ -1,15 +1,9 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
|
||||
* Copyright (c) 2017-2021 Nordic Semiconductor ASA
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_ADV_SET)
|
||||
#define BT_CTLR_ADV_SET CONFIG_BT_CTLR_ADV_SET
|
||||
#else /* CONFIG_BT_CTLR_ADV_SET */
|
||||
#define BT_CTLR_ADV_SET 1
|
||||
#endif /* CONFIG_BT_CTLR_ADV_SET */
|
||||
|
||||
/* Structure used to double buffer pointers of AD Data PDU buffer.
|
||||
* The first and last members are used to make modification to AD data to be
|
||||
* context safe. Thread always appends or updates the buffer pointed to
|
||||
@@ -80,7 +74,10 @@ struct lll_adv {
|
||||
struct lll_hdr hdr;
|
||||
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
/* NOTE: conn context has to be after lll_hdr */
|
||||
/* NOTE: conn context SHALL be after lll_hdr,
|
||||
* check ull_conn_setup how it access the connection LLL
|
||||
* context.
|
||||
*/
|
||||
struct lll_conn *conn;
|
||||
uint8_t is_hdcd:1;
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
#include "lll_chan.h"
|
||||
#include "lll_adv.h"
|
||||
#include "lll_adv_aux.h"
|
||||
#include "lll_conn.h"
|
||||
#include "lll_filter.h"
|
||||
|
||||
#include "lll_internal.h"
|
||||
@@ -54,7 +55,9 @@ static inline int isr_rx_pdu(struct lll_adv_aux *lll_aux,
|
||||
uint8_t devmatch_ok, uint8_t devmatch_id,
|
||||
uint8_t irkmatch_ok, uint8_t irkmatch_id,
|
||||
uint8_t rssi_ready);
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
static void isr_tx_connect_rsp(void *param);
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
|
||||
static struct pdu_adv *init_connect_rsp_pdu(void)
|
||||
{
|
||||
@@ -101,6 +104,7 @@ static struct pdu_adv *init_connect_rsp_pdu(void)
|
||||
return pdu;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
static struct pdu_adv *update_connect_rsp_pdu(struct pdu_adv *pdu_ci)
|
||||
{
|
||||
struct pdu_adv_com_ext_adv *cr_com_hdr;
|
||||
@@ -125,6 +129,7 @@ static struct pdu_adv *update_connect_rsp_pdu(struct pdu_adv *pdu_ci)
|
||||
|
||||
return pdu_cr;
|
||||
}
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
|
||||
int lll_adv_aux_init(void)
|
||||
{
|
||||
@@ -483,7 +488,6 @@ static inline int isr_rx_pdu(struct lll_adv_aux *lll_aux,
|
||||
struct pdu_adv *pdu_adv;
|
||||
struct pdu_adv *pdu_aux;
|
||||
struct pdu_adv *pdu_rx;
|
||||
struct pdu_adv *pdu_tx;
|
||||
struct lll_adv *lll;
|
||||
uint8_t *tgt_addr;
|
||||
uint8_t tx_addr;
|
||||
@@ -561,13 +565,17 @@ static inline int isr_rx_pdu(struct lll_adv_aux *lll_aux,
|
||||
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
|
||||
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */
|
||||
return 0;
|
||||
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
} else if ((pdu_rx->type == PDU_ADV_TYPE_AUX_CONNECT_REQ) &&
|
||||
(pdu_rx->len == sizeof(struct pdu_adv_connect_ind)) &&
|
||||
lll_adv_connect_ind_check(lll, pdu_rx, tx_addr, addr,
|
||||
rx_addr, tgt_addr,
|
||||
devmatch_ok, &rl_idx)) {
|
||||
devmatch_ok, &rl_idx) &&
|
||||
lll->conn) {
|
||||
struct node_rx_ftr *ftr;
|
||||
struct node_rx_pdu *rx;
|
||||
struct pdu_adv *pdu_tx;
|
||||
|
||||
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
|
||||
rx = ull_pdu_rx_alloc_peek(4);
|
||||
@@ -632,18 +640,19 @@ static inline int isr_rx_pdu(struct lll_adv_aux *lll_aux,
|
||||
}
|
||||
|
||||
return 0;
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
static void isr_tx_connect_rsp(void *param)
|
||||
{
|
||||
struct node_rx_ftr *ftr;
|
||||
struct node_rx_pdu *rx;
|
||||
struct lll_adv *lll;
|
||||
bool is_done;
|
||||
int ret;
|
||||
|
||||
rx = param;
|
||||
ftr = &(rx->hdr.rx_ftr);
|
||||
@@ -671,11 +680,11 @@ static void isr_tx_connect_rsp(void *param)
|
||||
|
||||
if (is_done) {
|
||||
/* Stop further LLL radio events */
|
||||
ret = lll_stop(lll);
|
||||
LL_ASSERT(!ret);
|
||||
lll->conn->slave.initiated = 1;
|
||||
}
|
||||
|
||||
/* Clear radio status and events */
|
||||
lll_isr_status_reset();
|
||||
lll_isr_cleanup(lll);
|
||||
}
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
|
||||
@@ -14,12 +14,6 @@ int lll_is_abort_cb(void *next, int prio, void *curr,
|
||||
lll_prepare_cb_t *resume_cb, int *resume_prio);
|
||||
void lll_abort_cb(struct lll_prepare_param *prepare_param, void *param);
|
||||
|
||||
static inline int lll_is_stop(void *lll)
|
||||
{
|
||||
struct lll_hdr *hdr = lll;
|
||||
|
||||
return !!hdr->is_stop;
|
||||
}
|
||||
uint32_t lll_evt_offset_get(struct evt_hdr *evt);
|
||||
uint32_t lll_preempt_calc(struct evt_hdr *evt, uint8_t ticker_id,
|
||||
uint32_t ticks_at_event);
|
||||
|
||||
@@ -66,10 +66,12 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
|
||||
uint8_t devmatch_ok, uint8_t devmatch_id,
|
||||
uint8_t irkmatch_ok, uint8_t irkmatch_id,
|
||||
uint8_t rl_idx, uint8_t rssi_ready);
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
static inline bool isr_scan_init_check(struct lll_scan *lll,
|
||||
struct pdu_adv *pdu, uint8_t rl_idx);
|
||||
static inline bool isr_scan_init_adva_check(struct lll_scan *lll,
|
||||
struct pdu_adv *pdu, uint8_t rl_idx);
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
static inline bool isr_scan_tgta_check(struct lll_scan *lll, bool init,
|
||||
struct pdu_adv *pdu, uint8_t rl_idx,
|
||||
bool *dir_report);
|
||||
@@ -134,10 +136,13 @@ static int prepare_cb(struct lll_prepare_param *p)
|
||||
|
||||
lll = p->param;
|
||||
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
/* Check if stopped (on connection establishment race between LLL and
|
||||
* ULL.
|
||||
*/
|
||||
if (unlikely(lll_is_stop(lll))) {
|
||||
if (unlikely(lll->conn &&
|
||||
(lll->conn->master.initiated ||
|
||||
lll->conn->master.cancelled))) {
|
||||
int err;
|
||||
|
||||
err = lll_hfclock_off();
|
||||
@@ -148,6 +153,7 @@ static int prepare_cb(struct lll_prepare_param *p)
|
||||
DEBUG_RADIO_CLOSE_O(0);
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
||||
/* Initialize scanning state */
|
||||
lll->state = 0U;
|
||||
@@ -355,6 +361,9 @@ static int is_abort_cb(void *next, int prio, void *curr,
|
||||
|
||||
static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
|
||||
{
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
struct lll_scan *lll = param;
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
int err;
|
||||
|
||||
/* NOTE: This is not a prepare being cancelled */
|
||||
@@ -363,10 +372,14 @@ static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
|
||||
* After event has been cleanly aborted, clean up resources
|
||||
* and dispatch event done.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) && lll_is_stop(param)) {
|
||||
if (0) {
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
} else if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
|
||||
lll->conn && lll->conn->master.initiated) {
|
||||
while (!radio_has_disabled()) {
|
||||
cpu_sleep();
|
||||
}
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
} else {
|
||||
radio_isr_set(isr_abort, param);
|
||||
radio_disable();
|
||||
@@ -768,7 +781,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
|
||||
if (0) {
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
/* Initiator */
|
||||
} else if ((lll->conn) &&
|
||||
} else if (lll->conn && !lll->conn->master.cancelled &&
|
||||
isr_scan_init_check(lll, pdu_adv_rx, rl_idx)) {
|
||||
struct lll_conn *lll_conn;
|
||||
struct node_rx_ftr *ftr;
|
||||
@@ -782,7 +795,6 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
|
||||
#if defined(CONFIG_BT_CTLR_PRIVACY)
|
||||
bt_addr_t *lrpa;
|
||||
#endif /* CONFIG_BT_CTLR_PRIVACY */
|
||||
int ret;
|
||||
|
||||
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
|
||||
rx = ull_pdu_rx_alloc_peek(4);
|
||||
@@ -922,8 +934,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
|
||||
*/
|
||||
|
||||
/* Stop further LLL radio events */
|
||||
ret = lll_stop(lll);
|
||||
LL_ASSERT(!ret);
|
||||
lll->conn->master.initiated = 1;
|
||||
|
||||
rx = ull_pdu_rx_alloc();
|
||||
|
||||
@@ -1097,6 +1108,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
static inline bool isr_scan_init_check(struct lll_scan *lll,
|
||||
struct pdu_adv *pdu, uint8_t rl_idx)
|
||||
{
|
||||
@@ -1117,11 +1129,16 @@ static inline bool isr_scan_init_adva_check(struct lll_scan *lll,
|
||||
/* Only applies to initiator with no whitelist */
|
||||
if (rl_idx != FILTER_IDX_NONE) {
|
||||
return (rl_idx == lll->rl_idx);
|
||||
} else if (!ull_filter_lll_rl_addr_allowed(pdu->tx_addr,
|
||||
pdu->adv_ind.addr,
|
||||
&rl_idx)) {
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_PRIVACY */
|
||||
return ((lll->adv_addr_type == pdu->tx_addr) &&
|
||||
!memcmp(lll->adv_addr, &pdu->adv_ind.addr[0], BDADDR_SIZE));
|
||||
}
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
||||
static inline bool isr_scan_tgta_check(struct lll_scan *lll, bool init,
|
||||
struct pdu_adv *pdu, uint8_t rl_idx,
|
||||
|
||||
@@ -326,10 +326,11 @@ static int prepare_cb(struct lll_prepare_param *prepare_param)
|
||||
|
||||
DEBUG_RADIO_START_A(1);
|
||||
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
/* Check if stopped (on connection establishment race between LLL and
|
||||
* ULL.
|
||||
*/
|
||||
if (lll_is_stop(lll)) {
|
||||
if (unlikely(lll->conn && lll->conn->master.initiated)) {
|
||||
int err;
|
||||
|
||||
err = lll_clk_off();
|
||||
@@ -340,6 +341,7 @@ static int prepare_cb(struct lll_prepare_param *prepare_param)
|
||||
DEBUG_RADIO_START_A(0);
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
|
||||
radio_reset();
|
||||
/* TODO: other Tx Power settings */
|
||||
@@ -897,7 +899,6 @@ static inline int isr_rx_pdu(struct lll_adv *lll,
|
||||
lll->conn) {
|
||||
struct node_rx_ftr *ftr;
|
||||
struct node_rx_pdu *rx;
|
||||
int ret;
|
||||
|
||||
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
|
||||
rx = ull_pdu_rx_alloc_peek(4);
|
||||
@@ -925,8 +926,7 @@ static inline int isr_rx_pdu(struct lll_adv *lll,
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
|
||||
/* Stop further LLL radio events */
|
||||
ret = lll_stop(lll);
|
||||
LL_ASSERT(!ret);
|
||||
lll->conn->master.initiated = 1;
|
||||
|
||||
rx = ull_pdu_rx_alloc();
|
||||
|
||||
|
||||
@@ -14,13 +14,6 @@ int lll_is_abort_cb(void *next, int prio, void *curr,
|
||||
lll_prepare_cb_t *resume_cb, int *resume_prio);
|
||||
void lll_abort_cb(struct lll_prepare_param *prepare_param, void *param);
|
||||
|
||||
static inline int lll_is_stop(void *lll)
|
||||
{
|
||||
struct lll_hdr *hdr = lll;
|
||||
|
||||
return !!hdr->is_stop;
|
||||
}
|
||||
|
||||
int lll_clk_on(void);
|
||||
int lll_clk_on_wait(void);
|
||||
int lll_clk_off(void);
|
||||
|
||||
@@ -127,10 +127,11 @@ static int prepare_cb(struct lll_prepare_param *prepare_param)
|
||||
|
||||
DEBUG_RADIO_START_O(1);
|
||||
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
/* Check if stopped (on connection establishment race between LLL and
|
||||
* ULL.
|
||||
*/
|
||||
if (lll_is_stop(lll)) {
|
||||
if (unlikely(lll->conn && lll->conn->master.initiated)) {
|
||||
int err;
|
||||
|
||||
err = lll_clk_off();
|
||||
@@ -141,6 +142,7 @@ static int prepare_cb(struct lll_prepare_param *prepare_param)
|
||||
DEBUG_RADIO_START_O(0);
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
||||
radio_reset();
|
||||
/* TODO: other Tx Power settings */
|
||||
@@ -671,7 +673,7 @@ static inline uint32_t isr_rx_pdu(struct lll_scan *lll, uint8_t devmatch_ok,
|
||||
if (0) {
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
/* Initiator */
|
||||
} else if ((lll->conn) &&
|
||||
} else if (lll->conn &&
|
||||
isr_scan_init_check(lll, pdu_adv_rx, rl_idx)) {
|
||||
struct lll_conn *lll_conn;
|
||||
struct node_rx_ftr *ftr;
|
||||
@@ -685,7 +687,6 @@ static inline uint32_t isr_rx_pdu(struct lll_scan *lll, uint8_t devmatch_ok,
|
||||
#if defined(CONFIG_BT_CTLR_PRIVACY)
|
||||
bt_addr_t *lrpa;
|
||||
#endif /* CONFIG_BT_CTLR_PRIVACY */
|
||||
int ret;
|
||||
|
||||
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
|
||||
rx = ull_pdu_rx_alloc_peek(4);
|
||||
@@ -825,8 +826,7 @@ static inline uint32_t isr_rx_pdu(struct lll_scan *lll, uint8_t devmatch_ok,
|
||||
*/
|
||||
|
||||
/* Stop further LLL radio events */
|
||||
ret = lll_stop(lll);
|
||||
LL_ASSERT(!ret);
|
||||
lll->conn->master.initiated = 1;
|
||||
|
||||
rx = ull_pdu_rx_alloc();
|
||||
|
||||
@@ -1015,6 +1015,10 @@ static inline bool isr_scan_init_adva_check(struct lll_scan *lll,
|
||||
/* Only applies to initiator with no whitelist */
|
||||
if (rl_idx != FILTER_IDX_NONE) {
|
||||
return (rl_idx == lll->rl_idx);
|
||||
} else if (!ull_filter_lll_rl_addr_allowed(pdu->tx_addr,
|
||||
pdu->adv_ind.addr,
|
||||
&rl_idx)) {
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_PRIVACY */
|
||||
return ((lll->adv_addr_type == pdu->tx_addr) &&
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
|
||||
* Copyright (c) 2017-2021 Nordic Semiconductor ASA
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
@@ -63,30 +63,6 @@
|
||||
#include "common/log.h"
|
||||
#include "hal/debug.h"
|
||||
|
||||
#if !defined(TICKER_USER_LLL_VENDOR_OPS)
|
||||
#define TICKER_USER_LLL_VENDOR_OPS 0
|
||||
#endif /* TICKER_USER_LLL_VENDOR_OPS */
|
||||
|
||||
#if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
|
||||
#define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
|
||||
#endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
|
||||
|
||||
#if !defined(TICKER_USER_THREAD_VENDOR_OPS)
|
||||
#define TICKER_USER_THREAD_VENDOR_OPS 0
|
||||
#endif /* TICKER_USER_THREAD_VENDOR_OPS */
|
||||
|
||||
/* Define ticker nodes and user operations */
|
||||
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
|
||||
(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
|
||||
#define TICKER_USER_LLL_OPS (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
|
||||
#else
|
||||
#define TICKER_USER_LLL_OPS (2 + TICKER_USER_LLL_VENDOR_OPS + 1)
|
||||
#endif /* CONFIG_BT_CTLR_LOW_LAT */
|
||||
|
||||
#define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + 1)
|
||||
#define TICKER_USER_ULL_LOW_OPS (1 + 1)
|
||||
#define TICKER_USER_THREAD_OPS (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
|
||||
|
||||
#if defined(CONFIG_BT_BROADCASTER)
|
||||
#define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1)
|
||||
#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
|
||||
@@ -135,20 +111,33 @@
|
||||
#define BT_CONN_TICKER_NODES 0
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
|
||||
#define FLASH_TICKER_NODES 2 /* No. of tickers reserved for flashing */
|
||||
#define FLASH_TICKER_USER_APP_OPS 1 /* No. of additional ticker operations */
|
||||
#else
|
||||
#define FLASH_TICKER_NODES 0
|
||||
#define FLASH_TICKER_USER_APP_OPS 0
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_USER_EXT)
|
||||
#define USER_TICKER_NODES CONFIG_BT_CTLR_USER_TICKER_ID_RANGE
|
||||
#else
|
||||
#define USER_TICKER_NODES 0
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
|
||||
#define FLASH_TICKER_NODES 2 /* No. of tickers reserved for flash
|
||||
* driver
|
||||
*/
|
||||
#define TICKER_USER_ULL_HIGH_FLASH_OPS 1 /* No. of additional ticker ULL_HIGH
|
||||
* context operations
|
||||
*/
|
||||
#define TICKER_USER_THREAD_FLASH_OPS 1 /* No. of additional ticker thread
|
||||
* context operations
|
||||
*/
|
||||
#else
|
||||
#define FLASH_TICKER_NODES 0
|
||||
#define TICKER_USER_ULL_HIGH_FLASH_OPS 0
|
||||
#define TICKER_USER_THREAD_FLASH_OPS 0
|
||||
#endif
|
||||
|
||||
/* Define ticker nodes */
|
||||
/* NOTE: FLASH_TICKER_NODES shall be after Link Layer's list of ticker id
|
||||
* allocations, refer to ll_timeslice_ticker_id_get on how ticker id
|
||||
* used by flash driver is returned.
|
||||
*/
|
||||
#define TICKER_NODES (TICKER_ID_ULL_BASE + \
|
||||
BT_ADV_TICKER_NODES + \
|
||||
BT_ADV_AUX_TICKER_NODES + \
|
||||
@@ -157,15 +146,71 @@
|
||||
BT_SCAN_AUX_TICKER_NODES + \
|
||||
BT_SCAN_SYNC_TICKER_NODES + \
|
||||
BT_CONN_TICKER_NODES + \
|
||||
FLASH_TICKER_NODES + \
|
||||
USER_TICKER_NODES)
|
||||
#define TICKER_USER_APP_OPS (TICKER_USER_THREAD_OPS + \
|
||||
FLASH_TICKER_USER_APP_OPS)
|
||||
USER_TICKER_NODES + \
|
||||
FLASH_TICKER_NODES)
|
||||
|
||||
/* When both central and peripheral are supported, one each Rx node will be
|
||||
* needed by connectable advertising and the initiator to generate connection
|
||||
* complete event, hence conditionally set the count.
|
||||
*/
|
||||
#if defined(CONFIG_BT_MAX_CONN)
|
||||
#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
|
||||
#define BT_CTLR_MAX_CONNECTABLE 2
|
||||
#else
|
||||
#define BT_CTLR_MAX_CONNECTABLE 1
|
||||
#endif
|
||||
#define BT_CTLR_MAX_CONN CONFIG_BT_MAX_CONN
|
||||
#else
|
||||
#define BT_CTLR_MAX_CONNECTABLE 0
|
||||
#define BT_CTLR_MAX_CONN 0
|
||||
#endif
|
||||
|
||||
#if !defined(TICKER_USER_LLL_VENDOR_OPS)
|
||||
#define TICKER_USER_LLL_VENDOR_OPS 0
|
||||
#endif /* TICKER_USER_LLL_VENDOR_OPS */
|
||||
|
||||
#if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
|
||||
#define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
|
||||
#endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
|
||||
|
||||
#if !defined(TICKER_USER_THREAD_VENDOR_OPS)
|
||||
#define TICKER_USER_THREAD_VENDOR_OPS 0
|
||||
#endif /* TICKER_USER_THREAD_VENDOR_OPS */
|
||||
|
||||
/* Define ticker user operations */
|
||||
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
|
||||
(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
|
||||
#define TICKER_USER_LLL_OPS (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
|
||||
/* NOTE: When ticker job is disabled inside radio events then all advertising,
|
||||
* scanning, and slave latency cancel ticker operations will be deferred,
|
||||
* requiring increased ticker thread context operation queue count.
|
||||
*/
|
||||
#define TICKER_USER_THREAD_OPS (BT_CTLR_ADV_SET + BT_CTLR_SCAN_SET + \
|
||||
BT_CTLR_MAX_CONN + \
|
||||
TICKER_USER_THREAD_VENDOR_OPS + \
|
||||
TICKER_USER_THREAD_FLASH_OPS + \
|
||||
1)
|
||||
#else /* !CONFIG_BT_CTLR_LOW_LAT */
|
||||
#define TICKER_USER_LLL_OPS (2 + TICKER_USER_LLL_VENDOR_OPS + 1)
|
||||
/* NOTE: As ticker job is not disabled inside radio events, no need for extra
|
||||
* thread operations queue element for flash driver.
|
||||
*/
|
||||
#define TICKER_USER_THREAD_OPS (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
|
||||
#endif /* !CONFIG_BT_CTLR_LOW_LAT */
|
||||
|
||||
/* NOTE: When ULL_LOW priority is configured to lower than ULL_HIGH, then extra
|
||||
* ULL_HIGH operations queue elements are required to buffer the
|
||||
* requested ticker operations.
|
||||
*/
|
||||
#define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
|
||||
TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
|
||||
|
||||
#define TICKER_USER_ULL_LOW_OPS (1 + 1)
|
||||
|
||||
#define TICKER_USER_OPS (TICKER_USER_LLL_OPS + \
|
||||
TICKER_USER_ULL_HIGH_OPS + \
|
||||
TICKER_USER_ULL_LOW_OPS + \
|
||||
TICKER_USER_THREAD_OPS + \
|
||||
FLASH_TICKER_USER_APP_OPS)
|
||||
TICKER_USER_THREAD_OPS)
|
||||
|
||||
/* Memory for ticker nodes/instances */
|
||||
static uint8_t MALIGN(4) ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
|
||||
@@ -246,22 +291,6 @@ static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
|
||||
PDU_RX_USER_PDU_OCTETS_MAX) \
|
||||
)
|
||||
|
||||
/* When both central and peripheral are supported, one each Rx node will be
|
||||
* needed by connectable advertising and the initiator to generate connection
|
||||
* complete event, hence conditionally set the count.
|
||||
*/
|
||||
#if defined(CONFIG_BT_MAX_CONN)
|
||||
#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
|
||||
#define BT_CTLR_MAX_CONNECTABLE 2
|
||||
#else
|
||||
#define BT_CTLR_MAX_CONNECTABLE 1
|
||||
#endif
|
||||
#define BT_CTLR_MAX_CONN CONFIG_BT_MAX_CONN
|
||||
#else
|
||||
#define BT_CTLR_MAX_CONNECTABLE 0
|
||||
#define BT_CTLR_MAX_CONN 0
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_SCAN_SYNC_SET)
|
||||
#define BT_CTLR_SCAN_SYNC_SET CONFIG_BT_CTLR_SCAN_SYNC_SET
|
||||
#else
|
||||
@@ -356,7 +385,7 @@ int ll_init(struct k_sem *sem_rx)
|
||||
ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS;
|
||||
ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS;
|
||||
ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS;
|
||||
ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_APP_OPS;
|
||||
ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_THREAD_OPS;
|
||||
|
||||
err = ticker_init(TICKER_INSTANCE_ID_CTLR,
|
||||
TICKER_NODES, &ticker_nodes[0],
|
||||
@@ -1275,10 +1304,10 @@ void ll_tx_ack_put(uint16_t handle, struct node_tx *node_tx)
|
||||
#endif /* CONFIG_BT_CONN */
|
||||
|
||||
void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
|
||||
uint8_t * const user_id)
|
||||
uint8_t * const ticker_id)
|
||||
{
|
||||
*instance_index = TICKER_INSTANCE_ID_CTLR;
|
||||
*user_id = (TICKER_NODES - FLASH_TICKER_NODES);
|
||||
*ticker_id = (TICKER_NODES - FLASH_TICKER_NODES);
|
||||
}
|
||||
|
||||
void ll_radio_state_abort(void)
|
||||
|
||||
@@ -856,6 +856,7 @@ uint8_t ll_adv_enable(uint8_t enable)
|
||||
|
||||
/* FIXME: BEGIN: Move to ULL? */
|
||||
conn_lll->role = 1;
|
||||
conn_lll->slave.initiated = 0;
|
||||
conn_lll->data_chan_sel = 0;
|
||||
conn_lll->data_chan_use = 0;
|
||||
conn_lll->event_counter = 0;
|
||||
@@ -876,6 +877,7 @@ uint8_t ll_adv_enable(uint8_t enable)
|
||||
conn->procedure_expire = 0;
|
||||
|
||||
conn->common.fex_valid = 0;
|
||||
conn->common.txn_lock = 0;
|
||||
conn->slave.latency_cancel = 0;
|
||||
|
||||
conn->llcp_req = conn->llcp_ack = conn->llcp_type = 0;
|
||||
@@ -1537,6 +1539,10 @@ uint8_t ull_scan_rsp_set(struct ll_adv_set *adv, uint8_t len,
|
||||
struct pdu_adv *pdu;
|
||||
uint8_t idx;
|
||||
|
||||
if (len > PDU_AC_DATA_SIZE_MAX) {
|
||||
return BT_HCI_ERR_INVALID_PARAM;
|
||||
}
|
||||
|
||||
/* update scan pdu fields. */
|
||||
prev = lll_adv_scan_rsp_peek(&adv->lll);
|
||||
pdu = lll_adv_scan_rsp_alloc(&adv->lll, &idx);
|
||||
|
||||
@@ -323,7 +323,7 @@ uint16_t ll_adv_aux_max_data_length_get(void)
|
||||
|
||||
uint8_t ll_adv_aux_set_count_get(void)
|
||||
{
|
||||
return CONFIG_BT_CTLR_ADV_SET;
|
||||
return BT_CTLR_ADV_SET;
|
||||
}
|
||||
|
||||
uint8_t ll_adv_aux_set_remove(uint8_t handle)
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
#include "ull_adv_types.h"
|
||||
#include "ull_adv_internal.h"
|
||||
|
||||
static struct ll_adv_iso ll_adv_iso[CONFIG_BT_CTLR_ADV_SET];
|
||||
static struct ll_adv_iso ll_adv_iso[BT_CTLR_ADV_SET];
|
||||
static void *adv_iso_free;
|
||||
|
||||
static uint32_t ull_adv_iso_start(struct ll_adv_iso *adv_iso,
|
||||
@@ -238,7 +238,7 @@ uint8_t ll_adv_iso_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
|
||||
|
||||
adv_iso = &ll_adv_iso[0];
|
||||
|
||||
for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_SET; idx++, adv_iso++) {
|
||||
for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++, adv_iso++) {
|
||||
if (adv_iso->is_created &&
|
||||
(adv_iso->hci_handle == hci_handle)) {
|
||||
*handle = idx;
|
||||
@@ -257,7 +257,7 @@ uint8_t ll_adv_iso_by_hci_handle_new(uint8_t hci_handle, uint8_t *handle)
|
||||
adv_iso = &ll_adv_iso[0];
|
||||
adv_iso_empty = NULL;
|
||||
|
||||
for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_SET; idx++, adv_iso++) {
|
||||
for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++, adv_iso++) {
|
||||
if (adv_iso->is_created) {
|
||||
if (adv_iso->hci_handle == hci_handle) {
|
||||
return BT_HCI_ERR_CMD_DISALLOWED;
|
||||
@@ -330,7 +330,7 @@ static uint32_t ull_adv_iso_start(struct ll_adv_iso *adv_iso,
|
||||
|
||||
static inline struct ll_adv_iso *ull_adv_iso_get(uint8_t handle)
|
||||
{
|
||||
if (handle >= CONFIG_BT_CTLR_ADV_SET) {
|
||||
if (handle >= BT_CTLR_ADV_SET) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
@@ -92,6 +92,7 @@ static inline void event_ch_map_prep(struct ll_conn *conn,
|
||||
uint16_t event_counter);
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_LE_ENC)
|
||||
static inline void ctrl_tx_check_and_resume(struct ll_conn *conn);
|
||||
static bool is_enc_req_pause_tx(struct ll_conn *conn);
|
||||
static inline void event_enc_prep(struct ll_conn *conn);
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
@@ -778,6 +779,9 @@ void ull_conn_setup(memq_link_t *link, struct node_rx_hdr *rx)
|
||||
|
||||
ftr = &(rx->rx_ftr);
|
||||
|
||||
/* NOTE: LLL conn context SHALL be after lll_hdr in
|
||||
* struct lll_adv and struct lll_scan.
|
||||
*/
|
||||
lll = *((struct lll_conn **)((uint8_t *)ftr->param +
|
||||
sizeof(struct lll_hdr)));
|
||||
switch (lll->role) {
|
||||
@@ -1847,18 +1851,11 @@ static void tx_demux(void *param)
|
||||
|
||||
static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx)
|
||||
{
|
||||
#if defined(CONFIG_BT_CTLR_LE_ENC)
|
||||
if (!conn->tx_ctrl && (conn->tx_head != conn->tx_data)) {
|
||||
struct pdu_data *pdu_data_tx;
|
||||
|
||||
pdu_data_tx = (void *)conn->tx_head->pdu;
|
||||
if ((pdu_data_tx->ll_id != PDU_DATA_LLID_CTRL) ||
|
||||
((pdu_data_tx->llctrl.opcode !=
|
||||
PDU_DATA_LLCTRL_TYPE_ENC_REQ) &&
|
||||
(pdu_data_tx->llctrl.opcode !=
|
||||
PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ))) {
|
||||
conn->tx_ctrl = conn->tx_ctrl_last = conn->tx_head;
|
||||
}
|
||||
ctrl_tx_check_and_resume(conn);
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_LE_ENC */
|
||||
|
||||
if (conn->tx_head == conn->tx_ctrl) {
|
||||
conn->tx_head = conn->tx_head->next;
|
||||
@@ -1969,8 +1966,60 @@ static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx)
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
|
||||
|
||||
static void ctrl_tx_last_enqueue(struct ll_conn *conn,
|
||||
struct node_tx *tx)
|
||||
#if defined(CONFIG_BT_CTLR_LE_ENC)
|
||||
static inline void ctrl_tx_check_and_resume(struct ll_conn *conn)
|
||||
{
|
||||
struct pdu_data *pdu_data_tx;
|
||||
|
||||
pdu_data_tx = (void *)conn->tx_head->pdu;
|
||||
if ((pdu_data_tx->ll_id != PDU_DATA_LLID_CTRL) ||
|
||||
((pdu_data_tx->llctrl.opcode !=
|
||||
PDU_DATA_LLCTRL_TYPE_ENC_REQ) &&
|
||||
(pdu_data_tx->llctrl.opcode !=
|
||||
PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ))) {
|
||||
conn->tx_ctrl = conn->tx_ctrl_last = conn->tx_head;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_LE_ENC */
|
||||
|
||||
/* Check transaction violation and get free ctrl tx PDU */
|
||||
static struct node_tx *ctrl_tx_rsp_mem_acquire(struct ll_conn *conn,
|
||||
struct node_rx_pdu *rx,
|
||||
int *err)
|
||||
{
|
||||
struct node_tx *tx;
|
||||
|
||||
/* Ignore duplicate requests without previous being acknowledged. */
|
||||
if (conn->common.txn_lock) {
|
||||
/* Mark for buffer for release */
|
||||
rx->hdr.type = NODE_RX_TYPE_RELEASE;
|
||||
|
||||
/* Drop request */
|
||||
*err = 0U;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Acquire ctrl tx mem */
|
||||
tx = mem_acquire(&mem_conn_tx_ctrl.free);
|
||||
if (!tx) {
|
||||
*err = -ENOBUFS;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Lock further responses to duplicate requests before previous
|
||||
* response is acknowledged.
|
||||
*/
|
||||
conn->common.txn_lock = 1U;
|
||||
|
||||
/* NOTE: err value not required when returning valid ctrl tx PDU */
|
||||
|
||||
return tx;
|
||||
}
|
||||
|
||||
static inline void ctrl_tx_last_enqueue(struct ll_conn *conn,
|
||||
struct node_tx *tx)
|
||||
{
|
||||
tx->next = conn->tx_ctrl_last->next;
|
||||
conn->tx_ctrl_last->next = tx;
|
||||
@@ -2001,6 +2050,10 @@ static inline void ctrl_tx_pause_enqueue(struct ll_conn *conn,
|
||||
*/
|
||||
if (conn->tx_head == conn->tx_data) {
|
||||
conn->tx_data = conn->tx_data->next;
|
||||
#if defined(CONFIG_BT_CTLR_LE_ENC)
|
||||
} else if (!conn->tx_ctrl) {
|
||||
ctrl_tx_check_and_resume(conn);
|
||||
#endif /* CONFIG_BT_CTLR_LE_ENC */
|
||||
}
|
||||
|
||||
/* if no ctrl packet already queued, new ctrl added will be
|
||||
@@ -2844,6 +2897,11 @@ static inline void event_enc_prep(struct ll_conn *conn)
|
||||
event_enc_reject_prep(conn, pdu_ctrl_tx);
|
||||
|
||||
ctrl_tx_enqueue(conn, tx);
|
||||
|
||||
/* procedure request acked */
|
||||
conn->llcp_ack = conn->llcp_req;
|
||||
|
||||
return;
|
||||
}
|
||||
/* place the start enc req packet as next in tx queue */
|
||||
else {
|
||||
@@ -2889,19 +2947,16 @@ static inline void event_enc_prep(struct ll_conn *conn)
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_BT_CTLR_FAST_ENC)
|
||||
/* Peripheral sends start enc rsp after reception of start enc rsp */
|
||||
} else {
|
||||
start_enc_rsp_send(conn, pdu_ctrl_tx);
|
||||
|
||||
ctrl_tx_enqueue(conn, tx);
|
||||
|
||||
/* resume data packet rx and tx */
|
||||
conn->llcp_enc.pause_rx = 0U;
|
||||
conn->llcp_enc.pause_tx = 0U;
|
||||
#endif /* !CONFIG_BT_CTLR_FAST_ENC */
|
||||
}
|
||||
|
||||
/* procedure request acked */
|
||||
conn->llcp_ack = conn->llcp_req;
|
||||
/* Wait for encryption setup to complete */
|
||||
conn->llcp.encryption.state = LLCP_ENC_STATE_ENC_WAIT;
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_LE_ENC */
|
||||
|
||||
@@ -4228,13 +4283,14 @@ static inline bool ctrl_is_unexpected(struct ll_conn *conn, uint8_t opcode)
|
||||
static int unknown_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx,
|
||||
uint8_t type)
|
||||
{
|
||||
struct node_tx *tx;
|
||||
struct pdu_data *pdu;
|
||||
struct node_tx *tx;
|
||||
int err;
|
||||
|
||||
/* acquire ctrl tx mem */
|
||||
tx = mem_acquire(&mem_conn_tx_ctrl.free);
|
||||
/* Check transaction violation and get free ctrl tx PDU */
|
||||
tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
|
||||
if (!tx) {
|
||||
return -ENOBUFS;
|
||||
return err;
|
||||
}
|
||||
|
||||
pdu = (void *)tx->pdu;
|
||||
@@ -4282,14 +4338,15 @@ static int feature_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx,
|
||||
struct pdu_data *pdu_rx)
|
||||
{
|
||||
struct pdu_data_llctrl_feature_req *req;
|
||||
struct node_tx *tx;
|
||||
struct pdu_data *pdu_tx;
|
||||
struct node_tx *tx;
|
||||
uint32_t feat;
|
||||
int err;
|
||||
|
||||
/* acquire tx mem */
|
||||
tx = mem_acquire(&mem_conn_tx_ctrl.free);
|
||||
/* Check transaction violation and get free ctrl tx PDU */
|
||||
tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
|
||||
if (!tx) {
|
||||
return -ENOBUFS;
|
||||
return err;
|
||||
}
|
||||
|
||||
/* AND the feature set to get Feature USED */
|
||||
@@ -4470,11 +4527,12 @@ static int reject_ext_ind_send(struct ll_conn *conn, struct node_rx_pdu *rx,
|
||||
{
|
||||
struct pdu_data *pdu_ctrl_tx;
|
||||
struct node_tx *tx;
|
||||
int err;
|
||||
|
||||
/* acquire tx mem */
|
||||
tx = mem_acquire(&mem_conn_tx_ctrl.free);
|
||||
/* Check transaction violation and get free ctrl tx PDU */
|
||||
tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
|
||||
if (!tx) {
|
||||
return -ENOBUFS;
|
||||
return err;
|
||||
}
|
||||
|
||||
pdu_ctrl_tx = (void *)tx->pdu;
|
||||
@@ -4832,9 +4890,12 @@ static inline int length_req_rsp_recv(struct ll_conn *conn, memq_link_t *link,
|
||||
|
||||
/* Check for free ctrl tx PDU */
|
||||
if (pdu_rx->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_LENGTH_REQ) {
|
||||
tx = mem_acquire(&mem_conn_tx_ctrl.free);
|
||||
int err;
|
||||
|
||||
/* Check transaction violation and get free ctrl tx PDU */
|
||||
tx = ctrl_tx_rsp_mem_acquire(conn, *rx, &err);
|
||||
if (!tx) {
|
||||
return -ENOBUFS;
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5032,6 +5093,11 @@ static inline int length_req_rsp_recv(struct ll_conn *conn, memq_link_t *link,
|
||||
if (pdu_rx->llctrl.opcode != PDU_DATA_LLCTRL_TYPE_LENGTH_RSP) {
|
||||
mem_release(tx, &mem_conn_tx_ctrl.free);
|
||||
|
||||
/* Release the transacation lock, as ctrl tx PDU is not
|
||||
* being enqueued.
|
||||
*/
|
||||
conn->common.txn_lock = 0U;
|
||||
|
||||
/* Defer new request if previous in resize state */
|
||||
if (conn->llcp_length.state ==
|
||||
LLCP_LENGTH_STATE_RESIZE) {
|
||||
@@ -5064,13 +5130,14 @@ send_length_resp:
|
||||
#if defined(CONFIG_BT_CTLR_LE_PING)
|
||||
static int ping_resp_send(struct ll_conn *conn, struct node_rx_pdu *rx)
|
||||
{
|
||||
struct node_tx *tx;
|
||||
struct pdu_data *pdu_tx;
|
||||
struct node_tx *tx;
|
||||
int err;
|
||||
|
||||
/* acquire tx mem */
|
||||
tx = mem_acquire(&mem_conn_tx_ctrl.free);
|
||||
/* Check transaction violation and get free ctrl tx PDU */
|
||||
tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
|
||||
if (!tx) {
|
||||
return -ENOBUFS;
|
||||
return err;
|
||||
}
|
||||
|
||||
pdu_tx = (void *)tx->pdu;
|
||||
@@ -5095,11 +5162,12 @@ static int phy_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx,
|
||||
struct pdu_data_llctrl_phy_req *p;
|
||||
struct pdu_data *pdu_ctrl_tx;
|
||||
struct node_tx *tx;
|
||||
int err;
|
||||
|
||||
/* acquire tx mem */
|
||||
tx = mem_acquire(&mem_conn_tx_ctrl.free);
|
||||
/* Check transaction violation and get free ctrl tx PDU */
|
||||
tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
|
||||
if (!tx) {
|
||||
return -ENOBUFS;
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Wait for peer master to complete the procedure */
|
||||
@@ -5300,6 +5368,13 @@ static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
|
||||
}
|
||||
break;
|
||||
|
||||
case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
|
||||
case PDU_DATA_LLCTRL_TYPE_PING_RSP:
|
||||
case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
|
||||
/* Reset the transaction lock */
|
||||
conn->common.txn_lock = 0U;
|
||||
break;
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_LE_ENC)
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
|
||||
@@ -5330,13 +5405,29 @@ static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
|
||||
break;
|
||||
|
||||
case PDU_DATA_LLCTRL_TYPE_START_ENC_REQ:
|
||||
/* Nothing to do.
|
||||
* Remember that we may have received encrypted START_ENC_RSP
|
||||
/* Remember that we may have received encrypted START_ENC_RSP
|
||||
* alongwith this tx ack at this point in time.
|
||||
*/
|
||||
conn->llcp.encryption.state = LLCP_ENC_STATE_ENC_WAIT;
|
||||
break;
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
|
||||
case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
|
||||
if (conn->lll.role) {
|
||||
/* resume data packet rx and tx */
|
||||
conn->llcp_enc.pause_rx = 0U;
|
||||
conn->llcp_enc.pause_tx = 0U;
|
||||
|
||||
/* Procedure complete */
|
||||
conn->procedure_expire = 0U;
|
||||
|
||||
/* procedure request acked */
|
||||
conn->llcp_ack = conn->llcp_req;
|
||||
} else {
|
||||
conn->llcp.encryption.state = LLCP_ENC_STATE_ENC_WAIT;
|
||||
}
|
||||
break;
|
||||
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ:
|
||||
/* pause data packet tx */
|
||||
@@ -5372,6 +5463,12 @@ static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
|
||||
case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
|
||||
if (pdu_tx->llctrl.reject_ext_ind.reject_opcode !=
|
||||
PDU_DATA_LLCTRL_TYPE_ENC_REQ) {
|
||||
/* Reset the transaction lock set by connection
|
||||
* parameter request and PHY update procedure when
|
||||
* sending the Reject Ext Ind PDU.
|
||||
*/
|
||||
conn->common.txn_lock = 0U;
|
||||
|
||||
break;
|
||||
}
|
||||
__fallthrough;
|
||||
@@ -5393,6 +5490,9 @@ static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
|
||||
break;
|
||||
|
||||
case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
|
||||
/* Reset the transaction lock */
|
||||
conn->common.txn_lock = 0U;
|
||||
|
||||
if (conn->llcp_length.req != conn->llcp_length.ack) {
|
||||
switch (conn->llcp_length.state) {
|
||||
case LLCP_LENGTH_STATE_RSP_ACK_WAIT:
|
||||
@@ -5459,9 +5559,18 @@ static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
|
||||
uint8_t phy_tx_time[8] = {PHY_1M, PHY_1M, PHY_2M,
|
||||
PHY_1M, PHY_CODED, PHY_CODED,
|
||||
PHY_CODED, PHY_CODED};
|
||||
struct lll_conn *lll = &conn->lll;
|
||||
struct lll_conn *lll;
|
||||
uint8_t phys;
|
||||
|
||||
/* Reset the transaction lock when PHY update response
|
||||
* sent by peripheral is acknowledged.
|
||||
*/
|
||||
if (pdu_tx->llctrl.opcode ==
|
||||
PDU_DATA_LLCTRL_TYPE_PHY_RSP) {
|
||||
conn->common.txn_lock = 0U;
|
||||
}
|
||||
|
||||
lll = &conn->lll;
|
||||
phys = conn->llcp_phy.tx | lll->phy_tx;
|
||||
lll->phy_tx_time = phy_tx_time[phys];
|
||||
}
|
||||
@@ -5729,40 +5838,35 @@ static inline int ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx,
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
||||
case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
|
||||
if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_START_ENC_RSP,
|
||||
pdu_rx->len)) {
|
||||
if ((conn->llcp_req == conn->llcp_ack) ||
|
||||
(conn->llcp_type != LLCP_ENCRYPTION) ||
|
||||
(!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_START_ENC_RSP,
|
||||
pdu_rx->len))) {
|
||||
goto ull_conn_rx_unknown_rsp_send;
|
||||
}
|
||||
|
||||
if (conn->lll.role) {
|
||||
#if !defined(CONFIG_BT_CTLR_FAST_ENC)
|
||||
if ((conn->llcp_req != conn->llcp_ack) &&
|
||||
(conn->llcp_type != LLCP_ENCRYPTION)) {
|
||||
goto ull_conn_rx_unknown_rsp_send;
|
||||
}
|
||||
|
||||
/* start enc rsp to be scheduled in slave prepare */
|
||||
/* start enc rsp to be scheduled in slave prepare */
|
||||
conn->llcp.encryption.state = LLCP_ENC_STATE_INPROG;
|
||||
if (conn->llcp_req == conn->llcp_ack) {
|
||||
conn->llcp_type = LLCP_ENCRYPTION;
|
||||
conn->llcp_ack -= 2U;
|
||||
}
|
||||
|
||||
#else /* CONFIG_BT_CTLR_FAST_ENC */
|
||||
nack = start_enc_rsp_send(conn, NULL);
|
||||
if (nack) {
|
||||
break;
|
||||
}
|
||||
|
||||
/* resume data packet rx and tx */
|
||||
conn->llcp_enc.pause_rx = 0U;
|
||||
conn->llcp_enc.pause_tx = 0U;
|
||||
#endif /* CONFIG_BT_CTLR_FAST_ENC */
|
||||
|
||||
} else {
|
||||
/* resume data packet rx and tx */
|
||||
conn->llcp_enc.pause_rx = 0U;
|
||||
conn->llcp_enc.pause_tx = 0U;
|
||||
|
||||
/* Procedure complete */
|
||||
conn->procedure_expire = 0U;
|
||||
|
||||
/* procedure request acked */
|
||||
conn->llcp_ack = conn->llcp_req;
|
||||
}
|
||||
|
||||
/* enqueue the start enc resp (encryption change/refresh) */
|
||||
@@ -5772,10 +5876,6 @@ static inline int ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx,
|
||||
/* key refresh event */
|
||||
(*rx)->hdr.type = NODE_RX_TYPE_ENC_REFRESH;
|
||||
}
|
||||
|
||||
/* Procedure complete */
|
||||
conn->procedure_expire = 0U;
|
||||
|
||||
break;
|
||||
#endif /* CONFIG_BT_CTLR_LE_ENC */
|
||||
|
||||
@@ -6107,7 +6207,12 @@ static inline int ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx,
|
||||
conn_upd_curr = conn;
|
||||
}
|
||||
} else {
|
||||
LL_ASSERT(0);
|
||||
/* Ignore duplicate request as peripheral is busy
|
||||
* processing the previously initiated connection
|
||||
* update request procedure.
|
||||
*/
|
||||
/* Mark for buffer for release */
|
||||
(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
|
||||
}
|
||||
break;
|
||||
|
||||
|
||||
@@ -59,6 +59,7 @@ struct ll_conn {
|
||||
union {
|
||||
struct {
|
||||
uint8_t fex_valid:1;
|
||||
uint8_t txn_lock:1;
|
||||
#if defined(CONFIG_BT_CTLR_CONN_META)
|
||||
uint8_t is_must_expire:1;
|
||||
#endif /* CONFIG_BT_CTLR_CONN_META */
|
||||
@@ -67,6 +68,7 @@ struct ll_conn {
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
struct {
|
||||
uint8_t fex_valid:1;
|
||||
uint8_t txn_lock:1;
|
||||
#if defined(CONFIG_BT_CTLR_CONN_META)
|
||||
uint8_t is_must_expire:1;
|
||||
#endif /* CONFIG_BT_CTLR_CONN_META */
|
||||
@@ -87,6 +89,7 @@ struct ll_conn {
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
struct {
|
||||
uint8_t fex_valid:1;
|
||||
uint8_t txn_lock:1;
|
||||
#if defined(CONFIG_BT_CTLR_CONN_META)
|
||||
uint8_t is_must_expire:1;
|
||||
#endif /* CONFIG_BT_CTLR_CONN_META */
|
||||
@@ -131,6 +134,7 @@ struct ll_conn {
|
||||
LLCP_ENC_STATE_INPROG,
|
||||
LLCP_ENC_STATE_INIT,
|
||||
LLCP_ENC_STATE_LTK_WAIT,
|
||||
LLCP_ENC_STATE_ENC_WAIT,
|
||||
} state:2 __packed;
|
||||
uint8_t error_code;
|
||||
uint8_t skd[16];
|
||||
|
||||
@@ -757,10 +757,10 @@ bool ull_filter_lll_rl_addr_allowed(uint8_t id_addr_type, uint8_t *id_addr, uint
|
||||
{
|
||||
uint8_t i, j;
|
||||
|
||||
/* If AR is disabled or we matched an IRK then we're all set. No hw
|
||||
/* We matched an IRK then we're all set. No hw
|
||||
* filters are used in this case.
|
||||
*/
|
||||
if (!rl_enable || *rl_idx != FILTER_IDX_NONE) {
|
||||
if (*rl_idx != FILTER_IDX_NONE) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@@ -210,6 +210,8 @@ uint8_t ll_create_connection(uint16_t scan_interval, uint16_t scan_window,
|
||||
conn_lll->data_chan_sel = 0;
|
||||
conn_lll->data_chan_use = 0;
|
||||
conn_lll->role = 0;
|
||||
conn_lll->master.initiated = 0;
|
||||
conn_lll->master.cancelled = 0;
|
||||
/* FIXME: END: Move to ULL? */
|
||||
#if defined(CONFIG_BT_CTLR_CONN_META)
|
||||
memset(&conn_lll->conn_meta, 0, sizeof(conn_lll->conn_meta));
|
||||
@@ -240,6 +242,7 @@ uint8_t ll_create_connection(uint16_t scan_interval, uint16_t scan_window,
|
||||
#endif /* CONFIG_BT_CTLR_LE_PING */
|
||||
|
||||
conn->common.fex_valid = 0U;
|
||||
conn->common.txn_lock = 0U;
|
||||
conn->master.terminate_ack = 0U;
|
||||
|
||||
conn->llcp_req = conn->llcp_ack = conn->llcp_type = 0U;
|
||||
@@ -382,8 +385,28 @@ uint8_t ll_connect_disable(void **rx)
|
||||
return BT_HCI_ERR_CMD_DISALLOWED;
|
||||
}
|
||||
|
||||
/* Check if initiator active */
|
||||
conn_lll = scan->lll.conn;
|
||||
if (!conn_lll) {
|
||||
/* Scanning not associated with initiation of a connection or
|
||||
* connection setup already complete (was set to NULL in
|
||||
* ull_master_setup), but HCI event not processed by host.
|
||||
*/
|
||||
return BT_HCI_ERR_CMD_DISALLOWED;
|
||||
}
|
||||
|
||||
/* Indicate to LLL that a cancellation is requested */
|
||||
conn_lll->master.cancelled = 1U;
|
||||
cpu_dmb();
|
||||
|
||||
/* Check if connection was established under race condition, i.e.
|
||||
* before the cancelled flag was set.
|
||||
*/
|
||||
conn_lll = scan->lll.conn;
|
||||
if (!conn_lll) {
|
||||
/* Connection setup completed on race condition with cancelled
|
||||
* flag, before it was set.
|
||||
*/
|
||||
return BT_HCI_ERR_CMD_DISALLOWED;
|
||||
}
|
||||
|
||||
|
||||
@@ -59,17 +59,16 @@ void ull_slave_setup(memq_link_t *link, struct node_rx_hdr *rx,
|
||||
uint8_t peer_addr[BDADDR_SIZE];
|
||||
uint32_t ticks_slot_overhead;
|
||||
uint32_t ticks_slot_offset;
|
||||
uint32_t ready_delay_us;
|
||||
struct pdu_adv *pdu_adv;
|
||||
struct ll_adv_set *adv;
|
||||
struct node_rx_cc *cc;
|
||||
struct ll_conn *conn;
|
||||
uint32_t ready_delay_us;
|
||||
uint32_t ticker_status;
|
||||
uint8_t peer_addr_type;
|
||||
uint16_t win_offset;
|
||||
uint16_t win_delay_us;
|
||||
struct node_rx_cc *cc;
|
||||
struct ll_conn *conn;
|
||||
uint16_t win_offset;
|
||||
uint16_t timeout;
|
||||
uint16_t interval;
|
||||
uint8_t chan_sel;
|
||||
|
||||
adv = ((struct lll_adv *)ftr->param)->hdr.parent;
|
||||
@@ -83,22 +82,48 @@ void ull_slave_setup(memq_link_t *link, struct node_rx_hdr *rx,
|
||||
sizeof(lll->data_chan_map));
|
||||
lll->data_chan_count = util_ones_count_get(&lll->data_chan_map[0],
|
||||
sizeof(lll->data_chan_map));
|
||||
if (lll->data_chan_count < 2) {
|
||||
return;
|
||||
}
|
||||
lll->data_chan_hop = pdu_adv->connect_ind.hop;
|
||||
if ((lll->data_chan_hop < 5) || (lll->data_chan_hop > 16)) {
|
||||
lll->interval = sys_le16_to_cpu(pdu_adv->connect_ind.interval);
|
||||
if ((lll->data_chan_count < 2) || (lll->data_chan_hop < 5) ||
|
||||
(lll->data_chan_hop > 16) || !lll->interval) {
|
||||
lll->slave.initiated = 0U;
|
||||
|
||||
/* Mark for buffer for release */
|
||||
rx->type = NODE_RX_TYPE_RELEASE;
|
||||
|
||||
/* Release CSA#2 related node rx too */
|
||||
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
|
||||
struct node_rx_pdu *rx_csa;
|
||||
|
||||
/* pick the rx node instance stored within the
|
||||
* connection rx node.
|
||||
*/
|
||||
rx_csa = (void *)ftr->extra;
|
||||
|
||||
/* Enqueue the connection event to be release */
|
||||
ll_rx_put(link, rx);
|
||||
|
||||
/* Use the rx node for CSA event */
|
||||
rx = (void *)rx_csa;
|
||||
link = rx->link;
|
||||
|
||||
/* Mark for buffer for release */
|
||||
rx->type = NODE_RX_TYPE_RELEASE;
|
||||
}
|
||||
|
||||
/* Enqueue connection or CSA event to be release */
|
||||
ll_rx_put(link, rx);
|
||||
ll_rx_sched();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
((struct lll_adv *)ftr->param)->conn = NULL;
|
||||
|
||||
interval = sys_le16_to_cpu(pdu_adv->connect_ind.interval);
|
||||
lll->interval = interval;
|
||||
lll->latency = sys_le16_to_cpu(pdu_adv->connect_ind.latency);
|
||||
|
||||
win_offset = sys_le16_to_cpu(pdu_adv->connect_ind.win_offset);
|
||||
conn_interval_us = interval * CONN_INT_UNIT_US;
|
||||
conn_interval_us = lll->interval * CONN_INT_UNIT_US;
|
||||
|
||||
if (0) {
|
||||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
|
||||
|
||||
@@ -130,6 +130,13 @@ void att_sent(struct bt_conn *conn, void *user_data)
|
||||
}
|
||||
}
|
||||
|
||||
/* In case of success the ownership of the buffer is transferred to the stack
|
||||
* which takes care of releasing it when it completes transmitting to the
|
||||
* controller.
|
||||
*
|
||||
* In case bt_l2cap_send_cb fails the buffer state and ownership are retained
|
||||
* so the buffer can be safely pushed back to the queue to be processed later.
|
||||
*/
|
||||
static int chan_send(struct bt_att_chan *chan, struct net_buf *buf,
|
||||
bt_att_chan_sent_t cb)
|
||||
{
|
||||
@@ -189,11 +196,23 @@ static int chan_send(struct bt_att_chan *chan, struct net_buf *buf,
|
||||
|
||||
chan->sent = cb ? cb : chan_cb(buf);
|
||||
|
||||
/* bt_l2cap_send_cb takes onwership of the buffer so take another
|
||||
* reference to restore the state in case an error is returned.
|
||||
*/
|
||||
net_buf_ref(buf);
|
||||
|
||||
err = bt_l2cap_send_cb(chan->att->conn, BT_L2CAP_CID_ATT,
|
||||
buf, att_cb(chan->sent),
|
||||
&chan->chan.chan);
|
||||
if (err) {
|
||||
/* In case of an error has occurred restore the buffer state as
|
||||
* the extra reference shall have prevented the buffer to be
|
||||
* freed.
|
||||
*/
|
||||
net_buf_simple_restore(&buf->b, &state);
|
||||
} else {
|
||||
/* In case of success unref the extra reference taken */
|
||||
net_buf_unref(buf);
|
||||
}
|
||||
|
||||
return err;
|
||||
|
||||
@@ -129,6 +129,7 @@ struct net_buf *bt_buf_get_tx(enum bt_buf_type type, k_timeout_t timeout,
|
||||
case H4_ISO:
|
||||
type = BT_BUF_ISO_OUT;
|
||||
pool = &hci_iso_pool;
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
LOG_ERR("Unknown H4 type %u", type);
|
||||
|
||||
@@ -1107,6 +1107,13 @@ static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
|
||||
}
|
||||
|
||||
req = net_buf_pull_mem(buf, sizeof(*req));
|
||||
|
||||
if (buf->len > sizeof(dcid)) {
|
||||
BT_ERR("Too large LE conn req packet size");
|
||||
result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
|
||||
goto response;
|
||||
}
|
||||
|
||||
psm = sys_le16_to_cpu(req->psm);
|
||||
mtu = sys_le16_to_cpu(req->mtu);
|
||||
mps = sys_le16_to_cpu(req->mps);
|
||||
@@ -1133,6 +1140,8 @@ static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
|
||||
goto response;
|
||||
}
|
||||
|
||||
memset(dcid, 0, sizeof(dcid));
|
||||
|
||||
while (buf->len >= sizeof(scid)) {
|
||||
scid = net_buf_pull_le16(buf);
|
||||
|
||||
@@ -1144,28 +1153,20 @@ static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
|
||||
dcid[i++] = sys_cpu_to_le16(ch->rx.cid);
|
||||
continue;
|
||||
/* Some connections refused – invalid Source CID */
|
||||
case BT_L2CAP_LE_ERR_INVALID_SCID:
|
||||
/* Some connections refused – Source CID already allocated */
|
||||
case BT_L2CAP_LE_ERR_SCID_IN_USE:
|
||||
/* Some connections refused – not enough resources
|
||||
* available.
|
||||
*/
|
||||
default:
|
||||
/* If a Destination CID is 0x0000, the channel was not
|
||||
* established.
|
||||
*/
|
||||
dcid[i++] = 0x0000;
|
||||
continue;
|
||||
/* Some connections refused – not enough resources
|
||||
* available.
|
||||
*/
|
||||
case BT_L2CAP_LE_ERR_NO_RESOURCES:
|
||||
default:
|
||||
goto response;
|
||||
}
|
||||
}
|
||||
|
||||
response:
|
||||
if (!i) {
|
||||
i = buf->len / sizeof(scid);
|
||||
}
|
||||
|
||||
buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_ECRED_CONN_RSP, ident,
|
||||
sizeof(*rsp) + (sizeof(scid) * i));
|
||||
|
@@ -1660,9 +1661,16 @@ static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch)

static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data)
{
struct bt_l2cap_chan *chan = user_data;
uint16_t cid = POINTER_TO_UINT(user_data);
struct bt_l2cap_chan *chan;

BT_DBG("conn %p chan %p", conn, chan);
BT_DBG("conn %p CID 0x%04x", conn, cid);

chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
if (!chan) {
/* Received SDU sent callback for disconnected channel */
return;
}

if (chan->ops->sent) {
chan->ops->sent(chan);
@@ -1673,9 +1681,16 @@ static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data)

static void l2cap_chan_seg_sent(struct bt_conn *conn, void *user_data)
{
struct bt_l2cap_chan *chan = user_data;
uint16_t cid = POINTER_TO_UINT(user_data);
struct bt_l2cap_chan *chan;

BT_DBG("conn %p chan %p", conn, chan);
BT_DBG("conn %p CID 0x%04x", conn, cid);

chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
if (!chan) {
/* Received segment sent callback for disconnected channel */
return;
}

l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
}
@@ -1737,10 +1752,12 @@ static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
*/
if ((buf == seg || !buf->len) && ch->chan.ops->sent) {
err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
l2cap_chan_sdu_sent, &ch->chan);
l2cap_chan_sdu_sent,
UINT_TO_POINTER(ch->tx.cid));
} else {
err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
l2cap_chan_seg_sent, &ch->chan);
l2cap_chan_seg_sent,
UINT_TO_POINTER(ch->tx.cid));
}

if (err) {
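The TX callbacks now receive the channel's CID packed into `user_data` rather than a raw channel pointer, so a channel freed while a transmission was in flight is simply not found on lookup instead of being dereferenced. A minimal sketch of that round trip, assuming the internal L2CAP definitions are available (`tx_done` is a hypothetical name, not from the patch):

```c
#include <sys/util.h>   /* UINT_TO_POINTER() / POINTER_TO_UINT() */

static void tx_done(struct bt_conn *conn, void *user_data)
{
	uint16_t cid = POINTER_TO_UINT(user_data);
	struct bt_l2cap_chan *chan = bt_l2cap_le_lookup_tx_cid(conn, cid);

	if (!chan) {
		return;  /* channel disconnected before the callback ran */
	}

	/* ...safe to dereference chan here... */
}

/* At send time the CID is packed the same way:
 * bt_l2cap_send_cb(conn, cid, seg, tx_done, UINT_TO_POINTER(cid));
 */
```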
@@ -2189,6 +2206,12 @@ static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
return;
}

if (buf->len < 2) {
BT_WARN("Too short data packet");
bt_l2cap_chan_disconnect(&chan->chan);
return;
}

sdu_len = net_buf_pull_le16(buf);

BT_DBG("chan %p len %u sdu_len %u", chan, buf->len, sdu_len);

@@ -736,14 +736,8 @@ static uint8_t get_encryption_key_size(struct bt_smp *smp)
/* Check that a new pairing procedure with an existing bond will not lower
* the established security level of the bond.
*/
static bool update_keys_check(struct bt_smp *smp)
static bool update_keys_check(struct bt_smp *smp, struct bt_keys *keys)
{
struct bt_conn *conn = smp->chan.chan.conn;

if (!conn->le.keys) {
conn->le.keys = bt_keys_get_addr(conn->id, &conn->le.dst);
}

if (IS_ENABLED(CONFIG_BT_SMP_DISABLE_LEGACY_JW_PASSKEY) &&
!atomic_test_bit(smp->flags, SMP_FLAG_SC) &&
smp->method != LEGACY_OOB) {
@@ -755,27 +749,27 @@ static bool update_keys_check(struct bt_smp *smp)
return false;
}

if (!conn->le.keys ||
!(conn->le.keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_LTK))) {
if (!keys ||
!(keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_LTK))) {
return true;
}

if (conn->le.keys->enc_size > get_encryption_key_size(smp)) {
if (keys->enc_size > get_encryption_key_size(smp)) {
return false;
}

if ((conn->le.keys->keys & BT_KEYS_LTK_P256) &&
if ((keys->keys & BT_KEYS_LTK_P256) &&
!atomic_test_bit(smp->flags, SMP_FLAG_SC)) {
return false;
}

if ((conn->le.keys->flags & BT_KEYS_AUTHENTICATED) &&
if ((keys->flags & BT_KEYS_AUTHENTICATED) &&
smp->method == JUST_WORKS) {
return false;
}

if (!IS_ENABLED(CONFIG_BT_SMP_ALLOW_UNAUTH_OVERWRITE) &&
(!(conn->le.keys->flags & BT_KEYS_AUTHENTICATED)
(!(keys->flags & BT_KEYS_AUTHENTICATED)
&& smp->method == JUST_WORKS)) {
return false;
}
@@ -3008,7 +3002,7 @@ static uint8_t smp_pairing_req(struct bt_smp *smp, struct net_buf *buf)

smp->method = get_pair_method(smp, req->io_capability);

if (!update_keys_check(smp)) {
if (!update_keys_check(smp, conn->le.keys)) {
return BT_SMP_ERR_AUTH_REQUIREMENTS;
}

@@ -3206,7 +3200,7 @@ static uint8_t smp_pairing_rsp(struct bt_smp *smp, struct net_buf *buf)

smp->method = get_pair_method(smp, rsp->io_capability);

if (!update_keys_check(smp)) {
if (!update_keys_check(smp, conn->le.keys)) {
return BT_SMP_ERR_AUTH_REQUIREMENTS;
}

@@ -3834,6 +3828,18 @@ static uint8_t smp_ident_addr_info(struct bt_smp *smp, struct net_buf *buf)
return BT_SMP_ERR_INVALID_PARAMS;
}

if (bt_addr_le_cmp(&conn->le.dst, &req->addr) != 0) {
struct bt_keys *keys = bt_keys_find_addr(conn->id, &req->addr);

if (keys) {
if (!update_keys_check(smp, keys)) {
return BT_SMP_ERR_UNSPECIFIED;
}

bt_keys_clear(keys);
}
}

if (atomic_test_bit(smp->flags, SMP_FLAG_BOND)) {
const bt_addr_le_t *dst;
struct bt_keys *keys;

@@ -27,14 +27,6 @@ config NET_MGMT_EVENT_STACK_SIZE
Set the internal stack size for NM to run registered callbacks
on events.

config NET_MGMT_EVENT_THREAD_PRIO
int "Inner thread priority (use with care)"
default -1 if NET_TC_THREAD_COOPERATIVE
default 7
help
Set the network management event core's inner thread priority.
Do not change this unless you know what you are doing.

config NET_MGMT_EVENT_QUEUE_SIZE
int "Size of event queue"
default 16 if NET_MGMT_EVENT_MONITOR

@@ -386,10 +386,17 @@ void net_mgmt_event_init(void)
(void)memset(events, 0, CONFIG_NET_MGMT_EVENT_QUEUE_SIZE *
sizeof(struct mgmt_event_entry));

#if IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE)
/* Lowest priority cooperative thread */
#define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
#else
#define THREAD_PRIORITY K_PRIO_PREEMPT(CONFIG_NUM_PREEMPT_PRIORITIES - 1)
#endif

k_thread_create(&mgmt_thread_data, mgmt_stack,
K_KERNEL_STACK_SIZEOF(mgmt_stack),
(k_thread_entry_t)mgmt_thread, NULL, NULL, NULL,
CONFIG_NET_MGMT_EVENT_THREAD_PRIO, 0, K_NO_WAIT);
THREAD_PRIORITY, 0, K_NO_WAIT);
k_thread_name_set(&mgmt_thread_data, "net_mgmt");

NET_DBG("Net MGMT initialized: queue of %u entries, stack size of %u",

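With the Kconfig knob removed, the net_mgmt thread priority is pinned to the lowest priority of whichever scheduling class is in use. As a quick illustration of what the macros resolve to (not part of the patch, and assuming a configuration with both cooperative and preemptive priorities enabled), `K_PRIO_COOP(x)` is effectively `x - CONFIG_NUM_COOP_PRIORITIES` while `K_PRIO_PREEMPT(x)` is just `x`:

```c
#include <kernel.h>

/* Illustration only: the cooperative case resolves to -1 (the lowest
 * cooperative priority) for any CONFIG_NUM_COOP_PRIORITIES, and the
 * preemptive case resolves to the highest numeric, i.e. lowest, priority.
 */
BUILD_ASSERT(K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1) == -1,
	     "net_mgmt runs at the lowest cooperative priority");
BUILD_ASSERT(K_PRIO_PREEMPT(CONFIG_NUM_PREEMPT_PRIORITIES - 1) ==
	     CONFIG_NUM_PREEMPT_PRIORITIES - 1,
	     "net_mgmt runs at the lowest preemptive priority");
```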
@@ -73,7 +73,7 @@ struct dtls_timing_context {
};

/** TLS context information. */
struct tls_context {
__net_socket struct tls_context {
/** Information whether TLS context is used. */
bool is_used;


@@ -190,7 +190,7 @@ static inline void z_zassert(bool cond,
* @param msg Optional message to print if the assertion fails
*/
#define zassert_within(a, b, d, msg, ...) \
zassert(((a) > ((b) - (d))) && ((a) < ((b) + (d))), \
zassert(((a) >= ((b) - (d))) && ((a) <= ((b) + (d))), \
#a " not within " #b " +/- " #d, \
msg, ##__VA_ARGS__)


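The comparison becomes inclusive, so a value that lands exactly on the edge of the tolerance window now passes. A minimal hypothetical test case illustrating the boundary (not taken from the tree):

```c
#include <ztest.h>

/* Hypothetical test case illustrating the inclusive bounds. */
static void test_within_boundary(void)
{
	uint32_t measured = 90;

	/* 90 == 100 - 10: rejected by the old strict comparison,
	 * accepted by the new inclusive one.
	 */
	zassert_within(measured, 100, 10,
		       "measured %u not within 100 +/- 10", measured);
}
```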
@@ -42,6 +42,8 @@ static struct k_thread tx_thread_data;
static bool configured;
static bool suspended;

static uint8_t ep_out_buf[USB_MAX_FS_BULK_MPS];

struct usb_bluetooth_config {
struct usb_if_descriptor if0;
struct usb_ep_descriptor if0_int_ep;
@@ -167,32 +169,97 @@ static void hci_rx_thread(void)
}
}

static uint16_t hci_pkt_get_len(struct net_buf *buf,
const uint8_t *data, size_t size)
{
uint16_t len = 0;
size_t hdr_len = 0;

switch (bt_buf_get_type(buf)) {
case BT_BUF_CMD: {
struct bt_hci_cmd_hdr *cmd_hdr;

hdr_len = sizeof(*cmd_hdr);
cmd_hdr = (struct bt_hci_cmd_hdr *)data;
len = cmd_hdr->param_len + hdr_len;
break;
}
case BT_BUF_ACL_OUT: {
struct bt_hci_acl_hdr *acl_hdr;

hdr_len = sizeof(*acl_hdr);
acl_hdr = (struct bt_hci_acl_hdr *)data;
len = sys_le16_to_cpu(acl_hdr->len) + hdr_len;
break;
}
case BT_BUF_ISO_OUT: {
struct bt_hci_iso_data_hdr *iso_hdr;

hdr_len = sizeof(*iso_hdr);
iso_hdr = (struct bt_hci_iso_data_hdr *)data;
len = sys_le16_to_cpu(iso_hdr->slen) + hdr_len;
break;
}
default:
LOG_ERR("Unknown bt buffer type");
return 0;
}

return (size < hdr_len) ? 0 : len;
}

static void acl_read_cb(uint8_t ep, int size, void *priv)
{
static uint8_t data[USB_MAX_FS_BULK_MPS];
static struct net_buf *buf;
static uint16_t pkt_len;
uint8_t *data = ep_out_buf;

if (size > 0) {
struct net_buf *buf;
if (size == 0) {
goto restart_out_transfer;
}

if (buf == NULL) {
/*
* Obtain the first chunk and determine the length
* of the HCI packet.
*/
if (IS_ENABLED(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4) &&
bt_hci_raw_get_mode() == BT_HCI_RAW_MODE_H4) {
buf = bt_buf_get_tx(BT_BUF_H4, K_FOREVER, data, size);
pkt_len = hci_pkt_get_len(buf, &data[1], size - 1);
LOG_DBG("pkt_len %u, chunk %u", pkt_len, size);
} else {
buf = bt_buf_get_tx(BT_BUF_ACL_OUT, K_FOREVER, data,
size);
buf = bt_buf_get_tx(BT_BUF_ACL_OUT, K_FOREVER,
data, size);
pkt_len = hci_pkt_get_len(buf, data, size);
LOG_DBG("pkt_len %u, chunk %u", pkt_len, size);
}

if (!buf) {
LOG_ERR("Cannot get free TX buffer\n");
return;
if (pkt_len == 0) {
LOG_ERR("Failed to get packet length");
net_buf_unref(buf);
buf = NULL;
}

net_buf_put(&rx_queue, buf);
} else {
/*
* Take over the next chunk if HCI packet is
* larger than USB_MAX_FS_BULK_MPS.
*/
net_buf_add_mem(buf, data, size);
LOG_DBG("len %u, chunk %u", buf->len, size);
}

/* Start a new read transfer */
usb_transfer(bluetooth_ep_data[HCI_OUT_EP_IDX].ep_addr, data,
BT_BUF_ACL_SIZE, USB_TRANS_READ, acl_read_cb, NULL);
if (buf != NULL && pkt_len == buf->len) {
net_buf_put(&rx_queue, buf);
LOG_DBG("put");
buf = NULL;
pkt_len = 0;
}

restart_out_transfer:
usb_transfer(bluetooth_ep_data[HCI_OUT_EP_IDX].ep_addr, ep_out_buf,
sizeof(ep_out_buf), USB_TRANS_READ | USB_TRANS_NO_ZLP,
acl_read_cb, NULL);
}

static void bluetooth_status_cb(struct usb_cfg_data *cfg,

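To make the reassembly logic concrete, here is a worked example with assumed traffic; only the 4-byte ACL header layout and the 64-byte full-speed bulk MPS are taken as given:

```c
/* Worked example with assumed numbers:
 *
 *   - bt_hci_acl_hdr is 4 bytes (2-byte handle + 2-byte length);
 *   - the host sends an ACL_OUT packet whose header reports len == 300,
 *     so hci_pkt_get_len() returns 304;
 *   - with USB_MAX_FS_BULK_MPS == 64 the packet arrives as four 64-byte
 *     chunks plus one 48-byte chunk.
 *
 * acl_read_cb() allocates the net_buf on the first chunk, appends each
 * following chunk with net_buf_add_mem(), and only queues the buffer to
 * rx_queue once buf->len reaches the 304 bytes announced in the header.
 */
```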
@@ -191,6 +191,15 @@ void arm_isr_handler(const void *args)
{
ARG_UNUSED(args);

#if defined(CONFIG_CPU_CORTEX_M) && defined(CONFIG_FPU) && \
defined(CONFIG_FPU_SHARING)
/* Clear the Floating Point Status and Control Register (FPSCR)
* to prevent the interrupt line from being set to pending again,
* in case the FPU IRQ is selected by the test as "Available IRQ line"
*/
__set_FPSCR(0);
#endif

test_flag++;

if (test_flag == 1) {

@@ -21,7 +21,7 @@ LL/CON/INI/BV-19-C
LL/CON/INI/BV-20-C
LL/CON/INI/BV-21-C
LL/CON/INI/BV-23-C
#LL/CON/INI/BV-24-C #currently failing, to be investigated
LL/CON/INI/BV-24-C
LL/CON/MAS/BI-06-C
LL/CON/MAS/BV-03-C
LL/CON/MAS/BV-04-C

@@ -10,3 +10,4 @@ target_sources(app PRIVATE
src/test_uart_poll.c
)
target_sources_ifdef(CONFIG_UART_INTERRUPT_DRIVEN app PRIVATE src/test_uart_fifo.c)
target_sources_ifdef(CONFIG_UART_INTERRUPT_DRIVEN app PRIVATE src/test_uart_pending.c)

@@ -20,6 +20,7 @@ TC_CMD_DEFINE(test_uart_fifo_read)
TC_CMD_DEFINE(test_uart_fifo_fill)
TC_CMD_DEFINE(test_uart_poll_in)
TC_CMD_DEFINE(test_uart_poll_out)
TC_CMD_DEFINE(test_uart_pending)

SHELL_CMD_REGISTER(test_uart_configure, NULL, NULL,
TC_CMD_ITEM(test_uart_configure));
@@ -33,6 +34,8 @@ SHELL_CMD_REGISTER(test_uart_poll_in, NULL, NULL,
TC_CMD_ITEM(test_uart_poll_in));
SHELL_CMD_REGISTER(test_uart_poll_out, NULL, NULL,
TC_CMD_ITEM(test_uart_poll_out));
SHELL_CMD_REGISTER(test_uart_pending, NULL, NULL,
TC_CMD_ITEM(test_uart_pending));
#endif

#ifndef CONFIG_UART_INTERRUPT_DRIVEN
@@ -45,6 +48,11 @@ void test_uart_fifo_read(void)
{
ztest_test_skip();
}

void test_uart_pending(void)
{
ztest_test_skip();
}
#endif

void test_main(void)
@@ -56,7 +64,8 @@ void test_main(void)
ztest_unit_test(test_uart_fifo_fill),
ztest_unit_test(test_uart_fifo_read),
ztest_unit_test(test_uart_poll_in),
ztest_unit_test(test_uart_poll_out));
ztest_unit_test(test_uart_poll_out),
ztest_unit_test(test_uart_pending));
ztest_run_test_suite(uart_basic_test);
#endif
}

@@ -26,5 +26,6 @@ void test_uart_poll_out(void);
void test_uart_fifo_fill(void);
void test_uart_fifo_read(void);
void test_uart_poll_in(void);
void test_uart_pending(void);

#endif /* __TEST_UART_H__ */

tests/drivers/uart/uart_basic_api/src/test_uart_pending.c (new file, 140 lines)
@@ -0,0 +1,140 @@
/*
* Copyright (c) 2021 Linaro Limited
*
* SPDX-License-Identifier: Apache-2.0
*/

/*
* @addtogroup t_uart_basic
* @{
* @defgroup t_uart_fifo test_uart_pending
* @brief TestPurpose: test UART uart_irq_is_pending()
* @details
*
* Test if uart_irq_is_pending() correctly returns 0 when there are no
* more RX and TX pending interrupts.
*
* The test consists of disabling the TX IRQ so no TX interrupts are
* generated and the TX IRQ pending flag is never set. At the same time
* RX IRQ is enabled to let received data cause an RX IRQ and so set the
* RX IRQ pending flag.
*
* Then a message is sent via serial to inform that the test is ready to
* receive serial data, which will trigger an RX IRQ.
*
* Once an RX IRQ happens, RX data is read by uart_fifo_read() until there
* is no more RX data to be popped from FIFO and all IRQs are handled.
* When that happens uart_irq_is_pending() is called and must return 0,
* indicating there are no more pending interrupts to be processed. If 0
* is returned the test passes.
*
* In some cases uart_irq_is_pending() does not correctly use the IRQ
* pending flags to determine if there are pending interrupts, hence
* even though there aren't any further RX and TX IRQs to be processed it
* wrongly returns 1. If 1 is returned the test fails.
*
* @}
*/

#include "test_uart.h"

#define MAX_NUM_TRIES 512
#define NOT_READY 0

#define FAILED 0
#define PASSED 1
#define WAIT 2
static int volatile status;

static void uart_pending_callback(const struct device *dev, void *user_data)
{
ARG_UNUSED(user_data);

int num_tries = 0;
char recv_char;

/*
* If the bug is not present uart_fifo_read() will pop all
* received data until there is no more RX data, thus
* uart_irq_is_pending() must correctly return 0 indicating
* that there are no more RX interrupts to be processed.
* Otherwise uart_irq_is_pending() never returns 0 even though
* there is no more RX data in the RX buffer to be processed,
* so, in that case, the test fails after MAX_NUM_TRIES attempts.
*/
status = PASSED;
while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
if (uart_irq_rx_ready(dev) == NOT_READY) {
if (num_tries < MAX_NUM_TRIES) {
num_tries++;
continue;
} else {
/*
* Bug: no more tries; uart_irq_is_pending()
* always returned 1 in spite of having no more
* RX data to be read from FIFO and no more TX
* data in FIFO to be sent via serial line.
* N.B. uart_irq_update() always returns 1, thus
* uart_irq_is_pending() got stuck without any
* real pending interrupt, i.e. no more RX and
* TX data to be popped or pushed from/to FIFO.
*/
status = FAILED;
break;
}
}

while (uart_fifo_read(dev, &recv_char, 1)) {
/* Echo received char */
TC_PRINT("%c", recv_char);
}
}
}

static int test_pending(void)
{
const struct device *uart_dev = device_get_binding(UART_DEVICE_NAME);

/*
* Set the IRQ callback function to handle the RX IRQ.
*/
uart_irq_callback_set(uart_dev, uart_pending_callback);

/*
* Disable the TX IRQ since transmitted data is not
* handled by uart_pending_callback() and we don't
* want to trigger any TX IRQ for this test.
*/
uart_irq_tx_disable(uart_dev);

/*
* Enable the RX IRQ so uart_pending_callback() can
* handle input data when it is available in the RX FIFO.
*/
uart_irq_rx_enable(uart_dev);

status = WAIT;

/* Inform that the test is ready to receive data */
TC_PRINT("Please send characters to serial console\n");

while (status == WAIT) {
/*
* Wait for the RX handler to change 'status':
* it will change to PASSED or FAILED after
* uart_irq_is_pending() is tested by
* uart_pending_callback() upon data reception.
*/
}

if (status == PASSED) {
return TC_PASS;
} else {
return TC_FAIL;
}
}

void test_uart_pending(void)
{
zassert_true(test_pending() == TC_PASS, NULL);
}
@@ -71,13 +71,13 @@ static void thread_time_slice(void *p1, void *p2, void *p3)
* also expecting task switch below the switching tolerance.
*/
expected_slice_min =
(k_ms_to_ticks_ceil32(SLICE_SIZE)
(k_ms_to_ticks_floor32(SLICE_SIZE)
- switch_tolerance_ticks)
* k_ticks_to_cyc_floor32(1);
expected_slice_max =
(k_ms_to_ticks_ceil32(SLICE_SIZE)
+ switch_tolerance_ticks)
* k_ticks_to_cyc_floor32(1);
* k_ticks_to_cyc_ceil32(1);
}

#ifdef CONFIG_DEBUG

@@ -17,8 +17,8 @@ struct timer_data {
#define DURATION 100
#define PERIOD 50
#define EXPIRE_TIMES 4
#define WITHIN_ERROR(var, target, epsilon) \
(((var) >= (target)) && ((var) <= (target) + (epsilon)))
#define WITHIN_ERROR(var, target, epsilon) (abs((target) - (var)) <= (epsilon))

/* ms can be converted precisely to ticks only when a ms is exactly
* represented by an integral number of ticks. If the conversion is
* not precise, then the reverse conversion of a difference in ms can
@@ -81,6 +81,31 @@ static void init_timer_data(void)
{
tdata.expire_cnt = 0;
tdata.stop_cnt = 0;

k_usleep(1); /* align to tick */
tdata.timestamp = k_uptime_get();
}

static bool interval_check(int64_t interval, int64_t desired)
{
int64_t slop = INEXACT_MS_CONVERT ? 1 : 0;

/* Tickless kernels will advance time inside of an ISR, so it
* is always possible (especially with high tick rates and
* slow CPUs) for us to arrive at the uptime check above too
* late to see a full period elapse before the next period.
* We can alias at both sides of the interval, so two
* one-tick deltas (NOT one two-tick delta!)
*/
if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
slop += 2 * k_ticks_to_ms_ceil32(1);
}

if (abs(interval - desired) > slop) {
return false;
}

return true;
}

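As a worked example of the tolerance this introduces, with assumed numbers rather than anything taken from the test configuration:

```c
/* Worked example: a 100 Hz system tick gives k_ticks_to_ms_ceil32(1) == 10,
 * and with exact ms<->tick conversion INEXACT_MS_CONVERT is false, so on a
 * tickless kernel:
 *
 *   slop = 0 + 2 * 10 = 20 ms
 *
 * interval_check(interval, 100) therefore accepts any measured interval in
 * [80, 120] ms, whereas the old one-sided check rejected anything below
 * 100 ms (or 99 ms with inexact conversion).
 */
```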
/* entry routines */
@@ -91,13 +116,9 @@ static void duration_expire(struct k_timer *timer)

tdata.expire_cnt++;
if (tdata.expire_cnt == 1) {
TIMER_ASSERT((interval >= DURATION)
|| (INEXACT_MS_CONVERT
&& (interval == DURATION - 1)), timer);
TIMER_ASSERT(interval_check(interval, DURATION), timer);
} else {
TIMER_ASSERT((interval >= PERIOD)
|| (INEXACT_MS_CONVERT
&& (interval == PERIOD - 1)), timer);
TIMER_ASSERT(interval_check(interval, PERIOD), timer);
}

if (tdata.expire_cnt >= EXPIRE_TIMES) {
@@ -165,9 +186,7 @@ void test_timer_duration_period(void)
{
init_timer_data();
/** TESTPOINT: init timer via k_timer_init */
k_usleep(1); /* align to tick */
k_timer_start(&duration_timer, K_MSEC(DURATION), K_MSEC(PERIOD));
tdata.timestamp = k_uptime_get();
busy_wait_ms(DURATION + PERIOD * EXPIRE_TIMES + PERIOD / 2);
/** TESTPOINT: check expire and stop times */
TIMER_ASSERT(tdata.expire_cnt == EXPIRE_TIMES, &duration_timer);
@@ -236,7 +255,6 @@ void test_timer_period_0(void)
- BUSY_SLEW_THRESHOLD_TICKS(DURATION
* USEC_PER_MSEC)),
K_NO_WAIT);
tdata.timestamp = k_uptime_get();
busy_wait_ms(DURATION + 1);

/** TESTPOINT: ensure it is a one-shot timer */
@@ -454,6 +472,7 @@ void test_timer_status_sync(void)
TIMER_ASSERT(tdata.expire_cnt == (i + 1), &status_sync_timer);
}

init_timer_data();
k_timer_start(&status_sync_timer, K_MSEC(DURATION), K_MSEC(PERIOD));
busy_wait_ms(PERIOD*2);
zassert_true(k_timer_status_sync(&status_sync_timer), NULL);
@@ -482,9 +501,7 @@ void test_timer_k_define(void)
{
init_timer_data();
/** TESTPOINT: init timer via k_timer_init */
k_usleep(1); /* align to tick */
k_timer_start(&ktimer, K_MSEC(DURATION), K_MSEC(PERIOD));
tdata.timestamp = k_uptime_get();
busy_wait_ms(DURATION + PERIOD * EXPIRE_TIMES + PERIOD / 2);

/** TESTPOINT: check expire and stop times */
@@ -620,7 +637,6 @@ void test_timer_remaining(void)


init_timer_data();
k_usleep(1); /* align to tick */
k_timer_start(&remain_timer, K_MSEC(DURATION), K_NO_WAIT);
busy_wait_ms(DURATION / 2);
rem_ticks = k_timer_remaining_ticks(&remain_timer);
@@ -700,7 +716,7 @@ void test_timeout_abs(void)
* ticks, so we have to check that at least one case is
* satisfied.
*/
k_usleep(1); /* align to tick */
init_timer_data();
k_timer_start(&remain_timer, t, K_FOREVER);
cap_ticks = k_uptime_ticks();
rem_ticks = k_timer_remaining_ticks(&remain_timer);

@@ -207,7 +207,6 @@ CONFIG_NET_CONFIG_NEED_IPV6=y
CONFIG_NET_MGMT=y
CONFIG_NET_MGMT_EVENT=y
CONFIG_NET_MGMT_EVENT_STACK_SIZE=800
CONFIG_NET_MGMT_EVENT_THREAD_PRIO=66
CONFIG_NET_MGMT_EVENT_QUEUE_SIZE=2
CONFIG_NET_MGMT_EVENT_LOG_LEVEL_DBG=y
CONFIG_NET_DEBUG_MGMT_EVENT_STACK=y

@@ -9,7 +9,6 @@ CONFIG_DNS_NUM_CONCUR_QUERIES=1
CONFIG_NET_LOG=y
CONFIG_NET_MGMT=y
CONFIG_NET_MGMT_EVENT=y
CONFIG_NET_MGMT_EVENT_THREAD_PRIO=7
CONFIG_NET_MGMT_EVENT_QUEUE_SIZE=2
CONFIG_NET_IPV4=y
CONFIG_NET_IPV6=y
