kernel: remove old micro/nanokernel C code

include/ will be cleaned up in a subsequent patch.

Change-Id: If3609f5fc8562ec4a6fec4592aefeec155599cfb
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie <andrew.p.boie@intel.com> committed 2016-11-07 08:29:57 -08:00
parent 5cf1b90461
commit 4f798177cf
62 changed files with 0 additions and 13384 deletions

@@ -1,4 +0,0 @@
ifneq ($(CONFIG_KERNEL_V2),y)
obj-y = nanokernel/
obj-$(CONFIG_MICROKERNEL) += microkernel/
endif

@@ -1,171 +0,0 @@
# Kconfig - microkernel configuration options
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
menu "Microkernel Options"
config MICROKERNEL_SERVER_STACK_SIZE
int
prompt "Microkernel server fiber (_k_server) stack size"
default 1024
depends on MICROKERNEL
help
This option specifies the size of the stack used by the microkernel
server fiber, whose entry point is _k_server(). This must be able
to handle the deepest call stack for internal handling of microkernel requests.
config MICROKERNEL_SERVER_PRIORITY
int
prompt "Priority of the kernel service fiber"
default 0
depends on MICROKERNEL
help
Priority of the microkernel server fiber that performs
kernel requests and task scheduling assignments.
config PRIORITY_CEILING
int
prompt "Maximum priority for priority inheritance algorithm"
default 0
depends on MICROKERNEL
help
The highest task priority for the mutex priority inheritance
algorithm.
A task of low priority holding a mutex will see its priority
bumped to the priority of a task trying to acquire the mutex.
This option puts an upper boundary to the priority a task may
get bumped to.
config COMMAND_STACK_SIZE
int
prompt "Microkernel server command stack size (in packets)"
default 64
depends on MICROKERNEL
help
This option specifies the maximum number of command packets that
can be queued up for processing by the kernel's _k_server fiber.
config NUM_COMMAND_PACKETS
int
prompt "Number of command packets"
default 16
depends on MICROKERNEL
help
This option specifies the number of packets in the command packet pool.
This pool needs to be large enough to accommodate all in-flight
asynchronous command requests as well as those internally issued by
the microkernel server fiber (_k_server).
config NUM_TIMER_PACKETS
int
prompt "Number of timer packets" if SYS_CLOCK_EXISTS
default 0 if !SYS_CLOCK_EXISTS
default 10 if SYS_CLOCK_EXISTS
depends on MICROKERNEL
help
This option specifies the number of timer packets to create. Each
explicit and implicit timer usage consumes one timer packet.
config NUM_TASK_PRIORITIES
int
prompt "Number of task priorities"
default 16
range 1 256
depends on MICROKERNEL
help
This option specifies the number of task priorities supported by the
task scheduler. Specifying "N" provides support for task priorities
ranging from 0 (highest) through N-2; task priority N-1 (lowest) is
reserved for the kernel's idle task.
config WORKLOAD_MONITOR
bool
prompt "Workload monitoring [EXPERIMENTAL]"
default n
depends on MICROKERNEL
help
This option instructs the kernel to record the percentage of time
the system is doing useful work (i.e. is not idle).
menu "Timer API Options"
config TIMESLICING
bool
prompt "Task time slicing"
default y
depends on MICROKERNEL && SYS_CLOCK_EXISTS
help
This option enables time slicing between tasks of equal priority.
config TIMESLICE_SIZE
int
prompt "Time slice size (in ticks)"
default 0
depends on TIMESLICING
help
This option specifies the maximum amount of time a task can execute
before other tasks of equal priority are given an opportunity to run.
A time slice size of zero means "no limit" (i.e. an infinitely large
time slice).
config TIMESLICE_PRIORITY
int
prompt "Time slicing task priority threshold"
default 0
depends on TIMESLICING
help
This option specifies the task priority level at which time slicing
takes effect; tasks having a higher priority than this threshold
are not subject to time slicing. A threshold level of zero means
that all tasks are potentially subject to time slicing.
endmenu
config TASK_MONITOR
bool
prompt "Task monitoring [EXPERIMENTAL]"
default n
depends on MICROKERNEL && KERNEL_EVENT_LOGGER
help
This option instructs the kernel to record significant task
activities. These can include: task switches, task state changes,
kernel service requests, and the signalling of events.
config TASK_MONITOR_MASK
int
prompt "Trace buffer mask"
default 15
depends on TASK_MONITOR
help
This option specifies which task execution activities are captured
in the task monitor's trace buffer. The following values can be
OR-ed together to form the mask:
1 (MON_TSWAP): task switch
2 (MON_STATE): task state change
4 (MON_KSERV): task execution of kernel APIs
8 (MON_EVENT): task event signalled
config OBJECT_MONITOR
bool
prompt "Kernel object monitoring [EXPERIMENTAL]"
default n
depends on MICROKERNEL
help
This option instructs the kernel to record statistics about
microkernel object usage.
endmenu

@@ -1,26 +0,0 @@
ccflags-y +=-I$(srctree)/kernel/microkernel/include
ccflags-y +=-I$(srctree)/kernel/nanokernel/include
obj-y = k_task.o
obj-y += k_idle.o
obj-y += k_init.o
obj-y += k_command_packet.o
obj-y += k_move_data.o
obj-y += k_ticker.o
obj-y += k_memory_map.o
obj-y += k_memory_pool.o
obj-y += k_nop.o
obj-y += k_offload.o
obj-y += k_event.o
obj-y += k_mailbox.o
obj-y += k_mutex.o
obj-y += k_fifo.o
obj-y += k_semaphore.o
obj-y += k_timer.o
obj-y += k_pipe_buffer.o k_pipe.o k_pipe_get.o \
k_pipe_put.o k_pipe_util.o k_pipe_xfer.o
obj-y += k_nano.o
obj-$(CONFIG_MICROKERNEL) += k_server.o
obj-$(CONFIG_TASK_MONITOR) += k_task_monitor.o

@@ -1,63 +0,0 @@
/* k_pipe_buffer.h */
/*
* Copyright (c) 1997-2010, 2014-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _K_PIPE_BUFFER_H
#define _K_PIPE_BUFFER_H
#ifdef __cplusplus
extern "C" {
#endif
#include <micro_private_types.h>
void BuffInit(unsigned char *pBuffer,
int *piBuffSize,
struct _k_pipe_desc *desc);
void BuffGetFreeSpaceTotal(struct _k_pipe_desc *desc, int *piTotalFreeSpace);
void BuffGetFreeSpace(struct _k_pipe_desc *desc,
int *piTotalFreeSpace,
int *free_space_count_ptr,
int *free_space_post_wrap_around_ptr);
void BuffGetAvailDataTotal(struct _k_pipe_desc *desc, int *piAvailDataTotal);
void BuffGetAvailData(struct _k_pipe_desc *desc,
int *piAvailDataTotal,
int *available_data_count_ptr,
int *available_data_post_wrap_around_ptr);
int BuffEmpty(struct _k_pipe_desc *desc);
int BuffFull(struct _k_pipe_desc *desc);
int BuffEnQ(struct _k_pipe_desc *desc, int size, unsigned char **ppWrite);
int BuffEnQA(struct _k_pipe_desc *desc, int size, unsigned char **ppWrite,
int *piTransferID);
void BuffEnQA_End(struct _k_pipe_desc *desc, int iTransferID,
int size /* optional */);
int BuffDeQ(struct _k_pipe_desc *desc, int size, unsigned char **ppRead);
int BuffDeQA(struct _k_pipe_desc *desc, int size, unsigned char **ppRead,
int *piTransferID);
void BuffDeQA_End(struct _k_pipe_desc *desc, int iTransferID,
int size /* optional */);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* _K_PIPE_BUFFER_H */

@@ -1,68 +0,0 @@
/* k_pipe_util.h */
/*
* Copyright (c) 1997-2012, 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _K_PIPE_UTIL_H
#define _K_PIPE_UTIL_H
/* high-level behavior of the pipe service */
#define CANCEL_TIMERS
typedef uint32_t REQ_TYPE;
#define _ALLREQ ((REQ_TYPE)0x0000FF00)
#define _SYNCREQ ((REQ_TYPE)0x00000100)
#define _SYNCREQL ((REQ_TYPE)0x00000200)
#define _ASYNCREQ ((REQ_TYPE)0x00000400)
typedef uint32_t TIME_TYPE;
#define _ALLTIME ((TIME_TYPE)0x00FF0000)
#define _TIME_NB ((TIME_TYPE)0x00010000)
#define _TIME_B ((TIME_TYPE)0x00020000)
#define _TIME_BT ((TIME_TYPE)0x00040000)
#ifdef __cplusplus
extern "C" {
#endif
extern void _k_pipe_process(struct _k_pipe_struct *pipe_ptr,
struct k_args *writer_ptr, struct k_args *reader_ptr);
extern void mycopypacket(struct k_args **out, struct k_args *in);
int CalcFreeReaderSpace(struct k_args *pReaderList);
int CalcAvailWriterData(struct k_args *pWriterList);
void DeListWaiter(struct k_args *pReqProc);
void myfreetimer(struct k_timer **ppTimer);
K_PIPE_OPTION _k_pipe_option_get(K_ARGS_ARGS *args);
void _k_pipe_option_set(K_ARGS_ARGS *args, K_PIPE_OPTION option);
REQ_TYPE _k_pipe_request_type_get(K_ARGS_ARGS *args);
void _k_pipe_request_type_set(K_ARGS_ARGS *args, REQ_TYPE req_type);
void _k_pipe_request_status_set(struct _pipe_xfer_req_arg *pipe_xfer_req,
PIPE_REQUEST_STATUS status);
TIME_TYPE _k_pipe_time_type_get(K_ARGS_ARGS *args);
void _k_pipe_time_type_set(K_ARGS_ARGS *args, TIME_TYPE TimeType);
#ifdef __cplusplus
}
#endif
#endif /* _K_PIPE_UTIL_H */

@@ -1,287 +0,0 @@
/*
* Copyright (c) 1997-2012, 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Non-public microkernel APIs referenced by kernel_main.c.
*/
#ifndef _ENTRIES_H
#define _ENTRIES_H
#ifdef __cplusplus
extern "C" {
#endif
/* Format of a task's entry routine and (optional) abort routine */
typedef void (*taskstartfunction)(void);
typedef void (*taskabortfunction)(void);
/* APIs referenced by generated data structures */
extern int _k_ticker(int event);
/* APIs referenced by generated routines */
extern void _k_pipe_init(void);
extern void _k_mem_map_init(void);
extern void _k_mem_pool_init(void);
/* Format of routines invoked by microkernel server */
typedef void (*kernelfunc)(struct k_args *);
/* Jumptable entrypoints */
extern void _k_nop(struct k_args *);
extern void _k_offload_to_fiber(struct k_args *);
extern void _k_workload_get(struct k_args *);
/**
*
* @brief Handle semaphore signal request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_signal(struct k_args *);
/**
*
* @brief Handle signal semaphore group request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_signal(struct k_args *);
/**
*
* @brief Handle semaphore reset request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_reset(struct k_args *);
/**
*
* @brief Handle semaphore group reset request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_reset(struct k_args *);
/**
*
* @brief Handle internal wait request on a semaphore involved in a
* semaphore group wait request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_wait_request(struct k_args *);
/**
*
* @brief Reply to a semaphore wait request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_wait_reply(struct k_args *);
/**
*
* @brief Reply to a semaphore wait request with timeout.
*
* @param A Pointer to a k_args structure.
*
* @return N/A
*
* @sa _k_sem_wait_reply
*/
extern void _k_sem_wait_reply_timeout(struct k_args *A);
/**
*
* @brief Handle semaphore group wait request
*
* This routine splits the single semaphore group wait request into several
* internal wait requests--one for each semaphore in the group.
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_wait_any(struct k_args *);
/**
*
* @brief Handle semaphore test and wait request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_wait_request(struct k_args *);
/**
*
* @brief Handle semaphore ready request
*
* This routine only applies to semaphore group wait requests. It identifies
* the one semaphore in the group that "won" the semaphore group wait request
* before triggering the semaphore group timeout handler.
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_ready(struct k_args *);
/**
*
* @brief Handle cancellation of a semaphore involved in a
* semaphore group wait request
*
* This routine only applies to semaphore group wait requests. It is invoked
* for each semaphore in the semaphore group that "lost" the semaphore group
* wait request.
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_wait_cancel(struct k_args *);
/**
*
* @brief Handle acceptance of the ready semaphore request
*
* This routine only applies to semaphore group wait requests. It handles
* the request for the one semaphore in the group that "wins" the semaphore
* group wait request.
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_wait_accept(struct k_args *);
/**
*
* @brief Finish handling incomplete waits on semaphores
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_wait(struct k_args *);
/**
*
* @brief Handle semaphore group timeout request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_group_wait_timeout(struct k_args *);
/**
*
* @brief Handle semaphore inquiry request
*
* @param k_args Pointer to a k_args structure.
*
* @return N/A
*/
extern void _k_sem_inquiry(struct k_args *);
extern void _k_mutex_lock_request(struct k_args *);
extern void _k_mutex_lock_reply(struct k_args *);
extern void _k_mutex_lock_reply_timeout(struct k_args *);
extern void _k_mutex_unlock(struct k_args *);
extern void _k_fifo_enque_request(struct k_args *);
extern void _k_fifo_enque_reply(struct k_args *);
extern void _k_fifo_enque_reply_timeout(struct k_args *);
extern void _k_fifo_deque_request(struct k_args *);
extern void _k_fifo_deque_reply(struct k_args *);
extern void _k_fifo_deque_reply_timeout(struct k_args *);
extern void _k_fifo_ioctl(struct k_args *);
extern void _k_mbox_send_request(struct k_args *);
extern void _k_mbox_send_reply(struct k_args *);
extern void _k_mbox_send_ack(struct k_args *);
extern void _k_mbox_send_data(struct k_args *);
extern void _k_mbox_receive_request(struct k_args *);
extern void _k_mbox_receive_reply(struct k_args *);
extern void _k_mbox_receive_ack(struct k_args *);
extern void _k_mbox_receive_data(struct k_args *);
extern void _k_task_sleep(struct k_args *);
extern void _k_task_wakeup(struct k_args *);
extern void _k_task_op(struct k_args *);
extern void _k_task_group_op(struct k_args *);
extern void _k_task_priority_set(struct k_args *);
extern void _k_task_yield(struct k_args *);
extern void _k_mem_map_alloc(struct k_args *);
extern void _k_mem_map_dealloc(struct k_args *);
extern void _k_timer_alloc(struct k_args *);
extern void _k_timer_dealloc(struct k_args *);
extern void _k_timer_start(struct k_args *);
extern void _k_timer_stop(struct k_args *);
extern void _k_mem_map_alloc_timeout(struct k_args *);
extern void _k_event_test(struct k_args *);
extern void _k_event_handler_set(struct k_args *);
extern void _k_event_signal(struct k_args *);
extern void _k_mem_pool_block_get(struct k_args *);
extern void _k_mem_pool_block_release(struct k_args *);
extern void _k_block_waiters_get(struct k_args *);
extern void _k_mem_pool_block_get_timeout_handle(struct k_args *);
extern void _k_defrag(struct k_args *);
extern void _k_movedata_request(struct k_args *Req);
extern void K_mvdsndreq(struct k_args *SndReq);
extern void K_mvdrcvreq(struct k_args *RcvReq);
extern void K_rawdata(struct k_args *DataPacket);
extern void K_mvdsndack(struct k_args *SndDAck);
extern void K_mvdrcvack(struct k_args *RcvDAck);
extern void _k_pipe_put_request(struct k_args *Writer);
extern void _k_pipe_put_timeout(struct k_args *Writer);
extern void _k_pipe_put_reply(struct k_args *Writer);
extern void _k_pipe_put_ack(struct k_args *Writer);
extern void _k_pipe_get_request(struct k_args *Reader);
extern void _k_pipe_get_timeout(struct k_args *Reader);
extern void _k_pipe_get_reply(struct k_args *Reader);
extern void _k_pipe_get_ack(struct k_args *Reader);
extern void _k_pipe_movedata_ack(struct k_args *pEOXfer);
extern void _k_event_test_timeout(struct k_args *A);
#ifdef __cplusplus
}
#endif
#endif /* _ENTRIES_H */

@@ -1,246 +0,0 @@
/* micro_private.h */
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINIK_H
#define MINIK_H
#include <stddef.h>
#include <micro_private_types.h>
#include <kernel_main.h>
#include <nano_private.h>
#include <misc/__assert.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* The 2 least significant bits of the commands placed on the microkernel
* server's command stack identify the type of command; the remaining bits
* are the actual argument for the command. (This works because the actual
* arguments are always multiples of 4.)
*/
/* process the specified command packet (containing command & argument info) */
#define KERNEL_CMD_PACKET_TYPE (0u)
/* give the specified event */
#define KERNEL_CMD_EVENT_TYPE (1u)
/* give the specified semaphore */
#define KERNEL_CMD_SEMAPHORE_TYPE (2u)
/* not used */
#define KERNEL_CMD_RESERVED_TYPE (3u)
/* mask for isolating the 2 type bits */
#define KERNEL_CMD_TYPE_MASK (3u)
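/*
 * Illustrative sketch (editorial addition, not code from this tree): a
 * command word built as "argument | type" can be taken apart again by
 * masking the two type bits.  cmd_type() and cmd_arg() are hypothetical
 * helper names; isr_event_send() in k_event.c (removed later in this
 * commit) builds such a word with "(uint32_t)event | KERNEL_CMD_EVENT_TYPE".
 */
static inline uint32_t cmd_type(uint32_t cmd)
{
	return cmd & KERNEL_CMD_TYPE_MASK;            /* isolate the 2 type bits */
}

static inline uint32_t cmd_arg(uint32_t cmd)
{
	return cmd & ~(uint32_t)KERNEL_CMD_TYPE_MASK; /* argument is a multiple of 4 */
}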
#define KERNEL_ENTRY(A) _k_task_call(A)
#define OBJ_INDEX(objId) ((uint16_t)objId)
extern struct k_tqhd _k_task_priority_list[];
extern struct pool_struct _k_mem_pool_list[];
extern int _k_mem_pool_count;
extern const kmemory_pool_t _heap_mem_pool_id;
extern struct k_task *_k_current_task;
extern uint32_t _k_task_priority_bitmap[];
extern struct k_timer *_k_timer_list_head;
extern struct k_timer *_k_timer_list_tail;
extern struct nano_stack _k_command_stack;
extern struct nano_lifo _k_server_command_packet_free;
extern struct nano_lifo _k_timer_free;
extern void _k_timer_enlist(struct k_timer *T);
extern void _k_timer_delist(struct k_timer *T);
extern void _k_timeout_alloc(struct k_args *P);
extern void _k_timeout_free(struct k_timer *T);
extern void _k_timeout_cancel(struct k_args *A);
extern void _k_timer_list_update(int ticks);
extern void _k_do_event_signal(kevent_t event);
extern void _k_state_bit_set(struct k_task *, uint32_t);
extern void _k_state_bit_reset(struct k_task *, uint32_t);
extern void _k_task_call(struct k_args *);
/*
* The task status flags may be OR'ed together to form a task's state. The
* existence of one or more non-zero bits indicates that the task can not be
* scheduled for execution because of the conditions associated with those
* bits. The task status flags are divided into four (4) groups as follows:
*
* Break flags (bits 0..5) are associated with conditions that require an
* external entity to permit further execution.
*
* Spare flags (bits 6..9) are located between the break and wait flags
* to allow either set to be extended without impacting the other group.
*
* Wait flags (bits 10..27) are associated with operations that the task itself
* initiated, and for which task execution will resume when the requested
* operation completes.
*
* Monitoring bits (bits 28..31) are reserved for use with task level
* monitoring.
*/
#define TF_STOP 0x00000001 /* Not started */
#define TF_TERM 0x00000002 /* Terminated */
#define TF_SUSP 0x00000004 /* Suspended */
#define TF_BLCK 0x00000008 /* Blocked */
#define TF_GDBSTOP 0x00000010 /* Stopped by GDB agent */
#define TF_PRIO 0x00000020 /* Task priority is changing */
#define TF_NANO 0x00000400 /* Waiting on a nanokernel object */
#define TF_TIME 0x00000800 /* Sleeping */
#define TF_DRIV 0x00001000 /* Waiting for arch specific driver */
#define TF_RES0 0x00002000 /* Reserved */
#define TF_EVNT 0x00004000 /* Waiting for an event */
#define TF_ENQU 0x00008000 /* Waiting to put data on a FIFO */
#define TF_DEQU 0x00010000 /* Waiting to get data from a FIFO */
#define TF_SEND 0x00020000 /* Waiting to send via mailbox or pipe */
#define TF_RECV 0x00040000 /* Waiting to recv via mailbox or pipe */
#define TF_SEMA 0x00080000 /* Waiting for a semaphore */
#define TF_LIST 0x00100000 /* Waiting for a group of semaphores */
#define TF_LOCK 0x00200000 /* Waiting for a mutex */
#define TF_ALLO 0x00400000 /* Waiting on a memory mapping */
#define TF_GTBL 0x00800000 /* Waiting on a memory pool */
#define TF_RES1 0x01000000 /* Reserved */
#define TF_RES2 0x02000000 /* Reserved */
#define TF_RECVDATA 0x04000000 /* Waiting to receive data */
#define TF_SENDDATA 0x08000000 /* Waiting to send data */
#define TF_ALLW 0x0FFFFC00 /* Mask of all wait flags */
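/*
 * Illustrative sketch (editorial addition, not code from this tree): a task's
 * state word is the OR of whatever flags above currently apply, for example
 * TF_SUSP | TF_SEMA for a task that is suspended while waiting on a
 * semaphore.  The task is schedulable only when the word is zero.
 */
static inline int task_is_schedulable(uint32_t state_bits)
{
	return state_bits == 0;                 /* no break or wait condition set */
}

static inline int task_is_waiting(uint32_t state_bits)
{
	return (state_bits & TF_ALLW) != 0;     /* blocked on some wait condition */
}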
#ifdef CONFIG_TASK_DEBUG
extern int _k_debug_halt;
#endif
#ifdef CONFIG_TASK_MONITOR
#define MON_TSWAP 1
#define MON_STATE 2
#define MON_KSERV 4
#define MON_EVENT 8
#define MON_ALL 15
typedef void (*k_task_monitor_hook_t)(ktask_t taskid, uint32_t timestamp);
extern void task_monitor_hook_set(k_task_monitor_hook_t func);
extern void _k_task_monitor(struct k_task *, uint32_t d2);
extern void _k_task_monitor_args(struct k_args *);
extern void _k_task_monitor_read(struct k_args *);
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
extern int _k_monitor_mask;
#else
extern const int _k_monitor_mask;
#endif
/* task level monitor bits */
#define MO_STBIT0 0x20000000
#define MO_STBIT1 0x30000000
#define MO_EVENT 0x40000000
#define MO_LCOMM 0x50000000
#define MO_RCOMM 0x60000000
#endif
#ifdef CONFIG_WORKLOAD_MONITOR
extern void _k_workload_monitor_calibrate(void);
extern void _k_workload_monitor_update(void);
extern void _k_workload_monitor_idle_start(void);
extern void _k_workload_monitor_idle_end(void);
#else
#define _k_workload_monitor_update() do { /* nothing */ } while (0)
#endif
#define INSERT_ELM(L, E) \
{ \
struct k_args *X = (L); \
struct k_args *Y = NULL; \
while (X && (X->priority <= (E)->priority)) { \
Y = X; \
X = X->next; \
} \
if (Y) \
Y->next = (E); \
else \
(L) = (E); \
(E)->next = X; \
(E)->head = &(L); \
}
#define REMOVE_ELM(E) \
{ \
struct k_args *X = *((E)->head); \
struct k_args *Y = NULL; \
\
while (X && (X != (E))) { \
Y = X; \
X = X->next; \
} \
if (X) { \
if (Y) \
Y->next = X->next; \
else \
*((E)->head) = X->next; \
} \
}
#define GETARGS(A) \
do { \
(A) = _nano_fiber_lifo_get_panic(&_k_server_command_packet_free); \
} while (0)
#define GETTIMER(T) \
do { \
(T) = _nano_fiber_lifo_get_panic(&_k_timer_free); \
} while (0)
#define FREEARGS(A) nano_fiber_lifo_put(&_k_server_command_packet_free, (A))
#define FREETIMER(T) nano_fiber_lifo_put(&_k_timer_free, (T))
#define TO_ALIST(L, A) nano_fiber_stack_push((L), (uint32_t)(A))
#define _COMMAND_STACK_SIZE_CHECK() do { \
__ASSERT((_k_command_stack.next - _k_command_stack.base) \
< CONFIG_COMMAND_STACK_SIZE, \
"microkernel server command stack exceeded\n"); \
} while ((0))
#define SENDARGS(A) do { \
_COMMAND_STACK_SIZE_CHECK(); \
nano_fiber_stack_push(&_k_command_stack, (uint32_t)(A)); \
} while ((0))
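/*
 * Usage sketch (editorial addition, not code from this tree): a service
 * running in the _k_server fiber would typically allocate a packet, aim it
 * at a handler and queue it.  queue_nop_request() is a hypothetical name;
 * _K_SVC_NOP and the k_args fields come from micro_private_types.h, which
 * is removed later in this commit.
 */
static inline void queue_nop_request(void)
{
	struct k_args *A;

	GETARGS(A);            /* take a packet from the free pool (panics if empty) */
	A->Comm = _K_SVC_NOP;  /* handler _k_server will invoke for this packet */
	A->alloc = true;       /* pool-allocated, assumed to be freed via FREEARGS() */
	SENDARGS(A);           /* overflow check, then push onto the command stack */
}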
#ifdef __cplusplus
}
#endif
#endif

@@ -1,409 +0,0 @@
/* major non-public microkernel structures */
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _MICRO_PRIVATE_TYPES_H
#define _MICRO_PRIVATE_TYPES_H
#include <microkernel/base_api.h>
#include <nanokernel.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef union k_args_args K_ARGS_ARGS;
/* Kernel timer structure */
struct k_timer {
struct k_timer *next;
struct k_timer *prev;
int32_t duration;
int32_t period;
struct k_args *args;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
/*List all user allocated timers*/
struct k_timer *__next;
struct k_timer *__prev;
#endif
};
/* Kernel server command codes */
#define _K_SVC_UNDEFINED (NULL)
#define _K_SVC_BLOCK_WAITERS_GET _k_block_waiters_get
#define _K_SVC_DEFRAG _k_defrag
#define _K_SVC_MOVEDATA_REQ _k_movedata_request
#define _K_SVC_NOP _k_nop
#define _K_SVC_OFFLOAD_TO_FIBER _k_offload_to_fiber
#define _K_SVC_WORKLOAD_GET _k_workload_get
#define _K_SVC_EVENT_HANDLER_SET _k_event_handler_set
#define _K_SVC_EVENT_SIGNAL _k_event_signal
#define _K_SVC_EVENT_TEST _k_event_test
#define _K_SVC_EVENT_TEST_TIMEOUT _k_event_test_timeout
#define _K_SVC_SEM_INQUIRY _k_sem_inquiry
#define _K_SVC_SEM_SIGNAL _k_sem_signal
#define _K_SVC_SEM_RESET _k_sem_reset
#define _K_SVC_SEM_WAIT_REQUEST _k_sem_wait_request
#define _K_SVC_SEM_WAIT_REPLY _k_sem_wait_reply
#define _K_SVC_SEM_WAIT_REPLY_TIMEOUT _k_sem_wait_reply_timeout
#define _K_SVC_SEM_GROUP_SIGNAL _k_sem_group_signal
#define _K_SVC_SEM_GROUP_RESET _k_sem_group_reset
#define _K_SVC_SEM_GROUP_WAIT _k_sem_group_wait
#define _K_SVC_SEM_GROUP_WAIT_ANY _k_sem_group_wait_any
#define _K_SVC_SEM_GROUP_WAIT_ACCEPT _k_sem_group_wait_accept
#define _K_SVC_SEM_GROUP_WAIT_CANCEL _k_sem_group_wait_cancel
#define _K_SVC_SEM_GROUP_WAIT_READY _k_sem_group_ready
#define _K_SVC_SEM_GROUP_WAIT_REQUEST _k_sem_group_wait_request
#define _K_SVC_SEM_GROUP_WAIT_TIMEOUT _k_sem_group_wait_timeout
#define _K_SVC_MUTEX_LOCK_REQUEST _k_mutex_lock_request
#define _K_SVC_MUTEX_LOCK_REPLY _k_mutex_lock_reply
#define _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT _k_mutex_lock_reply_timeout
#define _K_SVC_MUTEX_UNLOCK _k_mutex_unlock
#define _K_SVC_FIFO_ENQUE_REQUEST _k_fifo_enque_request
#define _K_SVC_FIFO_ENQUE_REPLY _k_fifo_enque_reply
#define _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT _k_fifo_enque_reply_timeout
#define _K_SVC_FIFO_DEQUE_REQUEST _k_fifo_deque_request
#define _K_SVC_FIFO_DEQUE_REPLY _k_fifo_deque_reply
#define _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT _k_fifo_deque_reply_timeout
#define _K_SVC_FIFO_IOCTL _k_fifo_ioctl
#define _K_SVC_MBOX_SEND_REQUEST _k_mbox_send_request
#define _K_SVC_MBOX_SEND_REPLY _k_mbox_send_reply
#define _K_SVC_MBOX_SEND_ACK _k_mbox_send_ack
#define _K_SVC_MBOX_SEND_DATA _k_mbox_send_data
#define _K_SVC_MBOX_RECEIVE_REQUEST _k_mbox_receive_request
#define _K_SVC_MBOX_RECEIVE_REPLY _k_mbox_receive_reply
#define _K_SVC_MBOX_RECEIVE_ACK _k_mbox_receive_ack
#define _K_SVC_MBOX_RECEIVE_DATA _k_mbox_receive_data
#define _K_SVC_TASK_SLEEP _k_task_sleep
#define _K_SVC_TASK_WAKEUP _k_task_wakeup
#define _K_SVC_TASK_OP _k_task_op
#define _K_SVC_TASK_GROUP_OP _k_task_group_op
#define _K_SVC_TASK_PRIORITY_SET _k_task_priority_set
#define _K_SVC_TASK_YIELD _k_task_yield
#define _K_SVC_MEM_MAP_ALLOC _k_mem_map_alloc
#define _K_SVC_MEM_MAP_ALLOC_TIMEOUT _k_mem_map_alloc_timeout
#define _K_SVC_MEM_MAP_DEALLOC _k_mem_map_dealloc
#define _K_SVC_TIMER_ALLOC _k_timer_alloc
#define _K_SVC_TIMER_DEALLOC _k_timer_dealloc
#define _K_SVC_TIMER_START _k_timer_start
#define _K_SVC_TIMER_STOP _k_timer_stop
#define _K_SVC_MEM_POOL_BLOCK_GET _k_mem_pool_block_get
#define _K_SVC_MEM_POOL_BLOCK_GET_TIMEOUT_HANDLE _k_mem_pool_block_get_timeout_handle
#define _K_SVC_MEM_POOL_BLOCK_RELEASE _k_mem_pool_block_release
#define _K_SVC_PIPE_PUT_REQUEST _k_pipe_put_request
#define _K_SVC_PIPE_PUT_TIMEOUT _k_pipe_put_timeout
#define _K_SVC_PIPE_PUT_REPLY _k_pipe_put_reply
#define _K_SVC_PIPE_PUT_ACK _k_pipe_put_ack
#define _K_SVC_PIPE_GET_REQUEST _k_pipe_get_request
#define _K_SVC_PIPE_GET_TIMEOUT _k_pipe_get_timeout
#define _K_SVC_PIPE_GET_REPLY _k_pipe_get_reply
#define _K_SVC_PIPE_GET_ACK _k_pipe_get_ack
#define _K_SVC_PIPE_MOVEDATA_ACK _k_pipe_movedata_ack
/* Task queue header */
struct k_tqhd {
struct k_task *head;
struct k_task *tail;
};
typedef enum {
XFER_UNDEFINED,
XFER_W2B,
XFER_B2R,
XFER_W2R
} XFER_TYPE;
typedef enum {
XFER_IDLE = 0x0001,
XFER_BUSY = 0x0002,
TERM_FORCED = 0x0010,
TERM_SATISFIED = 0x0020,
TERM_TMO = 0x0040,
TERM_XXX = TERM_FORCED | TERM_SATISFIED | TERM_TMO
} PIPE_REQUEST_STATUS;
struct req_info {
union {
kpipe_t id;
struct _k_pipe_struct *ptr;
} pipe;
int params;
};
struct sync_req {
void *data_ptr;
int total_size;
};
struct async_req {
struct k_block block;
int total_size;
ksem_t sema;
};
struct _pipe_req_arg {
struct req_info req_info;
union {
struct sync_req sync;
struct async_req async;
} req_type;
int dummy;
};
struct _pipe_xfer_req_arg {
struct req_info req_info;
void *data_ptr; /* if NULL, data is embedded in cmd packet */
int total_size; /* total size of data/free space */
int xferred_size; /* size of data ALREADY Xferred */
PIPE_REQUEST_STATUS status; /* status of processing of request */
int num_pending_xfers; /* # data Xfers (still) in progress */
};
struct _pipe_ack_arg {
struct req_info req_info;
union {
struct sync_req dummy;
struct async_req async;
} req_type;
int xferred_size;
};
struct _pipe_xfer_ack_arg {
struct _k_pipe_struct *pipe_ptr;
XFER_TYPE xfer_type; /* W2B, B2R or W2R */
struct k_args *writer_ptr; /* if there's a writer involved,
* this is the link to it
*/
struct k_args *reader_ptr; /* if there's a reader involved,
* this is the link to it
*/
int id; /* if it is a Xfer to/from a buffer, this is the registered
* Xfer's ID
*/
int size; /* amount of data Xferred */
};
/* COMMAND PACKET STRUCTURES */
typedef union {
ktask_t task_id;
struct k_task *task;
struct k_args *args;
} K_CREF;
struct _a1arg {
kmemory_map_t mmap;
void **mptr;
};
struct _c1arg {
int64_t time1;
int64_t time2;
struct k_timer *timer;
ksem_t sema;
ktask_t task;
};
struct _e1arg {
kevent_t event;
int opt;
kevent_handler_t func;
};
struct moved_req_args_setup {
struct k_args *continuation_send;
struct k_args *continuation_receive;
ksem_t sema;
uint32_t dummy;
};
#define MVDACT_NONE 0
/* notify when data has been sent */
#define MVDACT_SNDACK 0x0001
/* notify when data has been received */
#define MVDACT_RCVACK 0x0002
/* Resume On Send (completion): the SeNDer (task) */
#define MVDACT_ROS_SND 0x0004
/* Resume On Recv (completion): the ReCeiVing (task) */
#define MVDACT_ROR_RCV 0x0008
#define MVDACT_VALID (MVDACT_SNDACK | MVDACT_RCVACK)
#define MVDACT_INVALID (~(MVDACT_VALID))
typedef uint32_t MovedAction;
struct moved_req {
MovedAction action;
void *source;
void *destination;
uint32_t total_size;
union {
struct moved_req_args_setup setup;
} extra;
};
struct _g1arg {
ktask_t task;
ktask_group_t group;
kpriority_t prio;
int opt;
int val;
};
struct _l1arg {
kmutex_t mutex;
ktask_t task;
};
struct _m1arg {
struct k_msg mess;
};
struct _p1arg {
kmemory_pool_t pool_id;
int req_size;
void *rep_poolptr;
void *rep_dataptr;
};
struct _q1arg {
kfifo_t queue;
int size;
char *data;
};
struct _q2arg {
kfifo_t queue;
int size;
char data[OCTET_TO_SIZEOFUNIT(40)];
};
struct _s1arg {
ksem_t sema;
ksemg_t list;
int nsem;
};
struct _u1arg {
int (*func)();
void *argp;
int rval;
};
union k_args_args {
struct _a1arg a1;
struct _c1arg c1;
struct moved_req moved_req;
struct _e1arg e1;
struct _g1arg g1;
struct _l1arg l1;
struct _m1arg m1;
struct _p1arg p1;
struct _q1arg q1;
struct _q2arg q2;
struct _s1arg s1;
struct _u1arg u1;
struct _pipe_xfer_req_arg pipe_xfer_req;
struct _pipe_xfer_ack_arg pipe_xfer_ack;
struct _pipe_req_arg pipe_req;
struct _pipe_ack_arg pipe_ack;
};
/*
* A command packet must be aligned on a 4-byte boundary, since this is what
* the microkernel server's command stack processing requires.
*
* The command packet's size must = CMD_PKT_SIZE_IN_WORDS * sizeof(uint32_t).
* Consequently, the structure is packed to prevent some compilers from
* introducing unwanted padding between fields; however, this then requires
* that some fields be explicitly 4-byte aligned to ensure the overall
* size of the structure is correct.
*/
struct k_args {
struct k_args *next;
struct k_args **head;
kpriority_t priority;
/* 'alloc' is true if k_args is allocated via GETARGS() */
bool alloc;
/*
* Align the next structure element if alloc is just one byte.
* Otherwise on ARM it leads to "unaligned write" exception.
*/
void (*Comm)(struct k_args *) __aligned(4);
K_CREF Ctxt;
union {
int32_t ticks;
struct k_timer *timer;
int rcode;
} Time;
K_ARGS_ARGS args;
} __aligned(4) __packed;
/* ---------------------------------------------------------------------- */
/* KERNEL OBJECT STRUCTURES */
struct pool_quad_block {
char *mem_blocks;
uint32_t mem_status;
};
struct pool_block_set {
int block_size;
int nr_of_entries;
struct pool_quad_block *quad_block;
int count;
};
struct pool_struct {
int maxblock_size;
int minblock_size;
int nr_of_maxblocks;
int nr_of_block_sets;
struct k_args *waiters;
struct pool_block_set *block_set;
char *bufblock;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct pool_struct *__next;
#endif
};
#ifdef __cplusplus
}
#endif
#endif /* _MICRO_PRIVATE_TYPES_H */

@@ -1,49 +0,0 @@
/*
* Copyright (c) 2012, 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Microkernel command packet library
*/
#include <nanokernel.h>
#include <arch/cpu.h>
#include <microkernel/command_packet.h>
#include <micro_private.h>
#include <sections.h>
/**
* Generate build error by defining a negative-size array if the hard-coded
* command packet size differs from the actual size; otherwise, define
* a zero-element array that gets thrown away by linker
*/
uint32_t _k_test_cmd_pkt_size
[0 - ((CMD_PKT_SIZE_IN_WORDS * sizeof(uint32_t)) != sizeof(struct k_args))];
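/*
 * Editorial aside (not part of this file): the negative-size-array trick
 * above is the pre-C11 spelling of a static assertion.  With C11 the same
 * compile-time check reads:
 */
_Static_assert(CMD_PKT_SIZE_IN_WORDS * sizeof(uint32_t) == sizeof(struct k_args),
	       "command packet size differs from CMD_PKT_SIZE_IN_WORDS words");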
/**
*
* @brief Send command packet to be processed by _k_server
* @param cmd_packet Arguments
* @return N/A
*/
void _k_task_call(struct k_args *cmd_packet)
{
_COMMAND_STACK_SIZE_CHECK();
cmd_packet->alloc = false;
_k_current_task->args = cmd_packet;
nano_task_stack_push(&_k_command_stack, (uint32_t)cmd_packet);
}

@@ -1,225 +0,0 @@
/*
* Copyright (c) 1997-2010, 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Event kernel services.
*/
#include <micro_private.h>
#include "microkernel/event.h"
#include <toolchain.h>
#include <sections.h>
#include <misc/__assert.h>
extern kevent_t _k_event_list_start[];
extern kevent_t _k_event_list_end[];
#define ASSERT_EVENT_IS_VALID(e, function) do { \
__ASSERT((vaddr_t)e >= (vaddr_t)&_k_event_list_start,\
"invalid event passed to %s", function); \
__ASSERT((vaddr_t)e < (vaddr_t)&_k_event_list_end, \
"invalid event passed to %s", function); \
} while ((0))
/**
*
* @brief Perform set event handler request
*
* @return N/A
*/
void _k_event_handler_set(struct k_args *A)
{
struct _k_event_struct *E = (struct _k_event_struct *)A->args.e1.event;
if (E->func != NULL) {
if (likely(A->args.e1.func == NULL)) {
/* uninstall handler */
E->func = NULL;
A->Time.rcode = RC_OK;
} else {
/* can't overwrite an existing handler */
A->Time.rcode = RC_FAIL;
}
} else {
/* install handler */
E->func = A->args.e1.func;
E->status = 0;
A->Time.rcode = RC_OK;
}
}
int task_event_handler_set(kevent_t event, kevent_handler_t handler)
{
struct k_args A;
ASSERT_EVENT_IS_VALID(event, __func__);
A.Comm = _K_SVC_EVENT_HANDLER_SET;
A.args.e1.event = event;
A.args.e1.func = handler;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
/**
*
* @brief Finish handling a test for event request that timed out
*
* @return N/A
*/
void _k_event_test_timeout(struct k_args *A)
{
struct _k_event_struct *E = (struct _k_event_struct *)A->args.e1.event;
FREETIMER(A->Time.timer);
A->Time.rcode = RC_TIME;
E->waiter = NULL;
_k_state_bit_reset(A->Ctxt.task, TF_EVNT);
}
/**
*
* @brief Perform test for event request
*
* @return N/A
*/
void _k_event_test(struct k_args *A)
{
struct _k_event_struct *E = (struct _k_event_struct *)A->args.e1.event;
if (E->status) { /* the next event can be received */
E->status = 0;
A->Time.rcode = RC_OK;
} else {
if (likely(A->Time.ticks != TICKS_NONE)) {
/* Caller will wait for the event */
if (likely(E->waiter == NULL)) {
A->Ctxt.task = _k_current_task;
E->waiter = A;
_k_state_bit_set(_k_current_task, TF_EVNT);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.ticks == TICKS_UNLIMITED) {
A->Time.timer = NULL;
} else {
A->Comm = _K_SVC_EVENT_TEST_TIMEOUT;
_k_timeout_alloc(A);
}
#endif
} else {
/* already a waiter present */
A->Time.rcode = RC_FAIL;
}
} else {
/* Caller will not wait for the event */
A->Time.rcode = RC_FAIL;
}
}
}
int task_event_recv(kevent_t event, int32_t timeout)
{
struct k_args A;
ASSERT_EVENT_IS_VALID(event, __func__);
A.Comm = _K_SVC_EVENT_TEST;
A.args.e1.event = event;
A.Time.ticks = timeout;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
/**
*
* @brief Signal an event
*
* Lowest level event signalling routine, which is invoked directly when the
* signal is issued by a task and indirectly when the signal is issued by a
* fiber or ISR. The specified event number must be valid.
*
* @return N/A
*/
void _k_do_event_signal(kevent_t event)
{
struct _k_event_struct *E = (struct _k_event_struct *)event;
struct k_args *A = E->waiter;
int ret_val = 1; /* If no handler is available, then ret_val is 1 by default */
if ((E->func) != NULL) { /* handler available */
ret_val = (E->func)(event); /* call handler */
}
if (ret_val != 0) {
E->status = 1;
}
/* if task waiting, will be rescheduled */
if (((A) != NULL) && (E->status != 0)) {
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer != NULL) {
_k_timeout_free(A->Time.timer);
A->Comm = _K_SVC_NOP;
}
#endif
A->Time.rcode = RC_OK;
_k_state_bit_reset(A->Ctxt.task, TF_EVNT);
E->waiter = NULL;
E->status = 0;
}
#ifdef CONFIG_OBJECT_MONITOR
E->count++;
#endif
}
/**
*
* @brief Perform signal an event request
*
* @return N/A
*/
void _k_event_signal(struct k_args *A)
{
kevent_t event = A->args.e1.event;
_k_do_event_signal(event);
A->Time.rcode = RC_OK;
}
int task_event_send(kevent_t event)
{
struct k_args A;
ASSERT_EVENT_IS_VALID(event, __func__);
A.Comm = _K_SVC_EVENT_SIGNAL;
A.args.e1.event = event;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
FUNC_ALIAS(isr_event_send, fiber_event_send, void);
void isr_event_send(kevent_t event)
{
ASSERT_EVENT_IS_VALID(event, __func__);
_COMMAND_STACK_SIZE_CHECK();
nano_isr_stack_push(&_k_command_stack,
(uint32_t)event | KERNEL_CMD_EVENT_TYPE);
}

@@ -1,354 +0,0 @@
/*
* Copyright (c) 1997-2010, 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief FIFO kernel services
*
* This file contains all the services needed for the implementation of a FIFO
* for the microkernel.
*
*
*/
#include <micro_private.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
/**
*
* @brief Finish performing an incomplete FIFO enqueue request
*
* @return N/A
*/
void _k_fifo_enque_reply(struct k_args *A)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer)
FREETIMER(A->Time.timer);
if (unlikely(A->Comm == _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT)) {
REMOVE_ELM(A);
A->Time.rcode = RC_TIME;
} else {
A->Time.rcode = RC_OK;
}
#else
A->Time.rcode = RC_OK;
#endif
_k_state_bit_reset(A->Ctxt.task, TF_ENQU);
}
/**
*
* @brief Finish performing an incomplete FIFO enqueue request with timeout.
*
* @param A Pointer to a k_args structure
*
* @return N/A
*
* @sa _k_fifo_enque_reply
*/
void _k_fifo_enque_reply_timeout(struct k_args *A)
{
_k_fifo_enque_reply(A);
}
/**
*
* @brief Perform a FIFO enqueue request
*
* @return N/A
*/
void _k_fifo_enque_request(struct k_args *A)
{
struct k_args *W;
struct _k_fifo_struct *Q;
int Qid, n, w;
char *p, *q; /* Ski char->uint32_t ??? */
Qid = A->args.q1.queue;
Q = (struct _k_fifo_struct *)Qid;
w = OCTET_TO_SIZEOFUNIT(Q->element_size);
q = A->args.q1.data;
n = Q->num_used;
if (n < Q->Nelms) {
W = Q->waiters;
if (W) {
Q->waiters = W->next;
p = W->args.q1.data;
memcpy(p, q, w);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (W->Time.timer) {
_k_timeout_cancel(W);
W->Comm = _K_SVC_FIFO_DEQUE_REPLY;
} else {
#endif
W->Time.rcode = RC_OK;
_k_state_bit_reset(W->Ctxt.task, TF_DEQU);
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
}
#endif
else {
p = Q->enqueue_point;
memcpy(p, q, w);
p = (char *)((int)p + w);
if (p == Q->end_point)
Q->enqueue_point = Q->base;
else
Q->enqueue_point = p;
Q->num_used = ++n;
#ifdef CONFIG_OBJECT_MONITOR
if (Q->high_watermark < n)
Q->high_watermark = n;
#endif
}
A->Time.rcode = RC_OK;
#ifdef CONFIG_OBJECT_MONITOR
Q->count++;
#endif
} else {
if (likely(A->Time.ticks != TICKS_NONE)) {
A->Ctxt.task = _k_current_task;
A->priority = _k_current_task->priority;
_k_state_bit_set(_k_current_task, TF_ENQU);
INSERT_ELM(Q->waiters, A);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.ticks == TICKS_UNLIMITED)
A->Time.timer = NULL;
else {
A->Comm = _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT;
_k_timeout_alloc(A);
}
#endif
} else {
A->Time.rcode = RC_FAIL;
}
}
}
int task_fifo_put(kfifo_t queue, void *data, int32_t timeout)
{
struct k_args A;
A.Comm = _K_SVC_FIFO_ENQUE_REQUEST;
A.Time.ticks = timeout;
A.args.q1.data = (char *)data;
A.args.q1.queue = queue;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
/**
*
* @brief Finish performing an incomplete FIFO dequeue request
*
* @return N/A
*/
void _k_fifo_deque_reply(struct k_args *A)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer)
FREETIMER(A->Time.timer);
if (unlikely(A->Comm == _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT)) {
REMOVE_ELM(A);
A->Time.rcode = RC_TIME;
} else {
A->Time.rcode = RC_OK;
}
#else
A->Time.rcode = RC_OK;
#endif
_k_state_bit_reset(A->Ctxt.task, TF_DEQU);
}
/**
*
* @brief Finish performing an incomplete FIFO dequeue request with timeout.
*
* @param A Pointer to a k_args structure.
*
* @return N/A
*
* @sa _k_fifo_deque_reply
*/
void _k_fifo_deque_reply_timeout(struct k_args *A)
{
_k_fifo_deque_reply(A);
}
/**
*
* @brief Perform FIFO dequeue request
*
* @return N/A
*/
void _k_fifo_deque_request(struct k_args *A)
{
struct k_args *W;
struct _k_fifo_struct *Q;
int Qid, n, w;
char *p, *q; /* idem */
Qid = A->args.q1.queue;
Q = (struct _k_fifo_struct *)Qid;
w = OCTET_TO_SIZEOFUNIT(Q->element_size);
p = A->args.q1.data;
n = Q->num_used;
if (n) {
q = Q->dequeue_point;
memcpy(p, q, w);
q = (char *)((int)q + w);
if (q == Q->end_point)
Q->dequeue_point = Q->base;
else
Q->dequeue_point = q;
A->Time.rcode = RC_OK;
W = Q->waiters;
if (W) {
Q->waiters = W->next;
p = Q->enqueue_point;
q = W->args.q1.data;
w = OCTET_TO_SIZEOFUNIT(Q->element_size);
memcpy(p, q, w);
p = (char *)((int)p + w);
if (p == Q->end_point)
Q->enqueue_point = Q->base;
else
Q->enqueue_point = p;
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (W->Time.timer) {
_k_timeout_cancel(W);
W->Comm = _K_SVC_FIFO_ENQUE_REPLY;
} else {
#endif
W->Time.rcode = RC_OK;
_k_state_bit_reset(W->Ctxt.task, TF_ENQU);
#ifdef CONFIG_SYS_CLOCK_EXISTS
}
#endif
#ifdef CONFIG_OBJECT_MONITOR
Q->count++;
#endif
} else
Q->num_used = --n;
} else {
if (likely(A->Time.ticks != TICKS_NONE)) {
A->Ctxt.task = _k_current_task;
A->priority = _k_current_task->priority;
_k_state_bit_set(_k_current_task, TF_DEQU);
INSERT_ELM(Q->waiters, A);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.ticks == TICKS_UNLIMITED)
A->Time.timer = NULL;
else {
A->Comm = _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT;
_k_timeout_alloc(A);
}
#endif
} else {
A->Time.rcode = RC_FAIL;
}
}
}
int task_fifo_get(kfifo_t queue, void *data, int32_t timeout)
{
struct k_args A;
A.Comm = _K_SVC_FIFO_DEQUE_REQUEST;
A.Time.ticks = timeout;
A.args.q1.data = (char *)data;
A.args.q1.queue = queue;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
/**
*
* @brief Perform miscellaneous FIFO request
* @param A Kernel Argument
*
* @return N/A
*/
void _k_fifo_ioctl(struct k_args *A)
{
struct _k_fifo_struct *Q;
int Qid;
Qid = A->args.q1.queue;
Q = (struct _k_fifo_struct *)Qid;
if (A->args.q1.size) {
if (Q->num_used) {
struct k_args *X;
while ((X = Q->waiters)) {
Q->waiters = X->next;
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (likely(X->Time.timer)) {
_k_timeout_cancel(X);
X->Comm = _K_SVC_FIFO_ENQUE_REPLY;
} else {
#endif
X->Time.rcode = RC_FAIL;
_k_state_bit_reset(X->Ctxt.task, TF_ENQU);
#ifdef CONFIG_SYS_CLOCK_EXISTS
}
#endif
}
}
Q->num_used = 0;
Q->enqueue_point = Q->dequeue_point = Q->base;
A->Time.rcode = RC_OK;
} else
A->Time.rcode = Q->num_used;
}
/**
*
* @brief Miscellaneous FIFO request
*
* Depending upon the chosen operation, this routine will ...
* 1. <op> = 0 : query the number of FIFO entries
* 2. <op> = 1 : purge the FIFO of its entries
*
* @param queue FIFO queue
* @param op 0 for status query and 1 for purge
* @return # of FIFO entries on query; RC_OK on purge
*/
int _task_fifo_ioctl(kfifo_t queue, int op)
{
struct k_args A;
A.Comm = _K_SVC_FIFO_IOCTL;
A.args.q1.queue = queue;
A.args.q1.size = op;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
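/*
 * Usage sketch (editorial addition; MYFIFO is a hypothetical FIFO object,
 * not one from this tree): per the comment above, <op> 0 queries the number
 * of queued entries and <op> 1 purges the FIFO.
 */
static void fifo_ioctl_example(void)
{
	int used = _task_fifo_ioctl(MYFIFO, 0);    /* entries currently queued */

	if (used > 0) {
		(void)_task_fifo_ioctl(MYFIFO, 1); /* purge them; returns RC_OK */
	}
}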

@@ -1,460 +0,0 @@
/*
* Copyright (c) 1997-2010, 2012-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Microkernel idle logic
*
* Microkernel idle logic. Different forms of idling are performed by the idle
* task, depending on how the kernel is configured.
*/
#include <micro_private.h>
#include <nano_private.h>
#include <arch/cpu.h>
#include <toolchain.h>
#include <sections.h>
#include <microkernel.h>
#include <misc/util.h>
#include <drivers/system_timer.h>
#if defined(CONFIG_WORKLOAD_MONITOR)
static unsigned int _k_workload_slice = 0x0;
static unsigned int _k_workload_ticks = 0x0;
static unsigned int _k_workload_ref_time = 0x0;
static unsigned int _k_workload_t0 = 0x0;
static unsigned int _k_workload_t1 = 0x0;
static volatile unsigned int _k_workload_n0 = 0x0;
static volatile unsigned int _k_workload_n1 = 0x0;
static volatile unsigned int _k_workload_i = 0x0;
static volatile unsigned int _k_workload_i0 = 0x0;
static volatile unsigned int _k_workload_delta = 0x0;
static volatile unsigned int _k_workload_start_time = 0x0;
static volatile unsigned int _k_workload_end_time = 0x0;
#ifdef WL_SCALE
extern uint32_t _k_workload_scale;
#endif
/**
*
* @brief Shared code between workload calibration and monitoring
*
* Perform idle task "dummy work".
*
* This routine increments _k_workload_i and checks it against _k_workload_n1.
* _k_workload_n1 is updated by the system tick handler, and both are kept
* in close synchronization.
*
* @return N/A
*
*/
static void workload_loop(void)
{
volatile int x = 87654321;
volatile int y = 4;
/* loop never terminates, except during calibration phase */
while (++_k_workload_i != _k_workload_n1) {
unsigned int s_iCountDummyProc = 0;
while (64 != s_iCountDummyProc++) { /* 64 == 2^6 */
x >>= y;
x <<= y;
y++;
x >>= y;
x <<= y;
y--;
}
}
}
/**
*
* @brief Calibrate the workload monitoring subsystem
*
* Measures the time required to do a fixed amount of "dummy work", and
* sets default values for the workload measuring period.
*
* @return N/A
*
*/
void _k_workload_monitor_calibrate(void)
{
_k_workload_n0 = _k_workload_i = 0;
_k_workload_n1 = 1000;
_k_workload_t0 = sys_cycle_get_32();
workload_loop();
_k_workload_t1 = sys_cycle_get_32();
_k_workload_delta = _k_workload_t1 - _k_workload_t0;
_k_workload_i0 = _k_workload_i;
#ifdef WL_SCALE
_k_workload_ref_time =
(_k_workload_t1 - _k_workload_t0) >> (_k_workload_scale);
#else
_k_workload_ref_time = (_k_workload_t1 - _k_workload_t0) >> (4 + 6);
#endif
_k_workload_slice = 100;
_k_workload_ticks = 100;
}
/**
*
* @brief Workload monitor tick handler
*
* If workload monitor is configured this routine updates the global variables
* it uses to record the passage of time.
*
* @return N/A
*
*/
void _k_workload_monitor_update(void)
{
if (--_k_workload_ticks == 0) {
_k_workload_t0 = _k_workload_t1;
_k_workload_t1 = sys_cycle_get_32();
_k_workload_n0 = _k_workload_n1;
_k_workload_n1 = _k_workload_i - 1;
_k_workload_ticks = _k_workload_slice;
}
}
/**
*
* @brief Workload monitor "start idling" handler
*
* Records time when idle task was selected for execution by the microkernel.
*
* @return N/A
*/
void _k_workload_monitor_idle_start(void)
{
_k_workload_start_time = sys_cycle_get_32();
}
/**
*
* @brief Workload monitor "end idling" handler
*
* Records time when idle task was no longer selected for execution by the
* microkernel, and updates amount of time spent idling.
*
* @return N/A
*/
void _k_workload_monitor_idle_end(void)
{
_k_workload_end_time = sys_cycle_get_32();
_k_workload_i += (_k_workload_i0 *
(_k_workload_end_time - _k_workload_start_time)) / _k_workload_delta;
}
/**
*
* @brief Process request to read the processor workload
*
* Computes workload, or uses 0 if workload monitoring is not configured.
*
* @return N/A
*/
void _k_workload_get(struct k_args *P)
{
unsigned int k, t;
signed int iret;
k = (_k_workload_i - _k_workload_n0) * _k_workload_ref_time;
#ifdef WL_SCALE
t = (sys_cycle_get_32() - _k_workload_t0) >> (_k_workload_scale);
#else
t = (sys_cycle_get_32() - _k_workload_t0) >> (4 + 6);
#endif
iret = MSEC_PER_SEC - k / t;
/*
* Due to calibration at startup, <iret> could be slightly negative.
* Ensure a negative value is never returned.
*/
if (iret < 0) {
iret = 0;
}
P->args.u1.rval = iret;
}
#else
void _k_workload_get(struct k_args *P)
{
P->args.u1.rval = 0;
}
#endif /* CONFIG_WORKLOAD_MONITOR */
int task_workload_get(void)
{
struct k_args A;
A.Comm = _K_SVC_WORKLOAD_GET;
KERNEL_ENTRY(&A);
return A.args.u1.rval;
}
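/*
 * Usage sketch (editorial addition, not code from this tree): the value
 * computed above is scaled against MSEC_PER_SEC (1000), so it can be read
 * as tenths of a percent of non-idle time over the last measurement slice;
 * it is always 0 when CONFIG_WORKLOAD_MONITOR is disabled.  printk() from
 * <misc/printk.h> is assumed to be available.
 */
static void report_cpu_load(void)
{
	int load = task_workload_get();           /* 0..1000 */

	printk("cpu load: %d.%d%%\n", load / 10, load % 10);
}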
void sys_workload_time_slice_set(int32_t t)
{
#ifdef CONFIG_WORKLOAD_MONITOR
if (t < 10) {
t = 10;
}
if (t > 1000) {
t = 1000;
}
_k_workload_slice = t;
#else
ARG_UNUSED(t);
#endif
}
unsigned char _sys_power_save_flag = 1;
#if defined(CONFIG_SYS_POWER_MANAGEMENT)
#include <nanokernel.h>
#include <microkernel/base_api.h>
void __attribute__((weak)) _sys_soc_resume(void)
{
}
#if (defined(CONFIG_SYS_POWER_LOW_POWER_STATE) || \
defined(CONFIG_SYS_POWER_DEEP_SLEEP) || \
defined(CONFIG_DEVICE_POWER_MANAGEMENT))
#include <power.h>
/*
* Used to allow _sys_soc_suspend() implementation to control notification
* of the wake event that caused exit from low power state
*/
unsigned char _sys_soc_notify_wake_event;
#endif
#if defined(CONFIG_TICKLESS_IDLE)
#include <drivers/system_timer.h>
#endif
extern void nano_cpu_set_idle(int32_t ticks);
#if defined(CONFIG_TICKLESS_IDLE)
/*
* Idle time must be this value or higher for timer to go into tickless idle
* state.
*/
int32_t _sys_idle_threshold_ticks = CONFIG_TICKLESS_IDLE_THRESH;
#endif /* CONFIG_TICKLESS_IDLE */
/**
*
* @brief Power management policy when kernel begins idling
*
* This routine implements the power management policy based on the time
* until the timer expires, in system ticks.
* Routine is invoked from the idle task with interrupts disabled
*
* @return N/A
*/
void _sys_power_save_idle(int32_t ticks)
{
#if defined(CONFIG_TICKLESS_IDLE)
if ((ticks == TICKS_UNLIMITED) || ticks >= _sys_idle_threshold_ticks) {
/*
* Stop generating system timer interrupts until it's time for
* the next scheduled microkernel timer to expire.
*/
_timer_idle_enter(ticks);
}
#endif /* CONFIG_TICKLESS_IDLE */
nano_cpu_set_idle(ticks);
#if (defined(CONFIG_SYS_POWER_LOW_POWER_STATE) || \
defined(CONFIG_SYS_POWER_DEEP_SLEEP) || \
defined(CONFIG_DEVICE_POWER_MANAGEMENT))
/* This assignment will be controlled by Kconfig flag in future */
_sys_soc_notify_wake_event = 1;
/*
* Call the suspend hook function of the soc interface to allow
* entry into a low power state. The function returns
* SYS_PM_NOT_HANDLED if low power state was not entered, in which
* case, kernel does normal idle processing.
*
* This function is entered with interrupts disabled. If a low power
* state was entered, then the hook function should enable interrupts
* before exiting. This is because the kernel does not do its own idle
* processing in those cases i.e. skips nano_cpu_idle(). The kernel's
* idle processing re-enables interrupts which is essential for
* the kernel's scheduling logic.
*/
if (_sys_soc_suspend(ticks) == SYS_PM_NOT_HANDLED) {
_sys_soc_notify_wake_event = 0;
nano_cpu_idle();
}
#else
nano_cpu_idle();
#endif
}
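/*
 * Hedged sketch (editorial addition, not code from this tree) of the hook
 * contract described above: a SoC implementation that never enters a low
 * power state simply reports that, so the kernel falls back to its normal
 * nano_cpu_idle() path with interrupts still disabled.  The signature is
 * inferred from the call site above.
 */
int _sys_soc_suspend(int32_t ticks)
{
	ARG_UNUSED(ticks);

	return SYS_PM_NOT_HANDLED;   /* kernel performs normal idle processing */
}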
/**
*
* @brief Power management policy when kernel stops idling
*
* This routine is invoked when the kernel leaves the idle state.
* Routine can be modified to wake up other devices.
* The routine is invoked from interrupt thread, with interrupts disabled.
*
* @return N/A
*/
void _sys_power_save_idle_exit(int32_t ticks)
{
#if (defined(CONFIG_SYS_POWER_LOW_POWER_STATE) || \
defined(CONFIG_SYS_POWER_DEEP_SLEEP) || \
defined(CONFIG_DEVICE_POWER_MANAGEMENT))
/* Some CPU low power states require notification at the ISR
* to allow any operations that need to be done before the kernel
* switches tasks or processes nested interrupts. This can be
* disabled by calling _sys_soc_disable_wake_event_notification().
* Alternatively it can be simply ignored if not required.
*/
if (_sys_soc_notify_wake_event) {
_sys_soc_resume();
}
#endif
#ifdef CONFIG_TICKLESS_IDLE
if ((ticks == TICKS_UNLIMITED) || ticks >= _sys_idle_threshold_ticks) {
/* Resume normal periodic system timer interrupts */
_timer_idle_exit();
}
#else
ARG_UNUSED(ticks);
#endif /* CONFIG_TICKLESS_IDLE */
}
/**
*
* @brief Obtain number of ticks until next timer expires
*
* Must be called with interrupts locked to prevent the timer queues from
* changing.
*
* @return Number of ticks until next timer expires.
*
*/
static inline int32_t _get_next_timer_expiry(void)
{
uint32_t closest_deadline = (uint32_t)TICKS_UNLIMITED;
if (_k_timer_list_head) {
closest_deadline = _k_timer_list_head->duration;
}
return (int32_t)min(closest_deadline, _nano_get_earliest_deadline());
}
#endif
/**
*
* @brief Power saving when idle
*
* If _sys_power_save_flag is non-zero, this routine keeps the system in a low
* power state whenever the kernel is idle. If it is zero, this routine will
* fall through and _k_kernel_idle() will try the next idling mechanism.
*
* @return N/A
*
*/
static void _power_save(void)
{
if (_sys_power_save_flag) {
for (;;) {
irq_lock();
#ifdef CONFIG_SYS_POWER_MANAGEMENT
_sys_power_save_idle(_get_next_timer_expiry());
#else
/*
* nano_cpu_idle() is invoked here directly only if APM
* is disabled. Otherwise the microkernel decides
* either to invoke it or to implement advanced idle
* functionality
*/
nano_cpu_idle();
#endif
}
/*
* Code analyzers may complain that _power_save() uses an
* infinite loop unless we indicate that this is intentional
*/
CODE_UNREACHABLE;
}
}
/* Specify what work to do when idle task is "busy waiting" */
#ifdef CONFIG_WORKLOAD_MONITOR
#define DO_IDLE_WORK() workload_loop()
#else
#define DO_IDLE_WORK() do { /* do nothing */ } while (0)
#endif
/**
*
* @brief Microkernel idle task
*
* If power save is on, we sleep; if power save is off, we "busy wait".
*
* @return N/A
*
*/
int _k_kernel_idle(void)
{
_power_save(); /* never returns if power saving is enabled */
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
/* record timestamp when idling begins */
extern uint64_t __idle_tsc;
__idle_tsc = _tsc_read();
#endif
for (;;) {
DO_IDLE_WORK();
}
/*
* Code analyzers may complain that _k_kernel_idle() uses an infinite
* loop unless we indicate that this is intentional
*/
CODE_UNREACHABLE;
}

View File

@@ -1,122 +0,0 @@
/* k_init.c */
/*
* Copyright (c) 1997-2010, 2012-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <microkernel.h>
#include <micro_private.h>
#include <nano_private.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
#include <device.h>
#include <init.h>
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
#include <arch/cpu.h>
#endif
extern void _k_init_dynamic(void); /* defined by sysgen */
char __noinit __stack _k_server_stack[CONFIG_MICROKERNEL_SERVER_STACK_SIZE];
#ifdef CONFIG_TASK_DEBUG
int _k_debug_halt;
#endif
#ifdef CONFIG_INIT_STACKS
static uint32_t _k_server_command_stack_storage
[CONFIG_COMMAND_STACK_SIZE] = {
[0 ... CONFIG_COMMAND_STACK_SIZE - 1] = 0xAAAAAAAA };
#else
static uint32_t __noinit _k_server_command_stack_storage
[CONFIG_COMMAND_STACK_SIZE];
#endif
struct nano_stack _k_command_stack = {NULL,
_k_server_command_stack_storage,
_k_server_command_stack_storage,
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
/* _k_command_stack is not tracked by
* the debug tracing kernel objects feature.
*/
NULL,
#endif
};
extern void _k_server(int unused1, int unused2);
extern int _k_kernel_idle(void);
/**
*
* @brief Mainline for microkernel's idle task
*
* This routine completes kernel initialization and starts any application
* tasks in the EXE task group. From then on it takes care of doing idle
* processing whenever there is no other work for the kernel to do.
*
* @return N/A
*/
void _main(void)
{
_sys_device_do_config_level(_SYS_INIT_LEVEL_SECONDARY);
_sys_device_do_config_level(_SYS_INIT_LEVEL_NANOKERNEL);
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
/*
* record timestamp for microkernel's _main() function
*/
extern uint64_t __main_tsc;
__main_tsc = _tsc_read();
#endif
/*
* Most variables and data structures are statically initialized in
* kernel_main.c: this only initializes what must be dynamically
* initialized at runtime.
*/
_k_init_dynamic();
task_fiber_start(_k_server_stack,
CONFIG_MICROKERNEL_SERVER_STACK_SIZE,
_k_server,
0,
0,
CONFIG_MICROKERNEL_SERVER_PRIORITY,
0);
_sys_device_do_config_level(_SYS_INIT_LEVEL_MICROKERNEL);
_sys_device_do_config_level(_SYS_INIT_LEVEL_APPLICATION);
#ifdef CONFIG_CPLUSPLUS
/* Process the .ctors and .init_array sections */
extern void __do_global_ctors_aux(void);
extern void __do_init_array_aux(void);
__do_global_ctors_aux();
__do_init_array_aux();
#endif
#ifdef CONFIG_WORKLOAD_MONITOR
_k_workload_monitor_calibrate();
#endif
task_group_start(EXE_GROUP);
_k_kernel_idle();
}

View File

@@ -1,898 +0,0 @@
/* mailbox kernel services */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <microkernel.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
#include <micro_private.h>
#include <misc/__assert.h>
#include <misc/util.h>
/**
*
* @brief Determines if mailbox message is synchronous or asynchronous
*
* Returns a non-zero value if the specified message contains a valid pool ID,
* indicating that it is an asynchronous message.
*/
#define ISASYNCMSG(message) ((message)->tx_block.pool_id != 0)
/**
*
* @brief Copy a packet
*
* @param in the packet to be copied
* @param out the packet to copy to
*
* @return N/A
*/
static void copy_packet(struct k_args **out, struct k_args *in)
{
GETARGS(*out);
/*
* Copy the data from <in> to <*out> and create
* a backpointer to the original packet.
*/
memcpy(*out, in, sizeof(struct k_args));
(*out)->Ctxt.args = in;
}
/**
*
* @brief Determine if there is a match between the mailbox sender and receiver
*
* @return matched message size, or -1 if no match
*/
static int match(struct k_args *Reader, struct k_args *Writer)
{
if ((Reader->args.m1.mess.tx_task == ANYTASK ||
Reader->args.m1.mess.tx_task == Writer->args.m1.mess.tx_task) &&
(Writer->args.m1.mess.rx_task == ANYTASK ||
Writer->args.m1.mess.rx_task == Reader->args.m1.mess.rx_task)) {
if (!ISASYNCMSG(&(Writer->args.m1.mess))) {
int32_t info;
Reader->args.m1.mess.tx_task =
Writer->args.m1.mess.tx_task;
Writer->args.m1.mess.rx_task =
Reader->args.m1.mess.rx_task;
info = Reader->args.m1.mess.info;
Reader->args.m1.mess.info = Writer->args.m1.mess.info;
Writer->args.m1.mess.info = info;
} else {
Reader->args.m1.mess.tx_task =
Writer->args.m1.mess.tx_task;
Reader->args.m1.mess.tx_data = NULL;
Reader->args.m1.mess.tx_block =
Writer->args.m1.mess.tx_block;
Reader->args.m1.mess.info = Writer->args.m1.mess.info;
}
if (Reader->args.m1.mess.size > Writer->args.m1.mess.size) {
Reader->args.m1.mess.size = Writer->args.m1.mess.size;
} else {
Writer->args.m1.mess.size = Reader->args.m1.mess.size;
}
/*
* The __ASSERT_NO_MSG() statements are used to verify that
* the -1 will not be returned when there is a match.
*/
__ASSERT_NO_MSG(Writer->args.m1.mess.size ==
Reader->args.m1.mess.size);
__ASSERT_NO_MSG(Reader->args.m1.mess.size != (uint32_t)(-1));
return Reader->args.m1.mess.size;
}
return -1; /* There was no match */
}
/**
* @brief Prepare transfer
*
* @return true or false
*/
static bool prepare_transfer(struct k_args *move,
struct k_args *reader,
struct k_args *writer)
{
/* extract info from writer and reader before they change: */
/*
* prepare writer and reader cmd packets for 'return':
* (this is shared code, irrespective of the value of 'move')
*/
__ASSERT_NO_MSG(reader->next == NULL);
reader->Comm = _K_SVC_MBOX_RECEIVE_ACK;
reader->Time.rcode = RC_OK;
__ASSERT_NO_MSG(writer->next == NULL);
writer->alloc = true;
writer->Comm = _K_SVC_MBOX_SEND_ACK;
writer->Time.rcode = RC_OK;
if (move) {
/* { move != NULL, which means full data exchange } */
bool all_data_present = true;
move->Comm = _K_SVC_MOVEDATA_REQ;
/*
* transfer the data with the highest
* priority of reader and writer
*/
move->priority = max(writer->priority, reader->priority);
move->Ctxt.task = NULL;
move->args.moved_req.action =
(MovedAction)(MVDACT_SNDACK | MVDACT_RCVACK);
move->args.moved_req.total_size = writer->args.m1.mess.size;
move->args.moved_req.extra.setup.continuation_send = NULL;
move->args.moved_req.extra.setup.continuation_receive = NULL;
/* reader: */
if (reader->args.m1.mess.rx_data == NULL) {
all_data_present = false;
__ASSERT_NO_MSG(0 == reader->args.m1.mess.extra
.transfer); /* == extra.sema */
reader->args.m1.mess.extra.transfer = move;
/*SENDARGS(reader); */
} else {
move->args.moved_req.destination =
reader->args.m1.mess.rx_data;
writer->args.m1.mess.rx_data =
reader->args.m1.mess.rx_data;
/* chain the reader */
move->args.moved_req.extra.setup.continuation_receive = reader;
}
/* writer: */
if (ISASYNCMSG(&(writer->args.m1.mess))) {
move->args.moved_req.source =
writer->args.m1.mess.tx_block.pointer_to_data;
reader->args.m1.mess.tx_block =
writer->args.m1.mess.tx_block;
} else {
__ASSERT_NO_MSG(writer->args.m1.mess.tx_data != NULL);
move->args.moved_req.source =
writer->args.m1.mess.tx_data;
reader->args.m1.mess.tx_data =
writer->args.m1.mess.tx_data;
}
/* chain the writer */
move->args.moved_req.extra.setup.continuation_send = writer;
return all_data_present;
}
/* { NULL == move, which means header exchange only } */
return 0; /* == don't care actually */
}
/**
* @brief Do transfer
*
* @return N/A
*/
static void transfer(struct k_args *pMvdReq)
{
__ASSERT_NO_MSG(pMvdReq->args.moved_req.source != NULL);
__ASSERT_NO_MSG(pMvdReq->args.moved_req.destination != NULL);
_k_movedata_request(pMvdReq);
FREEARGS(pMvdReq);
}
/**
* @brief Process the acknowledgment to a mailbox send request
*
* @return N/A
*/
void _k_mbox_send_ack(struct k_args *pCopyWriter)
{
struct k_args *Starter;
if (ISASYNCMSG(&(pCopyWriter->args.m1.mess))) {
if (pCopyWriter->args.m1.mess.extra.sema) {
/*
* Signal the semaphore. Alternatively, this could
* be done using the continuation mechanism.
*/
struct k_args A;
#ifndef NO_KARG_CLEAR
memset(&A, 0xfd, sizeof(struct k_args));
#endif
A.Comm = _K_SVC_SEM_SIGNAL;
A.args.s1.sema = pCopyWriter->args.m1.mess.extra.sema;
_k_sem_signal(&A);
}
/*
* release the block back to the memory pool, unless the special
* pool_id value (-1) below indicates that it must not be freed.
*/
if (pCopyWriter->args.m1.mess.tx_block.pool_id != (uint32_t)(-1)) {
/*
* special value to tell if block should be
* freed or not
*/
pCopyWriter->Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE;
pCopyWriter->args.p1.pool_id =
pCopyWriter->args.m1.mess.tx_block.pool_id;
pCopyWriter->args.p1.rep_poolptr =
pCopyWriter->args.m1.mess.tx_block
.address_in_pool;
pCopyWriter->args.p1.rep_dataptr =
pCopyWriter->args.m1.mess.tx_block
.pointer_to_data;
pCopyWriter->args.p1.req_size =
pCopyWriter->args.m1.mess.tx_block.req_size;
SENDARGS(pCopyWriter);
return;
}
FREEARGS(pCopyWriter);
return;
}
/*
* Get a pointer to the original command packet of the sender
* and copy both the result as well as the message information
* from the received packet of the sender before resetting the
* TF_SEND and TF_SENDDATA state bits.
*/
Starter = pCopyWriter->Ctxt.args;
Starter->Time.rcode = pCopyWriter->Time.rcode;
Starter->args.m1.mess = pCopyWriter->args.m1.mess;
_k_state_bit_reset(Starter->Ctxt.task, TF_SEND | TF_SENDDATA);
FREEARGS(pCopyWriter);
}
/**
*
* @brief Process the timeout for a mailbox send request
*
* @return N/A
*/
void _k_mbox_send_reply(struct k_args *pCopyWriter)
{
FREETIMER(pCopyWriter->Time.timer);
REMOVE_ELM(pCopyWriter);
pCopyWriter->Time.rcode = RC_TIME;
pCopyWriter->Comm = _K_SVC_MBOX_SEND_ACK;
SENDARGS(pCopyWriter);
}
/**
*
* @brief Process a mailbox send request
*
* @return N/A
*/
void _k_mbox_send_request(struct k_args *Writer)
{
kmbox_t MailBoxId = Writer->args.m1.mess.mailbox;
struct _k_mbox_struct *MailBox;
struct k_args *CopyReader;
struct k_args *CopyWriter;
struct k_args *temp;
bool bAsync;
bAsync = ISASYNCMSG(&Writer->args.m1.mess);
struct k_task *sender = NULL;
/*
* Only deschedule the task if it is not a poster
* (not an asynchronous request).
*/
if (!bAsync) {
sender = _k_current_task;
_k_state_bit_set(sender, TF_SEND);
}
Writer->Ctxt.task = sender;
MailBox = (struct _k_mbox_struct *)MailBoxId;
copy_packet(&CopyWriter, Writer);
if (bAsync) {
/*
* Clear the [Ctxt] field in an asynchronous request as the
* original packet will not be available later.
*/
CopyWriter->Ctxt.args = NULL;
}
/*
* The [next] field can be changed later when added to the Writer's
* list, but when not listed, [next] must be NULL.
*/
CopyWriter->next = NULL;
for (CopyReader = MailBox->readers, temp = NULL; CopyReader != NULL;
temp = CopyReader, CopyReader = CopyReader->next) {
uint32_t u32Size;
u32Size = match(CopyReader, CopyWriter);
if (u32Size != (uint32_t)(-1)) {
#ifdef CONFIG_OBJECT_MONITOR
MailBox->count++;
#endif
/*
* There is a match. Remove the chosen reader from the
* list.
*/
if (temp != NULL) {
temp->next = CopyReader->next;
} else {
MailBox->readers = CopyReader->next;
}
CopyReader->next = NULL;
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (CopyReader->Time.timer != NULL) {
/*
* The reader was trying to handshake with
* timeout
*/
_k_timer_delist(CopyReader->Time.timer);
FREETIMER(CopyReader->Time.timer);
}
#endif
if (u32Size == 0) {
/* No data exchange--header only */
prepare_transfer(NULL, CopyReader, CopyWriter);
SENDARGS(CopyReader);
SENDARGS(CopyWriter);
} else {
struct k_args *Moved_req;
GETARGS(Moved_req);
if (prepare_transfer(Moved_req,
CopyReader, CopyWriter)) {
/*
* <Moved_req> will be cleared as well
*/
transfer(Moved_req);
} else {
SENDARGS(CopyReader);
}
}
return;
}
}
/* There is no matching receiver for this message. */
if (bAsync) {
/*
* For asynchronous requests, just post the message into the
* list and continue. No further action is required.
*/
INSERT_ELM(MailBox->writers, CopyWriter);
return;
}
if (CopyWriter->Time.ticks != TICKS_NONE) {
/*
* The writer specified a wait or wait with timeout operation.
*
* Note: Setting the command to SEND_TMO is only necessary in
* the wait with timeout case. However, it is more efficient
* to blindly set it rather than waste time on a comparison.
*/
CopyWriter->Comm = _K_SVC_MBOX_SEND_REPLY;
/* Put the letter into the mailbox */
INSERT_ELM(MailBox->writers, CopyWriter);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (CopyWriter->Time.ticks == TICKS_UNLIMITED) {
/* This is a wait operation; there is no timer. */
CopyWriter->Time.timer = NULL;
} else {
/*
* This is a wait with timeout operation.
* Enlist a new timeout.
*/
_k_timeout_alloc(CopyWriter);
}
#endif
} else {
/*
* This is a no-wait operation.
* Notify the sender of failure.
*/
CopyWriter->Comm = _K_SVC_MBOX_SEND_ACK;
CopyWriter->Time.rcode = RC_FAIL;
SENDARGS(CopyWriter);
}
}
int task_mbox_put(kmbox_t mbox, kpriority_t prio, struct k_msg *M, int32_t timeout)
{
struct k_args A;
__ASSERT((M->size == 0) || (M->tx_data != NULL),
"Invalid mailbox data specification\n");
if (unlikely(M->size == (uint32_t)(-1))) {
/* the sender side cannot specify a size of -1 == 0xfff..ff */
return RC_FAIL;
}
M->tx_task = _k_current_task->id;
M->tx_block.pool_id = 0; /* NO ASYNC POST */
M->extra.sema = 0;
M->mailbox = mbox;
A.priority = prio;
A.Comm = _K_SVC_MBOX_SEND_REQUEST;
A.Time.ticks = timeout;
A.args.m1.mess = *M;
KERNEL_ENTRY(&A);
*M = A.args.m1.mess;
return A.Time.rcode;
}
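/*
 * Illustrative usage sketch (not part of the original file): sending a
 * synchronous message with task_mbox_put(). MYMBOX is assumed to be a
 * mailbox defined via the project's MDEF file; the payload and info value
 * are arbitrary.
 */
static void example_mbox_send(void)
{
	char payload[16] = "hello";
	struct k_msg msg = {
		.rx_task = ANYTASK,		/* any task may receive it */
		.tx_data = payload,
		.size = sizeof(payload),
		.info = 42,			/* application-defined word */
	};

	if (task_mbox_put(MYMBOX, 1, &msg, TICKS_UNLIMITED) == RC_OK) {
		/* msg.size now reflects the amount accepted by the receiver */
	}
}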
/**
*
* @brief Process a mailbox receive acknowledgment
*
* This routine processes a mailbox receive acknowledgment.
*
* INTERNAL: This routine frees the <pCopyReader> packet
*
* @return N/A
*/
void _k_mbox_receive_ack(struct k_args *pCopyReader)
{
struct k_args *Starter;
/* Get a pointer to the original command packet of the sender */
Starter = pCopyReader->Ctxt.args;
/* Copy result from received packet */
Starter->Time.rcode = pCopyReader->Time.rcode;
/* And copy the message information from the received packet. */
Starter->args.m1.mess = pCopyReader->args.m1.mess;
/* Reschedule the sender task */
_k_state_bit_reset(Starter->Ctxt.task, TF_RECV | TF_RECVDATA);
FREEARGS(pCopyReader);
}
/**
* @brief Process the timeout for a mailbox receive request
*
* @return N/A
*/
void _k_mbox_receive_reply(struct k_args *pCopyReader)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
FREETIMER(pCopyReader->Time.timer);
REMOVE_ELM(pCopyReader);
pCopyReader->Time.rcode = RC_TIME;
pCopyReader->Comm = _K_SVC_MBOX_RECEIVE_ACK;
SENDARGS(pCopyReader);
#endif
}
/**
* @brief Process a mailbox receive request
*
* @return N/A
*/
void _k_mbox_receive_request(struct k_args *Reader)
{
kmbox_t MailBoxId = Reader->args.m1.mess.mailbox;
struct _k_mbox_struct *MailBox;
struct k_args *CopyWriter;
struct k_args *temp;
struct k_args *CopyReader;
Reader->Ctxt.task = _k_current_task;
_k_state_bit_set(Reader->Ctxt.task, TF_RECV);
copy_packet(&CopyReader, Reader);
/*
* The [next] field can be changed later when added to the Reader's
* list, but when not listed, [next] must be NULL.
*/
CopyReader->next = NULL;
MailBox = (struct _k_mbox_struct *)MailBoxId;
for (CopyWriter = MailBox->writers, temp = NULL; CopyWriter != NULL;
temp = CopyWriter, CopyWriter = CopyWriter->next) {
uint32_t u32Size;
u32Size = match(CopyReader, CopyWriter);
if (u32Size != (uint32_t)(-1)) {
#ifdef CONFIG_OBJECT_MONITOR
MailBox->count++;
#endif
/*
* There is a match. Remove the chosen writer
* from the list.
*/
if (temp != NULL) {
temp->next = CopyWriter->next;
} else {
MailBox->writers = CopyWriter->next;
}
CopyWriter->next = NULL;
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (CopyWriter->Time.timer != NULL) {
/*
* The writer was trying to handshake with
* timeout.
*/
_k_timer_delist(CopyWriter->Time.timer);
FREETIMER(CopyWriter->Time.timer);
}
#endif
if (u32Size == 0) {
/* No data exchange--header only */
prepare_transfer(NULL, CopyReader, CopyWriter);
SENDARGS(CopyReader);
SENDARGS(CopyWriter);
} else {
struct k_args *Moved_req;
GETARGS(Moved_req);
if (prepare_transfer(Moved_req,
CopyReader, CopyWriter)) {
/*
* <Moved_req> will be
* cleared as well
*/
transfer(Moved_req);
} else {
SENDARGS(CopyReader);
}
}
return;
}
}
/* There is no matching writer for this message. */
if (Reader->Time.ticks != TICKS_NONE) {
/*
* The reader specified a wait or wait with timeout operation.
*
* Note: Setting the command to RECV_TMO is only necessary in
* the wait with timeout case. However, it is more efficient
* to blindly set it rather than waste time on a comparison.
*/
CopyReader->Comm = _K_SVC_MBOX_RECEIVE_REPLY;
/* Put the letter into the mailbox */
INSERT_ELM(MailBox->readers, CopyReader);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (CopyReader->Time.ticks == TICKS_UNLIMITED) {
/* This is a wait operation; there is no timer. */
CopyReader->Time.timer = NULL;
} else {
/*
* This is a wait with timeout operation.
* Enlist a new timeout.
*/
_k_timeout_alloc(CopyReader);
}
#endif
} else {
/*
* This is a no-wait operation.
* Notify the receiver of failure.
*/
CopyReader->Comm = _K_SVC_MBOX_RECEIVE_ACK;
CopyReader->Time.rcode = RC_FAIL;
SENDARGS(CopyReader);
}
}
int task_mbox_get(kmbox_t mbox, struct k_msg *M, int32_t timeout)
{
struct k_args A;
M->rx_task = _k_current_task->id;
M->mailbox = mbox;
M->extra.transfer = 0;
/*
* NOTE: extra.transfer is zeroed to avoid any conflict with extra.sema;
* prepare_transfer() asserts that it is still 0
*/
A.priority = _k_current_task->priority;
A.Comm = _K_SVC_MBOX_RECEIVE_REQUEST;
A.Time.ticks = timeout;
A.args.m1.mess = *M;
KERNEL_ENTRY(&A);
*M = A.args.m1.mess;
return A.Time.rcode;
}
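/*
 * Illustrative usage sketch (not part of the original file): receiving a
 * message with task_mbox_get(), supplying a buffer so the data is delivered
 * in the same operation. MYMBOX is assumed to be defined via the project's
 * MDEF file.
 */
static void example_mbox_receive(void)
{
	char buffer[16];
	struct k_msg msg = {
		.tx_task = ANYTASK,	/* accept a message from any sender */
		.rx_data = buffer,
		.size = sizeof(buffer),	/* maximum amount we can accept */
	};

	if (task_mbox_get(MYMBOX, &msg, 10) == RC_OK) {
		/* msg.size holds the number of bytes actually transferred */
	}
}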
void _task_mbox_block_put(kmbox_t mbox,
kpriority_t prio,
struct k_msg *M,
ksem_t sema)
{
struct k_args A;
__ASSERT(0xFFFFFFFF != M->size, "Invalid mailbox data specification\n");
if (M->size == 0) {
/*
* trick: special value to indicate that tx_block
* should NOT be released in the SND_ACK
*/
M->tx_block.pool_id = (uint32_t)(-1);
}
M->tx_task = _k_current_task->id;
M->tx_data = NULL;
M->mailbox = mbox;
M->extra.sema = sema;
#ifdef CONFIG_SYS_CLOCK_EXISTS
A.Time.timer = NULL;
#endif
A.priority = prio;
A.Comm = _K_SVC_MBOX_SEND_REQUEST;
A.args.m1.mess = *M;
KERNEL_ENTRY(&A);
}
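/*
 * Illustrative usage sketch (not part of the original file): posting a
 * message asynchronously from a memory pool block. MYMBOX, MYPOOL and
 * MYSEMA are assumed to be defined via the project's MDEF file, and
 * task_mbox_block_put() is presumed to be the public wrapper around
 * _task_mbox_block_put().
 */
static void example_mbox_block_put(void)
{
	struct k_block block;
	struct k_msg msg = { .rx_task = ANYTASK };

	if (task_mem_pool_alloc(&block, MYPOOL, 64, TICKS_NONE) != RC_OK) {
		return;
	}
	/* ... fill block.pointer_to_data with up to 64 bytes of payload ... */
	msg.size = 64;
	msg.tx_block = block;		/* marks the message as asynchronous */
	_task_mbox_block_put(MYMBOX, 1, &msg, MYSEMA);
	/* MYSEMA is signalled once the kernel has disposed of the block */
}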
/**
*
* @brief Process a mailbox receive data request
*
* @return N/A
*/
void _k_mbox_receive_data(struct k_args *Starter)
{
struct k_args *CopyStarter;
struct k_args *MoveD;
struct k_args *Writer;
Starter->Ctxt.task = _k_current_task;
_k_state_bit_set(_k_current_task, TF_RECVDATA);
GETARGS(CopyStarter);
memcpy(CopyStarter, Starter, sizeof(struct k_args));
CopyStarter->Ctxt.args = Starter;
MoveD = CopyStarter->args.m1.mess.extra.transfer;
CopyStarter->Comm = _K_SVC_MBOX_RECEIVE_ACK;
CopyStarter->Time.rcode = RC_OK;
MoveD->args.moved_req.extra.setup.continuation_receive = CopyStarter;
CopyStarter->next = NULL;
MoveD->args.moved_req.destination = CopyStarter->args.m1.mess.rx_data;
MoveD->args.moved_req.total_size = CopyStarter->args.m1.mess.size;
Writer = MoveD->args.moved_req.extra.setup.continuation_send;
if (Writer != NULL) {
if (ISASYNCMSG(&(Writer->args.m1.mess))) {
CopyStarter->args.m1.mess.tx_block =
Writer->args.m1.mess.tx_block;
} else {
Writer->args.m1.mess.rx_data =
CopyStarter->args.m1.mess.rx_data;
CopyStarter->args.m1.mess.tx_data =
Writer->args.m1.mess.tx_data;
}
transfer(MoveD); /* and MoveD will be cleared as well */
}
}
void _task_mbox_data_get(struct k_msg *M)
{
struct k_args A;
/* sanity checks */
if (unlikely(M->extra.transfer == NULL)) {
/*
* protection: if a user erroneously calls this function after
* a task_mbox_get(), we should not run into trouble
*/
return;
}
A.args.m1.mess = *M;
A.Comm = _K_SVC_MBOX_RECEIVE_DATA;
KERNEL_ENTRY(&A);
}
int task_mbox_data_block_get(struct k_msg *M, struct k_block *block,
kmemory_pool_t pool_id, int32_t timeout)
{
int retval;
struct k_args *MoveD;
/* sanity checks: */
if (M->extra.transfer == NULL) {
/*
* If a user erroneously calls this function after a
* task_mbox_get(), we should not run into trouble.
* Return RC_OK instead of RC_FAIL to be downwards compatible.
*/
return RC_OK;
}
/* special flow to check for possible optimisations: */
if (ISASYNCMSG(M)) {
/* First transfer block */
__ASSERT_NO_MSG(M->tx_block.pool_id != -1);
*block = M->tx_block;
/* This is the MOVED packet */
MoveD = M->extra.transfer;
/* Then release sender (writer) */
struct k_args *Writer;
/*
* This is the first of the continuation packets for
* continuation on send. It should be the only one.
* That is, it should not have any followers. To
* prevent [tx_block] from being released when the
* SEND_ACK is processed, change its [pool_id] to -1.
*/
Writer = MoveD->args.moved_req.extra.setup.continuation_send;
__ASSERT_NO_MSG(Writer != NULL);
__ASSERT_NO_MSG(Writer->next == NULL);
Writer->args.m1.mess.tx_block.pool_id = (uint32_t)(-1);
nano_task_stack_push(&_k_command_stack, (uint32_t)Writer);
#ifdef ACTIV_ASSERTS
struct k_args *dummy;
/*
* Confirm that there are not any continuation packets
* for continuation on receive.
*/
dummy = MoveD->args.moved_req.extra.setup.continuation_receive;
__ASSERT_NO_MSG(dummy == NULL);
#endif
FREEARGS(MoveD); /* Clean up MOVED */
return RC_OK;
}
/* 'normal' flow of task_mbox_data_block_get(): */
if (M->size != 0) {
retval = task_mem_pool_alloc(block, pool_id,
M->size, timeout);
if (retval != RC_OK) {
return retval;
}
M->rx_data = block->pointer_to_data;
} else {
block->pool_id = (kmemory_pool_t) -1;
}
/*
* Invoke task_mbox_data_get() core without sanity checks, as they have
* already been performed.
*/
struct k_args A;
A.args.m1.mess = *M;
A.Comm = _K_SVC_MBOX_RECEIVE_DATA;
KERNEL_ENTRY(&A);
return RC_OK; /* task_mbox_data_get() doesn't return anything */
}
/**
* @brief Process a mailbox send data request
*
* @return N/A
*/
void _k_mbox_send_data(struct k_args *Starter)
{
struct k_args *CopyStarter;
struct k_args *MoveD;
struct k_args *Reader;
Starter->Ctxt.task = _k_current_task;
_k_state_bit_set(_k_current_task, TF_SENDDATA);
GETARGS(CopyStarter);
memcpy(CopyStarter, Starter, sizeof(struct k_args));
CopyStarter->Ctxt.args = Starter;
MoveD = CopyStarter->args.m1.mess.extra.transfer;
CopyStarter->Time.rcode = RC_OK;
CopyStarter->Comm = _K_SVC_MBOX_SEND_ACK;
MoveD->args.moved_req.extra.setup.continuation_send = CopyStarter;
CopyStarter->next = NULL;
MoveD->args.moved_req.source = CopyStarter->args.m1.mess.rx_data;
Reader = MoveD->args.moved_req.extra.setup.continuation_receive;
if (Reader != NULL) {
Reader->args.m1.mess.rx_data =
CopyStarter->args.m1.mess.rx_data;
CopyStarter->args.m1.mess.tx_data =
Reader->args.m1.mess.tx_data;
transfer(MoveD); /* and MoveD will be cleared as well */
}
}

View File

@@ -1,184 +0,0 @@
/*
* Copyright (c) 1997-2010, 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Memory map kernel services.
*/
#include <micro_private.h>
#include <sections.h>
#include <microkernel/memory_map.h>
extern kmemory_map_t _k_mem_map_ptr_start[];
extern kmemory_map_t _k_mem_map_ptr_end[];
/**
* @brief Initialize kernel memory map subsystem
*
* Perform any initialization of memory maps that wasn't done at build time.
*/
void _k_mem_map_init(void)
{
int j, w;
kmemory_map_t *id;
struct _k_mem_map_struct *M;
for (id = _k_mem_map_ptr_start; id < _k_mem_map_ptr_end; id++) {
char *p;
char *q;
M = (struct _k_mem_map_struct *)(*id);
M->waiters = NULL;
w = OCTET_TO_SIZEOFUNIT(M->element_size);
p = M->base;
q = NULL;
for (j = 0; j < M->Nelms; j++) {
*(char **)p = q;
q = p;
p += w;
}
M->free = q;
M->num_used = 0;
M->high_watermark = 0;
M->count = 0;
}
}
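/*
 * Worked example (added for clarity, not in the original file): assuming
 * OCTET_TO_SIZEOFUNIT() is the identity mapping, a map with base B, 3
 * elements and an element size of 8 bytes has a free list threaded through
 * the elements themselves by the loop above:
 *
 *   *(char **)B        = NULL
 *   *(char **)(B + 8)  = B
 *   *(char **)(B + 16) = B + 8
 *
 * leaving M->free = B + 16, i.e. blocks are handed out from the highest
 * address downward.
 */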
/**
* @brief Finish handling a memory map block allocation request that timed out
*/
void _k_mem_map_alloc_timeout(struct k_args *A)
{
_k_timeout_free(A->Time.timer);
REMOVE_ELM(A);
A->Time.rcode = RC_TIME;
_k_state_bit_reset(A->Ctxt.task, TF_ALLO);
}
/**
* @brief Handle a request to allocate a memory map block
*/
void _k_mem_map_alloc(struct k_args *A)
{
struct _k_mem_map_struct *M =
(struct _k_mem_map_struct *)(A->args.a1.mmap);
if (M->free != NULL) {
*(A->args.a1.mptr) = M->free;
M->free = *(char **)(M->free);
M->num_used++;
#ifdef CONFIG_OBJECT_MONITOR
M->count++;
if (M->high_watermark < M->num_used)
M->high_watermark = M->num_used;
#endif
A->Time.rcode = RC_OK;
return;
}
*(A->args.a1.mptr) = NULL;
if (likely(A->Time.ticks != TICKS_NONE)) {
A->priority = _k_current_task->priority;
A->Ctxt.task = _k_current_task;
_k_state_bit_set(_k_current_task, TF_ALLO);
INSERT_ELM(M->waiters, A);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.ticks == TICKS_UNLIMITED)
A->Time.timer = NULL;
else {
A->Comm = _K_SVC_MEM_MAP_ALLOC_TIMEOUT;
_k_timeout_alloc(A);
}
#endif
} else
A->Time.rcode = RC_FAIL;
}
int task_mem_map_alloc(kmemory_map_t mmap, void **mptr, int32_t timeout)
{
struct k_args A;
A.Comm = _K_SVC_MEM_MAP_ALLOC;
A.Time.ticks = timeout;
A.args.a1.mmap = mmap;
A.args.a1.mptr = mptr;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
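/*
 * Illustrative usage sketch (not part of the original file): allocating and
 * releasing one fixed-size block. MYMAP is assumed to be a memory map
 * defined via the project's MDEF file.
 */
static void example_mem_map_usage(void)
{
	void *block;

	if (task_mem_map_alloc(MYMAP, &block, 5) == RC_OK) {
		/* ... use the fixed-size block ... */
		_task_mem_map_free(MYMAP, &block);	/* block is set to NULL */
	}
}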
/**
* @brief Handle a request to free a memory map block
*
* Give block to a waiting task, if there is one.
*/
void _k_mem_map_dealloc(struct k_args *A)
{
struct _k_mem_map_struct *M =
(struct _k_mem_map_struct *)(A->args.a1.mmap);
struct k_args *X;
**(char ***)(A->args.a1.mptr) = M->free;
M->free = *(char **)(A->args.a1.mptr);
*(A->args.a1.mptr) = NULL;
X = M->waiters;
if (X) {
M->waiters = X->next;
*(X->args.a1.mptr) = M->free;
M->free = *(char **)(M->free);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (X->Time.timer) {
_k_timeout_free(X->Time.timer);
X->Comm = _K_SVC_NOP;
}
#endif
X->Time.rcode = RC_OK;
_k_state_bit_reset(X->Ctxt.task, TF_ALLO);
#ifdef CONFIG_OBJECT_MONITOR
M->count++;
#endif
return;
}
M->num_used--;
}
void _task_mem_map_free(kmemory_map_t mmap, void **mptr)
{
struct k_args A;
A.Comm = _K_SVC_MEM_MAP_DEALLOC;
A.args.a1.mmap = mmap;
A.args.a1.mptr = mptr;
KERNEL_ENTRY(&A);
}
int task_mem_map_used_get(kmemory_map_t mmap)
{
struct _k_mem_map_struct *M = (struct _k_mem_map_struct *)mmap;
return M->num_used;
}

View File

@@ -1,730 +0,0 @@
/*
* Copyright (c) 1997-2010, 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <microkernel.h>
#include <micro_private.h>
#include <toolchain.h>
#include <sections.h>
/* Auto-Defrag settings */
#define AD_NONE 0
#define AD_BEFORE_SEARCH4BIGGERBLOCK 1
#define AD_AFTER_SEARCH4BIGGERBLOCK 2
#define AUTODEFRAG AD_AFTER_SEARCH4BIGGERBLOCK
/**
*
* @brief Initialize kernel memory pool subsystem
*
* Perform any initialization of memory pools that wasn't done at build time.
*
* @return N/A
*/
void _k_mem_pool_init(void)
{
struct pool_struct *P;
int i;
/* perform initialization for each memory pool */
for (i = 0, P = _k_mem_pool_list; i < _k_mem_pool_count; i++, P++) {
/*
* mark block set for largest block size
* as owning all of the memory pool buffer space
*/
int remaining = P->nr_of_maxblocks;
int t = 0;
char *memptr = P->bufblock;
while (remaining >= 4) {
P->block_set[0].quad_block[t].mem_blocks = memptr;
P->block_set[0].quad_block[t].mem_status = 0xF;
t++;
remaining = remaining - 4;
memptr +=
OCTET_TO_SIZEOFUNIT(P->block_set[0].block_size)
* 4;
}
if (remaining != 0) {
P->block_set[0].quad_block[t].mem_blocks = memptr;
P->block_set[0].quad_block[t].mem_status =
0xF >> (4 - remaining);
/* non-existent blocks are marked as unavailable */
}
/*
* note: all other block sets own no blocks, since their
* first quad-block has a NULL memory pointer
*/
}
}
/**
*
* @brief Determines which block set corresponds to the specified data size
*
* Finds the block set with the smallest blocks that can hold the specified
* amount of data.
*
* @return block set index
*/
static int compute_block_set_index(struct pool_struct *P, int data_size)
{
int block_size = P->minblock_size;
int offset = P->nr_of_block_sets - 1;
while (data_size > block_size) {
block_size = block_size << 2;
offset--;
}
return offset;
}
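/*
 * Worked example (added for clarity, not in the original file): block sizes
 * quadruple from set to set, with index 0 holding the largest blocks. For a
 * pool whose minimum block size is 64 bytes and which has 4 block sets
 * (4096, 1024, 256 and 64 bytes at indices 0..3), a request for 300 bytes
 * starts at offset 3: 300 > 64 and 300 > 256, but 300 <= 1024, so the
 * function returns 1, i.e. the 1024-byte block set.
 */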
/**
*
* @brief Return an allocated block to its block set
*
* @param ptr pointer to start of block
* @param P memory pool descriptor
* @param index block set identifier
*
* @return N/A
*/
static void free_existing_block(char *ptr, struct pool_struct *P, int index)
{
struct pool_quad_block *quad_block = P->block_set[index].quad_block;
char *block_ptr;
int i, j;
/*
* search block set's quad-blocks until the block is located,
* then mark it as unused
*
* note: block *must* exist, so no need to do array bounds checking
*/
for (i = 0; ; i++) {
__ASSERT((i < P->block_set[index].nr_of_entries) &&
(quad_block[i].mem_blocks != NULL),
"Attempt to free unallocated memory pool block\n");
block_ptr = quad_block[i].mem_blocks;
for (j = 0; j < 4; j++) {
if (ptr == block_ptr) {
quad_block[i].mem_status |= (1 << j);
return;
}
block_ptr += OCTET_TO_SIZEOFUNIT(
P->block_set[index].block_size);
}
}
}
/**
*
* @brief Defragment the specified memory pool block sets
*
* Reassembles any quad-blocks that are entirely unused into larger blocks
* (to the extent permitted).
*
* @param P memory pool descriptor
* @param ifraglevel_start index of smallest block set to defragment
* @param ifraglevel_stop index of largest block set to defragment
*
* @return N/A
*/
static void defrag(struct pool_struct *P,
int ifraglevel_start, int ifraglevel_stop)
{
int i, j, k;
struct pool_quad_block *quad_block;
/* process block sets from smallest to largest permitted sizes */
for (j = ifraglevel_start; j > ifraglevel_stop; j--) {
quad_block = P->block_set[j].quad_block;
i = 0;
do {
/* block set is done if no more quad-blocks exist */
if (quad_block[i].mem_blocks == NULL) {
break;
}
/* reassemble current quad-block, if possible */
if (quad_block[i].mem_status == 0xF) {
/*
* mark the corresponding block in next larger
* block set as free
*/
free_existing_block(
quad_block[i].mem_blocks, P, j - 1);
/*
* delete the quad-block from this block set
* by replacing it with the last quad-block
*
* (algorithm works even when the deleted
* quad-block is the last quad_block)
*/
k = i;
while (((k+1) != P->block_set[j].nr_of_entries)
&&
(quad_block[k+1].mem_blocks != NULL)) {
k++;
}
quad_block[i].mem_blocks =
quad_block[k].mem_blocks;
quad_block[i].mem_status =
quad_block[k].mem_status;
quad_block[k].mem_blocks = NULL;
/* loop & process replacement quad_block[i] */
} else {
i++;
}
/* block set is done if at end of quad-block array */
} while (i < P->block_set[j].nr_of_entries);
}
}
/**
*
* @brief Perform defragment memory pool request
*
* @return N/A
*/
void _k_defrag(struct k_args *A)
{
struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->args.p1.pool_id);
/* do complete defragmentation of memory pool (i.e. all block sets) */
defrag(P, P->nr_of_block_sets - 1, 0);
/* reschedule anybody waiting for a block */
if (P->waiters) {
struct k_args *NewGet;
/*
* create a command packet to re-try block allocation
* for the waiting tasks, and add it to the command stack
*/
GETARGS(NewGet);
*NewGet = *A;
NewGet->Comm = _K_SVC_BLOCK_WAITERS_GET;
TO_ALIST(&_k_command_stack, NewGet);
}
}
void task_mem_pool_defragment(kmemory_pool_t Pid)
{
struct k_args A;
A.Comm = _K_SVC_DEFRAG;
A.args.p1.pool_id = Pid;
KERNEL_ENTRY(&A);
}
/**
*
* @brief Allocate block from an existing block set
*
* @param pfraglevelinfo pointer to block set
* @param piblockindex area to return index of first unused quad-block
* when allocation fails
*
* @return pointer to allocated block, or NULL if none available
*/
static char *get_existing_block(struct pool_block_set *pfraglevelinfo,
int *piblockindex)
{
char *found = NULL;
int i = 0;
int status;
int free_bit;
do {
/* give up if no more quad-blocks exist */
if (pfraglevelinfo->quad_block[i].mem_blocks == NULL) {
break;
}
/* allocate a block from current quad-block, if possible */
status = pfraglevelinfo->quad_block[i].mem_status;
if (status != 0x0) {
/* identify first free block */
free_bit = find_lsb_set(status) - 1;
/* compute address of free block */
found = pfraglevelinfo->quad_block[i].mem_blocks +
(OCTET_TO_SIZEOFUNIT(free_bit *
pfraglevelinfo->block_size));
/* mark block as unavailable (using XOR to invert) */
pfraglevelinfo->quad_block[i].mem_status ^=
1 << free_bit;
#ifdef CONFIG_OBJECT_MONITOR
pfraglevelinfo->count++;
#endif
break;
}
/* move on to next quad-block; give up if at end of array */
} while (++i < pfraglevelinfo->nr_of_entries);
*piblockindex = i;
return found;
}
/**
*
* @brief Allocate a block, recursively fragmenting larger blocks if necessary
*
* @param P memory pool descriptor
* @param index index of block set currently being examined
* @param startindex index of block set for which allocation is being done
*
* @return pointer to allocated block, or NULL if none available
*/
static char *get_block_recursive(struct pool_struct *P,
int index, int startindex)
{
int i;
char *found, *larger_block;
struct pool_block_set *fr_table;
/* give up if we've exhausted the set of maximum size blocks */
if (index < 0) {
return NULL;
}
/* try allocating a block from the current block set */
fr_table = P->block_set;
i = 0;
found = get_existing_block(&(fr_table[index]), &i);
if (found != NULL) {
return found;
}
#if AUTODEFRAG == AD_BEFORE_SEARCH4BIGGERBLOCK
/*
* do a partial defragmentation of memory pool & try allocating again
* - do this on initial invocation only, not recursive ones
* (since there is no benefit in repeating the defrag)
* - defrag only the blocks smaller than the desired size,
* and only until the size needed is reached
*
* note: defragging at this time tries to preserve the memory pool's
* larger blocks by fragmenting them only when necessary
* (i.e. at the cost of doing more frequent auto-defragmentations)
*/
if (index == startindex) {
defrag(P, P->nr_of_block_sets - 1, startindex);
found = get_existing_block(&(fr_table[index]), &i);
if (found != NULL) {
return found;
}
}
#endif
/* try allocating a block from the next largest block set */
larger_block = get_block_recursive(P, index - 1, startindex);
if (larger_block != NULL) {
/*
* add a new quad-block to the current block set,
* then mark one of its 4 blocks as used and return it
*
* note: "i" was earlier set to indicate the first unused
* quad-block entry in the current block set
*/
fr_table[index].quad_block[i].mem_blocks = larger_block;
fr_table[index].quad_block[i].mem_status = 0xE;
#ifdef CONFIG_OBJECT_MONITOR
fr_table[index].count++;
#endif
return larger_block;
}
#if AUTODEFRAG == AD_AFTER_SEARCH4BIGGERBLOCK
/*
* do a partial defragmentation of memory pool & try allocating again
* - do this on initial invocation only, not recursive ones
* (since there is no benefit in repeating the defrag)
* - defrag only the blocks smaller than the desired size,
* and only until the size needed is reached
*
* note: defragging at this time tries to limit the cost of doing
* auto-defragmentations by doing them only when necessary
* (i.e. at the cost of fragmenting the memory pool's larger blocks)
*/
if (index == startindex) {
defrag(P, P->nr_of_block_sets - 1, startindex);
found = get_existing_block(&(fr_table[index]), &i);
if (found != NULL) {
return found;
}
}
#endif
return NULL; /* can't find (or create) desired block */
}
/**
*
* @brief Examine tasks that are waiting for memory pool blocks
*
* This routine attempts to satisfy any incomplete block allocation requests for
* the specified memory pool. It can be invoked either by the explicit freeing
* of a used block or as a result of defragmenting the pool (which may create
* one or more new, larger blocks).
*
* @return N/A
*/
void _k_block_waiters_get(struct k_args *A)
{
struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->args.p1.pool_id);
char *found_block;
struct k_args *curr_task, *prev_task;
int offset;
curr_task = P->waiters;
/* forw is first field in struct */
prev_task = (struct k_args *)&(P->waiters);
/* loop all waiters */
while (curr_task != NULL) {
/* locate block set to try allocating from */
offset = compute_block_set_index(
P, curr_task->args.p1.req_size);
/* allocate block (fragmenting a larger block, if needed) */
found_block = get_block_recursive(
P, offset, offset);
/* if success : remove task from list and reschedule */
if (found_block != NULL) {
/* return found block */
curr_task->args.p1.rep_poolptr = found_block;
curr_task->args.p1.rep_dataptr = found_block;
/* reschedule task */
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (curr_task->Time.timer) {
_k_timeout_free(curr_task->Time.timer);
}
#endif
curr_task->Time.rcode = RC_OK;
_k_state_bit_reset(curr_task->Ctxt.task, TF_GTBL);
/* remove from list */
prev_task->next = curr_task->next;
/* and get next task */
curr_task = curr_task->next;
} else {
/* else just get next task */
prev_task = curr_task;
curr_task = curr_task->next;
}
}
/* put used command packet on the empty packet list */
FREEARGS(A);
}
/**
*
* @brief Finish handling an allocate block request that timed out
*
* @return N/A
*/
void _k_mem_pool_block_get_timeout_handle(struct k_args *A)
{
_k_timeout_free(A->Time.timer);
REMOVE_ELM(A);
A->Time.rcode = RC_TIME;
_k_state_bit_reset(A->Ctxt.task, TF_GTBL);
}
/**
*
* @brief Perform allocate memory pool block request
*
* @return N/A
*/
void _k_mem_pool_block_get(struct k_args *A)
{
struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->args.p1.pool_id);
char *found_block;
int offset;
/* locate block set to try allocating from */
offset = compute_block_set_index(P, A->args.p1.req_size);
/* allocate block (fragmenting a larger block, if needed) */
found_block = get_block_recursive(P, offset, offset);
if (found_block != NULL) {
A->args.p1.rep_poolptr = found_block;
A->args.p1.rep_dataptr = found_block;
A->Time.rcode = RC_OK;
return;
}
/*
* no suitable block is currently available,
* so either wait for one to appear or indicate failure
*/
if (likely(A->Time.ticks != TICKS_NONE)) {
A->priority = _k_current_task->priority;
A->Ctxt.task = _k_current_task;
_k_state_bit_set(_k_current_task, TF_GTBL);
INSERT_ELM(P->waiters, A);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.ticks == TICKS_UNLIMITED) {
A->Time.timer = NULL;
} else {
A->Comm = _K_SVC_MEM_POOL_BLOCK_GET_TIMEOUT_HANDLE;
_k_timeout_alloc(A);
}
#endif
} else {
A->Time.rcode = RC_FAIL;
}
}
/**
* @brief Helper function invoking POOL_BLOCK_GET command
*
* Info: Since the _k_mem_pool_block_get() invoked here is returning the
* same pointer in both A->args.p1.rep_poolptr and A->args.p1.rep_dataptr, we
* are passing down only one address (in alloc_mem)
*
* @return RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively
*/
int _do_task_mem_pool_alloc(kmemory_pool_t pool_id, int reqsize,
int32_t timeout, void **alloc_mem)
{
struct k_args A;
A.Comm = _K_SVC_MEM_POOL_BLOCK_GET;
A.Time.ticks = timeout;
A.args.p1.pool_id = pool_id;
A.args.p1.req_size = reqsize;
KERNEL_ENTRY(&A);
*alloc_mem = A.args.p1.rep_poolptr;
return A.Time.rcode;
}
/**
*
* @brief Allocate memory pool block request
*
* This routine allocates a free block from the specified memory pool, ensuring
* that its size is at least as big as the size requested (in bytes).
*
* @param blockptr pointer to requested block
* @param pool_id pool from which to get block
* @param reqsize requested block size
* @param timeout maximum number of ticks to wait
*
* @return RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively
*/
int task_mem_pool_alloc(struct k_block *blockptr, kmemory_pool_t pool_id,
int reqsize, int32_t timeout)
{
void *pool_ptr;
int retval;
retval = _do_task_mem_pool_alloc(pool_id, reqsize, timeout,
&pool_ptr);
blockptr->pool_id = pool_id;
blockptr->address_in_pool = pool_ptr;
blockptr->pointer_to_data = pool_ptr;
blockptr->req_size = reqsize;
return retval;
}
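/*
 * Illustrative usage sketch (not part of the original file): allocating a
 * variable-size block and returning it. MYPOOL is assumed to be a memory
 * pool defined via the project's MDEF file.
 */
static void example_mem_pool_usage(void)
{
	struct k_block block;

	if (task_mem_pool_alloc(&block, MYPOOL, 200, TICKS_NONE) == RC_OK) {
		/* block.pointer_to_data addresses at least 200 usable bytes */
		task_mem_pool_free(&block);
	}
}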
#define MALLOC_ALIGN (sizeof(uint32_t))
/**
* @brief Allocate memory from heap pool
*
* This routine provides traditional malloc semantics; internally it uses
* the microkernel pool APIs on a dedicated HEAP pool
*
* @param size Size of memory requested by the caller.
*
* @retval address of the block if successful, otherwise NULL
*/
void *task_malloc(uint32_t size)
{
uint32_t new_size;
uint32_t *aligned_addr;
void *pool_ptr;
/* The address the pool returns may not be aligned. Also,
* pool_free requires both the start address and the size, so
* we end up needing two slots to save the size and the
* start address, in addition to padding space
*/
new_size = size + (sizeof(uint32_t) << 1) + MALLOC_ALIGN - 1;
if (_do_task_mem_pool_alloc(_heap_mem_pool_id, new_size, TICKS_NONE,
&pool_ptr) != RC_OK) {
return NULL;
}
/* Get the next aligned address following the address returned by pool*/
aligned_addr = (uint32_t *) ROUND_UP(pool_ptr, MALLOC_ALIGN);
/* Save the size requested to the pool API, to be used while freeing */
*aligned_addr = new_size;
/* Save the original unaligned_addr pointer too */
aligned_addr++;
*((void **) aligned_addr) = pool_ptr;
/* return the subsequent address */
return ++aligned_addr;
}
/**
*
* @brief Perform return memory pool block request
*
* Marks a block belonging to a pool as free; if there are waiters that can use
* the block, it is passed to a waiting task.
*
* @return N/A
*/
void _k_mem_pool_block_release(struct k_args *A)
{
struct pool_struct *P;
int Pid;
int offset;
Pid = A->args.p1.pool_id;
P = _k_mem_pool_list + OBJ_INDEX(Pid);
/* determine block set that block belongs to */
offset = compute_block_set_index(P, A->args.p1.req_size);
/* mark the block as unused */
free_existing_block(A->args.p1.rep_poolptr, P, offset);
/* reschedule anybody waiting for a block */
if (P->waiters != NULL) {
struct k_args *NewGet;
/*
* create a command packet to re-try block allocation
* for the waiting tasks, and add it to the command stack
*/
GETARGS(NewGet);
*NewGet = *A;
NewGet->Comm = _K_SVC_BLOCK_WAITERS_GET;
TO_ALIST(&_k_command_stack, NewGet);
}
if (A->alloc) {
FREEARGS(A);
}
}
void task_mem_pool_free(struct k_block *blockptr)
{
struct k_args A;
A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE;
A.args.p1.pool_id = blockptr->pool_id;
A.args.p1.req_size = blockptr->req_size;
A.args.p1.rep_poolptr = blockptr->address_in_pool;
A.args.p1.rep_dataptr = blockptr->pointer_to_data;
KERNEL_ENTRY(&A);
}
/**
* @brief Free memory allocated through task_malloc
*
* @param ptr pointer to be freed
*
* @return N/A
*/
void task_free(void *ptr)
{
struct k_args A;
A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE;
A.args.p1.pool_id = _heap_mem_pool_id;
/* Fetch the pointer returned by the pool API */
A.args.p1.rep_poolptr = *((void **) ((uint32_t *)ptr - 1));
/* Further fetch the size asked from pool */
A.args.p1.req_size = *((uint32_t *)ptr - 2);
KERNEL_ENTRY(&A);
}
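/*
 * Illustrative usage sketch (not part of the original file): task_malloc()
 * and task_free() give malloc()-like semantics on top of the dedicated heap
 * pool. The two functions cooperate through the hidden header described
 * above: two uint32_t slots (saved allocation size and original pool
 * pointer) precede the pointer handed back to the caller.
 */
static void example_task_malloc_usage(void)
{
	char *buf = task_malloc(100);

	if (buf != NULL) {
		/* ... use up to 100 bytes at buf ... */
		task_free(buf);
	}
}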

View File

@@ -1,93 +0,0 @@
/* k_move_data.c */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <micro_private.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
#include <misc/__assert.h>
/**
*
* @brief Send any chained continuation command packets
*
* @return N/A
*/
static void mvdreq_docont(struct k_args *Cont)
{
struct k_args *next;
while (Cont) {
next = Cont;
Cont = Cont->next;
SENDARGS(next);
}
}
/**
*
* @brief Copy the data and run any continuations for a movedata request
*
* @return N/A
*/
static void mvdreq_copy(struct moved_req *ReqArgs)
{
memcpy(ReqArgs->destination, ReqArgs->source,
OCTET_TO_SIZEOFUNIT(ReqArgs->total_size));
if (ReqArgs->action & MVDACT_SNDACK)
mvdreq_docont(ReqArgs->extra.setup.continuation_send);
if (ReqArgs->action & MVDACT_RCVACK)
mvdreq_docont(ReqArgs->extra.setup.continuation_receive);
}
/**
*
* @brief Process a movedata request
*
* @return N/A
*/
void _k_movedata_request(struct k_args *Req)
{
struct moved_req *ReqArgs;
ReqArgs = &(Req->args.moved_req);
__ASSERT_NO_MSG(0 ==
(ReqArgs->total_size %
SIZEOFUNIT_TO_OCTET(1))); /* must be a multiple of size_t */
__ASSERT_NO_MSG(!(ReqArgs->action & MVDACT_INVALID));
/*
* If no data is to be transferred, just execute the continuation
* packet, if any, and get out:
*/
if (0 == ReqArgs->total_size) {
if (ReqArgs->action & MVDACT_SNDACK)
/* Send ack continuation */
mvdreq_docont(
ReqArgs->extra.setup.continuation_send);
if (ReqArgs->action & MVDACT_RCVACK)
/* Recv ack continuation */
mvdreq_docont(
ReqArgs->extra.setup.continuation_receive);
return;
}
mvdreq_copy(ReqArgs);
}

View File

@@ -1,384 +0,0 @@
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief mutex kernel services
*
* This module contains routines for handling mutex locking and unlocking. It
* also includes routines that force the release of mutex objects when a task
* is aborted or unloaded.
*
* Mutexes implement a priority inheritance algorithm that boosts the priority
* level of the owning task to match the priority level of the highest priority
* task waiting on the mutex.
*
* Each mutex that contributes to priority inheritance must be released in the
* reverse order in which it was acquired. Furthermore, each subsequent mutex
* that contributes to raising the owning task's priority level must be acquired
* at a point after the most recent "bumping" of the priority level.
*
* For example, if task A has two mutexes contributing to the raising of its
* priority level, the second mutex M2 must be acquired by task A after task
* A's priority level was bumped due to owning the first mutex M1. When
* releasing the mutex, task A must release M2 before it releases M1. Failure
* to follow this nested model may result in tasks running at unexpected priority
* levels (too high, or too low).
*/
#include <microkernel.h>
#include <micro_private.h>
#include <nano_private.h>
/**
* @brief Reply to a mutex lock request.
*
* This routine replies to a mutex lock request. This will occur if either
* the waiting task times out or acquires the mutex lock.
*
* @param A k_args
*
* @return N/A
*/
void _k_mutex_lock_reply(
struct k_args *A /* pointer to mutex lock reply request arguments */
)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
struct _k_mutex_struct *Mutex; /* pointer to internal mutex structure */
struct k_args *PrioChanger; /* used to change a task's priority level */
struct k_args *FirstWaiter; /* pointer to first task in wait queue */
kpriority_t newPriority; /* priority level to which to drop */
int MutexId; /* mutex ID obtained from request args */
if (A->Time.timer) {
FREETIMER(A->Time.timer);
}
if (A->Comm == _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT) {/* Timeout case */
REMOVE_ELM(A);
A->Time.rcode = RC_TIME;
MutexId = A->args.l1.mutex;
Mutex = (struct _k_mutex_struct *)MutexId;
FirstWaiter = Mutex->waiters;
/*
* When timing out, there are two cases to consider.
* 1. There are no waiting tasks.
* - As there are no waiting tasks, this mutex is no longer
* involved in priority inheritance. It's current priority
* level should be dropped (if needed) to the original
* priority level.
* 2. There is at least one waiting task in a priority ordered
* list.
* - Depending upon the priority level of the first
* waiting task, the owner task's original priority and
* the ceiling priority, the owner's priority level may
* be dropped but not necessarily to the original priority
* level.
*/
newPriority = Mutex->original_owner_priority;
if (FirstWaiter != NULL) {
newPriority = (FirstWaiter->priority < newPriority)
? FirstWaiter->priority
: newPriority;
newPriority = (newPriority > CONFIG_PRIORITY_CEILING)
? newPriority
: CONFIG_PRIORITY_CEILING;
}
if (Mutex->current_owner_priority != newPriority) {
GETARGS(PrioChanger);
PrioChanger->alloc = true;
PrioChanger->Comm = _K_SVC_TASK_PRIORITY_SET;
PrioChanger->priority = newPriority;
PrioChanger->args.g1.task = Mutex->owner;
PrioChanger->args.g1.prio = newPriority;
SENDARGS(PrioChanger);
Mutex->current_owner_priority = newPriority;
}
} else {/* LOCK_RPL: Reply case */
A->Time.rcode = RC_OK;
}
#else
/* LOCK_RPL: Reply case */
A->Time.rcode = RC_OK;
#endif
_k_state_bit_reset(A->Ctxt.task, TF_LOCK);
}
/**
* @brief Reply to a mutex lock request with timeout.
*
* This routine replies to a mutex lock request. This will occur if either
* the waiting task times out or acquires the mutex lock.
*
* @param A Pointer to a k_args structure.
*
* @return N/A
*/
void _k_mutex_lock_reply_timeout(struct k_args *A)
{
_k_mutex_lock_reply(A);
}
/**
* @brief Process a mutex lock request
*
* This routine processes a mutex lock request (LOCK_REQ). If the mutex
* is already locked, and the timeout is non-zero then the priority inheritance
* algorithm may be applied to prevent priority inversion scenarios.
*
* @param A k_args
*
* @return N/A
*/
void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock
* request arguments
*/
)
{
struct _k_mutex_struct *Mutex; /* pointer to internal mutex structure */
int MutexId; /* mutex ID obtained from lock request */
struct k_args *PrioBooster; /* used to change a task's priority level */
kpriority_t BoostedPrio; /* new "boosted" priority level */
MutexId = A->args.l1.mutex;
Mutex = (struct _k_mutex_struct *)MutexId;
if (Mutex->level == 0 || Mutex->owner == A->args.l1.task) {
/* The mutex is either unowned or this is a nested lock. */
#ifdef CONFIG_OBJECT_MONITOR
Mutex->count++;
#endif
Mutex->owner = A->args.l1.task;
/*
* Assign the current owner's priority from the priority found
* in the current task's task object: the priority stored there
* may be more recent than the one stored in struct k_args.
*/
Mutex->current_owner_priority = _k_current_task->priority;
/*
* Save the original priority when first acquiring the lock (but
* not on nested locks). The original priority level only
* reflects the priority level of the requesting task at the
* time the lock is acquired. Consequently, if the requesting
* task is already involved in priority inheritance, this
* original priority reflects its "boosted" priority.
*/
if (Mutex->level == 0) {
Mutex->original_owner_priority = Mutex->current_owner_priority;
}
Mutex->level++;
A->Time.rcode = RC_OK;
} else {
/* The mutex is owned by another task. */
#ifdef CONFIG_OBJECT_MONITOR
Mutex->num_conflicts++;
#endif
if (likely(A->Time.ticks != TICKS_NONE)) {
/*
* A non-zero timeout was specified. Ensure the
* priority saved in the request is up to date
*/
A->Ctxt.task = _k_current_task;
A->priority = _k_current_task->priority;
_k_state_bit_set(_k_current_task, TF_LOCK);
/* Note: Mutex->waiters is a priority sorted list */
INSERT_ELM(Mutex->waiters, A);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.ticks == TICKS_UNLIMITED) {
/* Request will not time out */
A->Time.timer = NULL;
} else {
/*
* Prepare to call _k_mutex_lock_reply() should
* the request time out.
*/
A->Comm = _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT;
_k_timeout_alloc(A);
}
#endif
if (A->priority < Mutex->current_owner_priority) {
/*
* The priority level of the owning task is less
* than that of the requesting task. Boost the
* priority level of the owning task to match
* the priority level of the requesting task.
* Note that the boosted priority level is
* limited to CONFIG_PRIORITY_CEILING.
*/
BoostedPrio = (A->priority > CONFIG_PRIORITY_CEILING)
? A->priority
: CONFIG_PRIORITY_CEILING;
if (BoostedPrio < Mutex->current_owner_priority) {
/* Boost the priority level */
GETARGS(PrioBooster);
PrioBooster->alloc = true;
PrioBooster->Comm = _K_SVC_TASK_PRIORITY_SET;
PrioBooster->priority = BoostedPrio;
PrioBooster->args.g1.task = Mutex->owner;
PrioBooster->args.g1.prio = BoostedPrio;
SENDARGS(PrioBooster);
Mutex->current_owner_priority = BoostedPrio;
}
}
} else {
/*
* ERROR. The mutex is locked by another task and
* this is an immediate lock request (timeout = 0).
*/
A->Time.rcode = RC_FAIL;
}
}
}
int task_mutex_lock(kmutex_t mutex, int32_t timeout)
{
struct k_args A; /* argument packet */
A.Comm = _K_SVC_MUTEX_LOCK_REQUEST;
A.Time.ticks = timeout;
A.args.l1.mutex = mutex;
A.args.l1.task = _k_current_task->id;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
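/*
 * Illustrative usage sketch (not part of the original file): nested locking
 * in the order required by the priority inheritance rules described in the
 * file header comment. MUTEX1 and MUTEX2 are assumed to be mutexes defined
 * via the project's MDEF file, and task_mutex_unlock() is presumed to be
 * the public wrapper around _task_mutex_unlock().
 */
static void example_mutex_nesting(void)
{
	if (task_mutex_lock(MUTEX1, TICKS_UNLIMITED) != RC_OK) {
		return;
	}
	if (task_mutex_lock(MUTEX2, 10) == RC_OK) {	/* acquired after MUTEX1 */
		/* ... critical section protected by both mutexes ... */
		_task_mutex_unlock(MUTEX2);	/* release in reverse order */
	}
	_task_mutex_unlock(MUTEX1);
}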
/**
* @brief Process a mutex unlock request
*
* This routine processes a mutex unlock request (UNLOCK). If the mutex
* was involved in priority inheritance, then it will change the priority level
* of the current owner to the priority level it had when it acquired the
* mutex.
*
* @param A pointer to mutex unlock request arguments
*
* @return N/A
*/
void _k_mutex_unlock(struct k_args *A)
{
struct _k_mutex_struct *Mutex; /* pointer internal mutex structure */
int MutexId; /* mutex ID obtained from unlock request */
struct k_args *PrioDowner; /* used to change a task's priority level */
MutexId = A->args.l1.mutex;
Mutex = (struct _k_mutex_struct *)MutexId;
if (Mutex->owner == A->args.l1.task && --(Mutex->level) == 0) {
/*
* The requesting task owns the mutex and all locks
* have been released.
*/
struct k_args *X;
#ifdef CONFIG_OBJECT_MONITOR
Mutex->count++;
#endif
if (Mutex->current_owner_priority != Mutex->original_owner_priority) {
/*
* This mutex is involved in priority inheritance.
* Send a request to revert the priority level of
* the owning task back to its priority level when
* it first acquired the mutex.
*/
GETARGS(PrioDowner);
PrioDowner->alloc = true;
PrioDowner->Comm = _K_SVC_TASK_PRIORITY_SET;
PrioDowner->priority = Mutex->original_owner_priority;
PrioDowner->args.g1.task = Mutex->owner;
PrioDowner->args.g1.prio = Mutex->original_owner_priority;
SENDARGS(PrioDowner);
}
X = Mutex->waiters;
if (X != NULL) {
/*
* At least one task was waiting for the mutex.
* Make the first task in the queue the new
* owner of the mutex.
*/
Mutex->waiters = X->next;
Mutex->owner = X->args.l1.task;
Mutex->level = 1;
Mutex->current_owner_priority = X->priority;
Mutex->original_owner_priority = X->priority;
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (X->Time.timer) {
/*
* Trigger a call to _k_mutex_lock_reply()--it
* will send a reply with a return code of
* RC_OK.
*/
_k_timeout_cancel(X);
X->Comm = _K_SVC_MUTEX_LOCK_REPLY;
} else {
#endif
/*
* There is no timer to update.
* Set the return code.
*/
X->Time.rcode = RC_OK;
_k_state_bit_reset(X->Ctxt.task, TF_LOCK);
#ifdef CONFIG_SYS_CLOCK_EXISTS
}
#endif
} else {
/* No task is waiting in the queue. */
Mutex->owner = ANYTASK;
Mutex->level = 0;
}
}
}
/**
* @brief Mutex unlock kernel service
*
* This routine is the entry to the mutex unlock kernel service.
*
* @param mutex mutex to unlock
*
* @return N/A
*/
void _task_mutex_unlock(kmutex_t mutex)
{
struct k_args A; /* argument packet */
A.Comm = _K_SVC_MUTEX_UNLOCK;
A.args.l1.mutex = mutex;
A.args.l1.task = _k_current_task->id;
KERNEL_ENTRY(&A);
}
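/*
 * Example (not part of the original sources): a minimal usage sketch of the
 * mutex services above. DEMO_MUTEX is a hypothetical mutex object assumed to
 * have been defined in the project file by sysgen.
 */
static void demo_mutex_usage(void)
{
	/* Wait up to 10 ticks for DEMO_MUTEX; give up on timeout or failure. */
	if (task_mutex_lock(DEMO_MUTEX, 10) != RC_OK) {
		return;
	}
	/* ... critical section protected by DEMO_MUTEX ... */
	_task_mutex_unlock(DEMO_MUTEX);
}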


@@ -1,156 +0,0 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Nanokernel object kernel service
*
* This module provides routines used by the nanokernel for pending/unpending
* microkernel tasks on nanokernel objects.
*/
#include <micro_private.h>
#include <toolchain.h>
#include <wait_q.h>
#include <nanokernel.h>
#include <microkernel/event.h>
#include <drivers/system_timer.h>
extern void _task_nop(void);
extern void _nano_nop(void);
/**
* @brief Pend a task on a nanokernel object
*
* This routine pends a task on a nanokernel object. It is expected to only
* be called by internal nanokernel code with the interrupts locked.
*
* @return N/A
*/
void _task_nano_pend_task(struct _nano_queue *queue, int32_t timeout)
{
_NANO_TIMEOUT_ADD(queue, timeout);
/* Add task to nanokernel object's task wait queue */
_nano_wait_q_put(queue);
_k_state_bit_set(_k_current_task, TF_NANO);
_task_nop(); /* Trigger microkernel scheduler */
}
#ifdef CONFIG_NANO_TIMERS
/**
* @brief Pend a task on a nanokernel timer
*
* This routine pends a task on a nanokernel timer. It is expected to only
* be called by internal nanokernel code with the interrupts locked.
*
* @return N/A
*/
void _task_nano_timer_pend_task(struct nano_timer *timer)
{
struct _nano_timeout *t = &timer->timeout_data;
t->tcs = (struct tcs *) _k_current_task->workspace;
_k_state_bit_set(_k_current_task, TF_NANO);
_task_nop(); /* Trigger microkernel scheduler */
}
/**
* @brief Ready the task waiting on a nanokernel timer
*
* This routine readies the task that was waiting on a nanokernel timer.
* It is expected to only be called by internal nanokernel code (fiber or
* ISR context) with the interrupts locked.
*
* @return N/A
*/
void _nano_timer_task_ready(void *ptr)
{
struct k_task *uk_task_ptr = ptr;
_k_state_bit_reset(uk_task_ptr, TF_NANO);
_nano_nop(); /* Trigger microkernel scheduler */
}
/**
* @brief Ready the task waiting on a nanokernel timer
*
* This routine readies the task that was waiting on a nanokernel timer.
* It is expected to only be called by internal nanokernel code (task context)
* with the interrupts locked.
*
* @return N/A
*/
void _task_nano_timer_task_ready(void *ptr)
{
struct k_task *uk_task_ptr = ptr;
_k_state_bit_reset(uk_task_ptr, TF_NANO);
_task_nop(); /* Trigger microkernel scheduler */
}
#endif /* CONFIG_NANO_TIMERS */
/**
* @brief Ready a microkernel task due to timeout
*
* This routine makes a microkernel task ready. As it is invoked in the
* context of the kernel server fiber, there is no need to explicitly trigger
* the microkernel task scheduler here. Interrupts are already locked.
*
* @return N/A
*/
void _nano_task_ready(void *ptr)
{
struct k_task *uk_task_ptr = ptr;
_k_state_bit_reset(uk_task_ptr, TF_NANO);
}
/**
* @brief Unpend all tasks from nanokernel object
*
* This routine unpends all tasks from the nanokernel object's task queue.
* It is expected to only be called by internal nanokernel code with
* the interrupts locked.
*
* @return Number of tasks that were unpended
*/
int _nano_unpend_tasks(struct _nano_queue *queue)
{
struct tcs *task = (struct tcs *)queue->head;
struct tcs *prev;
int num = 0;
/* Drain the nanokernel object's waiting task queue */
while (task != NULL) {
_nano_timeout_abort(task);
_k_state_bit_reset(task->uk_task_ptr, TF_NANO);
prev = task;
task = task->link;
prev->link = NULL;
num++;
}
_nano_wait_q_reset(queue);
return num;
}


@@ -1,79 +0,0 @@
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief "do nothing" kernel service
*
* This module provides a "do nothing" kernel service.
*
* This service is primarily used by other kernel services that need a way to
* resume the execution of a kernel request that could not be completed in a
* single invocation of the _k_server fiber. However, it can also be used by
* a task to measure the overhead involved in issuing a kernel service request.
*/
#include <micro_private.h>
#include <toolchain.h>
#include <sections.h>
/**
*
* @brief Perform "do nothing" kernel request
*
* @return N/A
*/
void _k_nop(struct k_args *A)
{
if (A->alloc) {
FREEARGS(A);
}
}
/**
*
* @brief "do nothing" kernel request
*
* This routine is a request for the _k_server to run a "do nothing" routine.
*
* @return N/A
*/
void _task_nop(void)
{
struct k_args A;
A.Comm = _K_SVC_NOP;
KERNEL_ENTRY(&A);
}
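/*
 * Example (not part of the original sources): a rough sketch of using the
 * "do nothing" request to estimate kernel service overhead, as suggested in
 * the file description above. The tick-read routine sys_tick_get_32() is an
 * assumption about the legacy API; any monotonic time source would do.
 */
static uint32_t demo_nop_overhead(void)
{
	uint32_t start = sys_tick_get_32();
	int i;

	for (i = 0; i < 1000; i++) {
		_task_nop();	/* one round trip through the _k_server fiber */
	}
	return sys_tick_get_32() - start;	/* elapsed ticks for 1000 requests */
}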
/**
* @brief "do nothing" kernel request
*
* This routine is a request for the _k_server to run a "do nothing" routine.
* It is invoked by the nanokernel internals to trigger the microkernel task
* scheduler.
*
* @return N/A
*/
void _nano_nop(void)
{
struct k_args *A;
GETARGS(A);
A->Comm = _K_SVC_NOP;
A->alloc = true;
SENDARGS(A);
}


@@ -1,45 +0,0 @@
/* offload to fiber kernel service */
/*
* Copyright (c) 1997-2010, 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <micro_private.h>
#include <sections.h>
/**
* @brief Process an "offload to fiber" request
*
* This routine simply invokes the requested function from within the context
* of the _k_server() fiber and saves the result.
* @param A Arguments
*
* @return N/A
*/
void _k_offload_to_fiber(struct k_args *A)
{
A->args.u1.rval = (*A->args.u1.func)(A->args.u1.argp);
}
int task_offload_to_fiber(int (*func)(), void *argp)
{
struct k_args A;
A.Comm = _K_SVC_OFFLOAD_TO_FIBER;
A.args.u1.func = func;
A.args.u1.argp = argp;
KERNEL_ENTRY(&A);
return A.args.u1.rval;
}
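/*
 * Example (not part of the original sources): a sketch of offloading work to
 * the _k_server() fiber, where it runs without being preempted by other
 * microkernel tasks. The demo_* names are hypothetical.
 */
static int demo_bump_counter(void *arg)
{
	int *counter = arg;

	(*counter)++;	/* executes inside the _k_server fiber */
	return *counter;
}

static int demo_offload(void)
{
	static int counter;

	return task_offload_to_fiber(demo_bump_counter, &counter);
}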


@@ -1,186 +0,0 @@
/*
* Copyright (c) 1997-2010, 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Pipe kernel services
*/
#include <micro_private.h>
#include <k_pipe_buffer.h>
#include <k_pipe_util.h>
#include <microkernel/pipe.h>
#include <misc/util.h>
extern kpipe_t _k_pipe_ptr_start[];
extern kpipe_t _k_pipe_ptr_end[];
/**
* @brief Initialize kernel pipe subsystem
*
* Performs any initialization of statically-defined pipes that wasn't done
* at build time. (Note: most pipe structure fields are set to zero by sysgen.)
*
* @return N/A
*/
void _k_pipe_init(void)
{
kpipe_t *pipeId;
struct _k_pipe_struct *pipe_ptr;
for (pipeId = _k_pipe_ptr_start; pipeId < _k_pipe_ptr_end; pipeId++) {
pipe_ptr = (struct _k_pipe_struct *)(*pipeId);
BuffInit((unsigned char *)pipe_ptr->Buffer,
&(pipe_ptr->buffer_size), &pipe_ptr->desc);
}
}
int task_pipe_get(kpipe_t id, void *buffer,
int bytes_to_read, int *bytes_read,
K_PIPE_OPTION options, int32_t timeout)
{
struct k_args A;
/*
* some callers do not check the function's return value,
* but immediately use bytes_read; make sure it always
* has a sane value, even when we return failure immediately
* (see below)
*/
*bytes_read = 0;
if (unlikely(bytes_to_read % SIZEOFUNIT_TO_OCTET(1))) {
return RC_ALIGNMENT;
}
if (unlikely(bytes_to_read == 0)) {
/*
* not allowed because enlisted requests with zero size
* will hang in _k_pipe_process()
*/
return RC_FAIL;
}
if (unlikely(options == _0_TO_N && timeout != TICKS_NONE)) {
return RC_FAIL;
}
A.priority = _k_current_task->priority;
A.Comm = _K_SVC_PIPE_GET_REQUEST;
A.Time.ticks = timeout;
A.args.pipe_req.req_info.pipe.id = id;
A.args.pipe_req.req_type.sync.total_size = bytes_to_read;
A.args.pipe_req.req_type.sync.data_ptr = buffer;
_k_pipe_option_set(&A.args, options);
_k_pipe_request_type_set(&A.args, _SYNCREQ);
KERNEL_ENTRY(&A);
*bytes_read = A.args.pipe_ack.xferred_size;
return A.Time.rcode;
}
int task_pipe_put(kpipe_t id, void *buffer,
int bytes_to_write, int *bytes_written,
K_PIPE_OPTION options, int32_t timeout)
{
struct k_args A;
/*
* some callers do not check the function's return value,
* but immediately use bytes_written; make sure it always
* has a sane value, even when we return failure immediately
* (see below)
*/
*bytes_written = 0;
if (unlikely(bytes_to_write % SIZEOFUNIT_TO_OCTET(1))) {
return RC_ALIGNMENT;
}
if (unlikely(bytes_to_write == 0)) {
/*
* not allowed because enlisted requests with zero size
* will hang in _k_pipe_process()
*/
return RC_FAIL;
}
if (unlikely(options == _0_TO_N && timeout != TICKS_NONE)) {
return RC_FAIL;
}
A.priority = _k_current_task->priority;
A.Comm = _K_SVC_PIPE_PUT_REQUEST;
A.Time.ticks = timeout;
A.args.pipe_req.req_info.pipe.id = id;
A.args.pipe_req.req_type.sync.total_size = bytes_to_write;
A.args.pipe_req.req_type.sync.data_ptr = buffer;
_k_pipe_option_set(&A.args, options);
_k_pipe_request_type_set(&A.args, _SYNCREQ);
KERNEL_ENTRY(&A);
*bytes_written = A.args.pipe_ack.xferred_size;
return A.Time.rcode;
}
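/*
 * Example (not part of the original sources): a minimal sketch of the
 * synchronous pipe calls above. DEMO_PIPE is a hypothetical pipe object
 * assumed to have been defined by sysgen; _ALL_N and _1_TO_N are the usual
 * transfer options of this legacy pipe API.
 */
static void demo_pipe_usage(void)
{
	char tx[16] = "hello, pipe";
	char rx[16];
	int xferred;

	/* Block until every byte has been accepted by DEMO_PIPE. */
	if (task_pipe_put(DEMO_PIPE, tx, sizeof(tx), &xferred,
			  _ALL_N, TICKS_UNLIMITED) != RC_OK) {
		return;
	}
	/* Read whatever is available, waiting at most 50 ticks. */
	(void)task_pipe_get(DEMO_PIPE, rx, sizeof(rx), &xferred,
			    _1_TO_N, 50);
}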
/**
* @brief Asynchronous pipe write request
*
* This routine attempts to write data from a memory pool block to the
* specified pipe. (Note that partial transfers and timeouts are not
* supported, unlike the case for synchronous write requests.)
*
* @return RC_OK, RC_FAIL, or RC_ALIGNMENT
*/
int _task_pipe_block_put(kpipe_t Id, struct k_block Block,
int iReqSize2Xfer, ksem_t sema)
{
unsigned int iSize2Xfer;
struct k_args A;
iSize2Xfer = min((unsigned)iReqSize2Xfer, (unsigned)(Block.req_size));
if (unlikely(iSize2Xfer % SIZEOFUNIT_TO_OCTET(1))) {
return RC_ALIGNMENT;
}
if (unlikely(iSize2Xfer == 0)) {
/*
* not allowed because enlisted requests with zero size
* will hang in _k_pipe_process()
*/
return RC_FAIL;
}
A.priority = _k_current_task->priority;
A.Comm = _K_SVC_PIPE_PUT_REQUEST;
A.Time.ticks = TICKS_UNLIMITED;
/* same behavior in flow as a blocking call w/o a timeout */
A.args.pipe_req.req_info.pipe.id = Id;
A.args.pipe_req.req_type.async.block = Block;
A.args.pipe_req.req_type.async.total_size = iSize2Xfer;
A.args.pipe_req.req_type.async.sema = sema;
_k_pipe_request_type_set(&A.args, _ASYNCREQ);
_k_pipe_option_set(&A.args, _ALL_N); /* force ALL_N */
KERNEL_ENTRY(&A);
return RC_OK;
}
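/*
 * Example (not part of the original sources): a sketch of the asynchronous
 * block put flow above. DEMO_POOL, DEMO_PIPE and DEMO_SEM are hypothetical
 * sysgen-defined objects, and the task_mem_pool_alloc() call is an
 * assumption about the legacy memory pool API.
 */
static void demo_pipe_block_put(void)
{
	struct k_block block;

	/* Grab a 64-byte block without waiting. */
	if (task_mem_pool_alloc(&block, DEMO_POOL, 64, TICKS_NONE) != RC_OK) {
		return;
	}
	/* ... fill block.pointer_to_data with up to 64 bytes of payload ... */

	/* Hand the block to the pipe; DEMO_SEM is given once it is released. */
	(void)_task_pipe_block_put(DEMO_PIPE, block, 64, DEMO_SEM);
	(void)task_sem_take(DEMO_SEM, TICKS_UNLIMITED);
}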


@@ -1,825 +0,0 @@
/* k_pipe_buffer.c */
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Implementation remarks:
* - when using a floating end pointer: do not use pipe_desc->buffer_size for
* (pipe_desc->end_ptr - pipe_desc->begin_ptr)
*/
#include <microkernel/base_api.h>
#include <k_pipe_buffer.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
#include <misc/__assert.h>
#define STORE_NBR_MARKERS
/* NOTE: the number of pending write and read Xfers is always stored,
* as it is required for the pipes to function properly. It is stored in the
* pipe descriptor fields num_pending_writes and num_pending_reads.
*
* In the Writer and Reader MarkersList, the number of markers (==nbr. of
* unreleased Xfers) is monitored as well. They actually equal
* num_pending_writes and num_pending_reads.
* Their existence depends on STORE_NBR_MARKERS. A reason to have them
* additionally is that some extra consistency checking is performed in the
* markers manipulation functionality itself.
* Drawback: the number of pending write Xfers is stored twice (acceptable
* for test purposes).
*/
#define CHECK_BUFFER_POINTER(data_ptr) \
__ASSERT_NO_MSG(desc->begin_ptr <= data_ptr && data_ptr < desc->end_ptr)
static void pipe_intrusion_check(struct _k_pipe_desc *desc,
unsigned char *begin_ptr,
int size);
/**
* Markers
*/
static int MarkerFindFree(struct _k_pipe_marker markers[])
{
struct _k_pipe_marker *pM = markers;
int i;
for (i = 0; i < MAXNBR_PIPE_MARKERS; i++, pM++) {
if (pM->pointer == NULL) {
break;
}
}
if (i == MAXNBR_PIPE_MARKERS) {
i = -1;
}
return i;
}
static void MarkerLinkToListAfter(struct _k_pipe_marker markers[],
int iMarker,
int iNewMarker)
{
int iNextMarker; /* index of next marker in original list */
/* let the original list be aware of the new marker */
if (iMarker != -1) {
iNextMarker = markers[iMarker].next;
markers[iMarker].next = iNewMarker;
if (iNextMarker != -1) {
markers[iNextMarker].prev = iNewMarker;
} else {
/* there was no next marker */
}
} else {
iNextMarker = -1; /* there wasn't even a marker */
}
/* link the new marker with the marker and next marker */
markers[iNewMarker].prev = iMarker;
markers[iNewMarker].next = iNextMarker;
}
static int MarkerAddLast(struct _k_pipe_marker_list *pMarkerList,
unsigned char *pointer,
int size,
bool buffer_xfer_busy)
{
int i = MarkerFindFree(pMarkerList->markers);
if (i == -1) {
return i;
}
pMarkerList->markers[i].pointer = pointer;
pMarkerList->markers[i].size = size;
pMarkerList->markers[i].buffer_xfer_busy = buffer_xfer_busy;
if (pMarkerList->first_marker == -1) {
__ASSERT_NO_MSG(pMarkerList->last_marker == -1);
pMarkerList->first_marker = i; /* we still need to set prev & next */
} else {
__ASSERT_NO_MSG(pMarkerList->last_marker != -1);
__ASSERT_NO_MSG(
pMarkerList->markers[pMarkerList->last_marker].next == -1);
}
MarkerLinkToListAfter(pMarkerList->markers, pMarkerList->last_marker, i);
__ASSERT_NO_MSG(pMarkerList->markers[i].next == -1);
pMarkerList->last_marker = i;
#ifdef STORE_NBR_MARKERS
pMarkerList->num_markers++;
__ASSERT_NO_MSG(pMarkerList->num_markers > 0);
#endif
return i;
}
static void MarkerUnlinkFromList(struct _k_pipe_marker markers[],
int iMarker,
int *piPredecessor,
int *piSuccessor)
{
int iNextMarker = markers[iMarker].next;
int iPrevMarker = markers[iMarker].prev;
/* remove the marker from the list */
markers[iMarker].next = -1;
markers[iMarker].prev = -1;
/* repair the chain */
if (iPrevMarker != -1) {
markers[iPrevMarker].next = iNextMarker;
}
if (iNextMarker != -1) {
markers[iNextMarker].prev = iPrevMarker;
}
*piPredecessor = iPrevMarker;
*piSuccessor = iNextMarker;
}
static void MarkerDelete(struct _k_pipe_marker_list *pMarkerList, int index)
{
int i;
int iPredecessor;
int iSuccessor;
i = index;
__ASSERT_NO_MSG(i != -1);
pMarkerList->markers[i].pointer = NULL;
MarkerUnlinkFromList(pMarkerList->markers, i,
&iPredecessor, &iSuccessor);
/* update first/last info */
if (i == pMarkerList->last_marker) {
pMarkerList->last_marker = iPredecessor;
}
if (i == pMarkerList->first_marker) {
pMarkerList->first_marker = iSuccessor;
}
#ifdef STORE_NBR_MARKERS
pMarkerList->num_markers--;
__ASSERT_NO_MSG(pMarkerList->num_markers >= 0);
if (pMarkerList->num_markers == 0) {
__ASSERT_NO_MSG(pMarkerList->first_marker == -1);
__ASSERT_NO_MSG(pMarkerList->last_marker == -1);
}
#endif
}
static void MarkersClear(struct _k_pipe_marker_list *pMarkerList)
{
struct _k_pipe_marker *pM = pMarkerList->markers;
int i;
for (i = 0; i < MAXNBR_PIPE_MARKERS; i++, pM++) {
memset(pM, 0, sizeof(struct _k_pipe_marker));
pM->next = -1;
pM->prev = -1;
}
#ifdef STORE_NBR_MARKERS
pMarkerList->num_markers = 0;
#endif
pMarkerList->first_marker = -1;
pMarkerList->last_marker = -1;
pMarkerList->post_wrap_around_marker = -1;
}
/**/
/* note on setting/clearing markers/guards:
*
* If there is at least one marker, there is a guard and it equals one of
* the markers; if there are no markers (*), there is no guard.
* Consequently, if a marker is added when there were none, the guard will
* equal it. If additional markers are added, the guard will not change.
* However, if a marker is deleted:
* if it equals the guard a new guard must be selected (**)
* if not, guard doesn't change
*
* (*) we need to keep track of how many markers there are, or we can
* inspect the guard
* (**) for this, the complete markers table needs to be investigated
*/
/*
* This function will see if one or more 'areas' in the buffer can be made
* available (either for writing xor reading).
* Note: such a series of areas starts from the beginning.
*/
static int ScanMarkers(struct _k_pipe_marker_list *pMarkerList,
int *piSizeBWA, int *piSizeAWA, int *piNbrPendingXfers)
{
struct _k_pipe_marker *pM;
bool bMarkersAreNowAWA;
int index;
index = pMarkerList->first_marker;
__ASSERT_NO_MSG(index != -1);
bMarkersAreNowAWA = false;
do {
int index_next;
__ASSERT_NO_MSG(index == pMarkerList->first_marker);
if (index == pMarkerList->post_wrap_around_marker) {
/* from now on, everything is AWA */
bMarkersAreNowAWA = true;
}
pM = &(pMarkerList->markers[index]);
if (pM->buffer_xfer_busy == true) {
break;
}
if (!bMarkersAreNowAWA) {
*piSizeBWA += pM->size;
} else {
*piSizeAWA += pM->size;
}
index_next = pM->next;
/* pMarkerList->first_marker will be updated */
MarkerDelete(pMarkerList, index);
/* adjust *piNbrPendingXfers */
if (piNbrPendingXfers) {
__ASSERT_NO_MSG(*piNbrPendingXfers >= 0);
(*piNbrPendingXfers)--;
}
index = index_next;
} while (index != -1);
__ASSERT_NO_MSG(index == pMarkerList->first_marker);
if (bMarkersAreNowAWA) {
pMarkerList->post_wrap_around_marker =
pMarkerList->first_marker;
}
#ifdef STORE_NBR_MARKERS
if (pMarkerList->num_markers == 0) {
__ASSERT_NO_MSG(pMarkerList->first_marker == -1);
__ASSERT_NO_MSG(pMarkerList->last_marker == -1);
__ASSERT_NO_MSG(pMarkerList->post_wrap_around_marker == -1);
}
#endif
return pMarkerList->first_marker;
}
/**
* General
*/
void BuffInit(unsigned char *pBuffer,
int *piBuffSize,
struct _k_pipe_desc *desc)
{
desc->begin_ptr = pBuffer;
desc->buffer_size = *piBuffSize;
/* reset all pointers */
desc->end_ptr = desc->begin_ptr +
OCTET_TO_SIZEOFUNIT(desc->buffer_size);
desc->original_end_ptr = desc->end_ptr;
/* assumed it is allowed */
desc->buffer_state = BUFF_EMPTY;
desc->end_ptr = desc->original_end_ptr;
desc->write_ptr = desc->begin_ptr;
desc->write_guard = NULL;
desc->wrap_around_write = false;
desc->read_ptr = desc->begin_ptr;
desc->read_guard = NULL;
desc->wrap_around_read = true; /* YES!! */
desc->free_space_count = desc->buffer_size;
desc->free_space_post_wrap_around = 0;
desc->num_pending_reads = 0;
desc->available_data_count = 0;
desc->available_data_post_wrap_around = 0;
desc->num_pending_writes = 0;
MarkersClear(&desc->write_markers);
MarkersClear(&desc->read_markers);
}
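/*
 * Example (not part of the original sources): a minimal sketch of setting up
 * a standalone pipe buffer descriptor with BuffInit(). In the kernel proper
 * this is done by _k_pipe_init() using the storage generated by sysgen; the
 * demo_* names are hypothetical.
 */
static unsigned char demo_storage[256];

static void demo_buffer_init(struct _k_pipe_desc *desc)
{
	int size = sizeof(demo_storage);

	BuffInit(demo_storage, &size, desc);	/* desc now manages demo_storage */
}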
int CalcFreeSpace(struct _k_pipe_desc *desc, int *free_space_count_ptr,
int *free_space_post_wrap_around_ptr)
{
unsigned char *pStart = desc->write_ptr;
unsigned char *pStop = desc->read_ptr;
if (desc->write_guard != NULL) {
pStop = desc->write_guard;
} else {
/*
* No write guard is set. If the buffer is empty as well, the
* whole buffer is free, so report it as the wrap-around pair
* [write_ptr, end_ptr) plus [begin_ptr, read_ptr). (With a
* write guard present, even an empty buffer is handled by the
* normal [Start,Stop] interval calculation below.)
*/
if (desc->buffer_state == BUFF_EMPTY) {
*free_space_count_ptr =
SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*free_space_post_wrap_around_ptr =
SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
return (*free_space_count_ptr + *free_space_post_wrap_around_ptr);
/* this sum equals end_ptr-begin_ptr */
}
}
/*
* on the other hand, if buffer_state is full, we do not need a special
* flow; it will be correct as (pStop - pStart) equals 0
*/
if (pStop >= pStart) {
*free_space_count_ptr = SIZEOFUNIT_TO_OCTET(pStop - pStart);
*free_space_post_wrap_around_ptr = 0;
} else {
*free_space_count_ptr =
SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*free_space_post_wrap_around_ptr =
SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
}
return (*free_space_count_ptr + *free_space_post_wrap_around_ptr);
}
void BuffGetFreeSpace(struct _k_pipe_desc *desc,
int *piFreeSpaceTotal,
int *free_space_count_ptr,
int *free_space_post_wrap_around_ptr)
{
int free_space_count;
int free_space_post_wrap_around;
int iFreeSpaceTotal;
iFreeSpaceTotal =
CalcFreeSpace(desc, &free_space_count,
&free_space_post_wrap_around);
__ASSERT_NO_MSG(free_space_count == desc->free_space_count);
__ASSERT_NO_MSG(free_space_post_wrap_around == desc->free_space_post_wrap_around);
*piFreeSpaceTotal = iFreeSpaceTotal;
*free_space_count_ptr = desc->free_space_count;
*free_space_post_wrap_around_ptr = desc->free_space_post_wrap_around;
}
void BuffGetFreeSpaceTotal(struct _k_pipe_desc *desc, int *piFreeSpaceTotal)
{
int dummy1, dummy2;
*piFreeSpaceTotal = CalcFreeSpace(desc, &dummy1, &dummy2);
__ASSERT_NO_MSG(dummy1 == desc->free_space_count);
__ASSERT_NO_MSG(dummy2 == desc->free_space_post_wrap_around);
}
int BuffEmpty(struct _k_pipe_desc *desc)
{
/* 0==iAvailDataTotal is an INcorrect condition b/c of async behavior */
int iTotalFreeSpace;
BuffGetFreeSpaceTotal(desc, &iTotalFreeSpace);
return (desc->buffer_size == iTotalFreeSpace);
}
int CalcAvailData(struct _k_pipe_desc *desc, int *available_data_count_ptr,
int *available_data_post_wrap_around_ptr)
{
unsigned char *pStart = desc->read_ptr;
unsigned char *pStop = desc->write_ptr;
if (NULL != desc->read_guard) {
pStop = desc->read_guard;
} else {
/*
* No read guard is set. If the buffer is full as well, all of it is
* available data, so report it as the wrap-around pair
* [read_ptr, end_ptr) plus [begin_ptr, write_ptr). (With a read guard
* present, even a full buffer is handled by the normal [Start,Stop]
* interval calculation below.)
*/
if (BUFF_FULL == desc->buffer_state) {
*available_data_count_ptr =
SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*available_data_post_wrap_around_ptr =
SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
return (*available_data_count_ptr + *available_data_post_wrap_around_ptr);
/* this sum equals end_ptr-begin_ptr */
}
}
/*
* on the other hand, if buffer_state is empty, we do not need a
* special flow; it will be correct as (pStop - pStart) equals 0
*/
if (pStop >= pStart) {
*available_data_count_ptr = SIZEOFUNIT_TO_OCTET(pStop - pStart);
*available_data_post_wrap_around_ptr = 0;
} else {
*available_data_count_ptr =
SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*available_data_post_wrap_around_ptr =
SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
}
return (*available_data_count_ptr + *available_data_post_wrap_around_ptr);
}
void BuffGetAvailData(struct _k_pipe_desc *desc,
int *piAvailDataTotal,
int *available_data_count_ptr,
int *available_data_post_wrap_around_ptr)
{
int available_data_count;
int available_data_post_wrap_around;
int iAvailDataTotal;
iAvailDataTotal = CalcAvailData(desc, &available_data_count,
&available_data_post_wrap_around);
__ASSERT_NO_MSG(available_data_count == desc->available_data_count);
__ASSERT_NO_MSG(available_data_post_wrap_around == desc->available_data_post_wrap_around);
*piAvailDataTotal = iAvailDataTotal;
*available_data_count_ptr = desc->available_data_count;
*available_data_post_wrap_around_ptr =
desc->available_data_post_wrap_around;
}
void BuffGetAvailDataTotal(struct _k_pipe_desc *desc, int *piAvailDataTotal)
{
int dummy1, dummy2;
*piAvailDataTotal = CalcAvailData(desc, &dummy1, &dummy2);
__ASSERT_NO_MSG(dummy1 == desc->available_data_count);
__ASSERT_NO_MSG(dummy2 == desc->available_data_post_wrap_around);
}
int BuffFull(struct _k_pipe_desc *desc)
{
/* 0==iTotalFreeSpace is an INcorrect condition b/c of async behavior */
int iAvailDataTotal;
BuffGetAvailDataTotal(desc, &iAvailDataTotal);
return (desc->buffer_size == iAvailDataTotal);
}
/**
* Buffer en-queuing:
*/
static int AsyncEnQRegstr(struct _k_pipe_desc *desc, int size)
{
int i;
pipe_intrusion_check(desc, desc->write_ptr, size);
i = MarkerAddLast(&desc->write_markers, desc->write_ptr, size, true);
if (i != -1) {
/* adjust num_pending_writes */
__ASSERT_NO_MSG(desc->num_pending_writes >= 0);
desc->num_pending_writes++;
/* read_guard changes? */
if (desc->read_guard == NULL) {
desc->read_guard = desc->write_ptr;
}
__ASSERT_NO_MSG(desc->write_markers.markers
[desc->write_markers.first_marker].pointer ==
desc->read_guard);
/* post_wrap_around_marker changes? */
if (desc->write_markers.post_wrap_around_marker == -1 &&
desc->wrap_around_write) {
desc->write_markers.post_wrap_around_marker = i;
}
}
return i;
}
static void AsyncEnQFinished(struct _k_pipe_desc *desc, int iTransferID)
{
desc->write_markers.markers[iTransferID].buffer_xfer_busy = false;
if (desc->write_markers.first_marker == iTransferID) {
int iNewFirstMarker = ScanMarkers(&desc->write_markers,
&desc->available_data_count,
&desc->available_data_post_wrap_around,
&desc->num_pending_writes);
if (iNewFirstMarker != -1) {
desc->read_guard =
desc->write_markers.markers[iNewFirstMarker].pointer;
} else {
desc->read_guard = NULL;
}
}
}
int BuffEnQ(struct _k_pipe_desc *desc, int size, unsigned char **ppWrite)
{
int iTransferID;
if (BuffEnQA(desc, size, ppWrite, &iTransferID) == 0) {
return 0;
}
/* check ret value */
BuffEnQA_End(desc, iTransferID, size /* optional */);
return size;
}
int BuffEnQA(struct _k_pipe_desc *desc, int size, unsigned char **ppWrite,
int *piTransferID)
{
if (size > desc->free_space_count) {
return 0;
}
*piTransferID = AsyncEnQRegstr(desc, size);
if (*piTransferID == -1) {
return 0;
}
*ppWrite = desc->write_ptr;
/* adjust write pointer and free space*/
desc->write_ptr += OCTET_TO_SIZEOFUNIT(size);
if (desc->end_ptr == desc->write_ptr) {
desc->write_ptr = desc->begin_ptr;
desc->free_space_count = desc->free_space_post_wrap_around;
desc->free_space_post_wrap_around = 0;
desc->wrap_around_write = true;
desc->wrap_around_read = false;
desc->read_markers.post_wrap_around_marker = -1;
} else {
desc->free_space_count -= size;
}
if (desc->write_ptr == desc->read_ptr) {
desc->buffer_state = BUFF_FULL;
} else {
desc->buffer_state = BUFF_OTHER;
}
CHECK_BUFFER_POINTER(desc->write_ptr);
return size;
}
void BuffEnQA_End(struct _k_pipe_desc *desc, int iTransferID,
int size /* optional */)
{
ARG_UNUSED(size);
/* An asynchronous data transfer to the buffer has finished */
AsyncEnQFinished(desc, iTransferID);
}
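/*
 * Example (not part of the original sources): the producer-side calling
 * sequence for an asynchronous enqueue, mirroring how the pipe transfer code
 * uses this module: reserve space, copy the data in, then mark the transfer
 * finished so the write marker can be retired.
 */
static int demo_buffer_produce(struct _k_pipe_desc *desc,
			       const unsigned char *data, int size)
{
	unsigned char *write_ptr;
	int id;

	if (BuffEnQA(desc, size, &write_ptr, &id) == 0) {
		return 0;	/* not enough contiguous free space */
	}
	memcpy(write_ptr, data, size);	/* fill the reserved area */
	BuffEnQA_End(desc, id, size);	/* transfer done; retire the marker */
	return size;
}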
/**
* Buffer de-queuing:
*/
static int AsyncDeQRegstr(struct _k_pipe_desc *desc, int size)
{
int i;
pipe_intrusion_check(desc, desc->read_ptr, size);
i = MarkerAddLast(&desc->read_markers, desc->read_ptr, size, true);
if (i != -1) {
/* adjust num_pending_reads */
__ASSERT_NO_MSG(desc->num_pending_reads >= 0);
desc->num_pending_reads++;
/* write_guard changes? */
if (desc->write_guard == NULL) {
desc->write_guard = desc->read_ptr;
}
__ASSERT_NO_MSG(desc->read_markers.markers
[desc->read_markers.first_marker].pointer ==
desc->write_guard);
/* post_wrap_around_marker changes? */
if (desc->read_markers.post_wrap_around_marker == -1 &&
desc->wrap_around_read) {
desc->read_markers.post_wrap_around_marker = i;
}
}
return i;
}
static void AsyncDeQFinished(struct _k_pipe_desc *desc, int iTransferID)
{
desc->read_markers.markers[iTransferID].buffer_xfer_busy = false;
if (desc->read_markers.first_marker == iTransferID) {
int iNewFirstMarker = ScanMarkers(&desc->read_markers,
&desc->free_space_count,
&desc->free_space_post_wrap_around,
&desc->num_pending_reads);
if (iNewFirstMarker != -1) {
desc->write_guard =
desc->read_markers.markers[iNewFirstMarker].pointer;
} else {
desc->write_guard = NULL;
}
}
}
int BuffDeQ(struct _k_pipe_desc *desc, int size, unsigned char **ppRead)
{
int iTransferID;
if (BuffDeQA(desc, size, ppRead, &iTransferID) == 0) {
return 0;
}
BuffDeQA_End(desc, iTransferID, size /* optional */);
return size;
}
int BuffDeQA(struct _k_pipe_desc *desc, int size, unsigned char **ppRead,
int *piTransferID)
{
/* asynchronous data transfer; read guard pointers must be set */
if (size > desc->available_data_count) {
/* available data runs from the read pointer to the guard/end pointer */
return 0;
}
*piTransferID = AsyncDeQRegstr(desc, size);
if (*piTransferID == -1) {
return 0;
}
*ppRead = desc->read_ptr;
/* adjust read pointer and avail data */
desc->read_ptr += OCTET_TO_SIZEOFUNIT(size);
if (desc->end_ptr == desc->read_ptr) {
desc->read_ptr = desc->begin_ptr;
desc->available_data_count =
desc->available_data_post_wrap_around;
desc->available_data_post_wrap_around = 0;
desc->wrap_around_write = false;
desc->wrap_around_read = true;
desc->write_markers.post_wrap_around_marker = -1;
} else {
desc->available_data_count -= size;
}
if (desc->write_ptr == desc->read_ptr) {
desc->buffer_state = BUFF_EMPTY;
} else {
desc->buffer_state = BUFF_OTHER;
}
CHECK_BUFFER_POINTER(desc->read_ptr);
return size;
}
void BuffDeQA_End(struct _k_pipe_desc *desc, int iTransferID,
int size /* optional */)
{
ARG_UNUSED(size);
/* An asynchronous data transfer from the buffer has finished */
AsyncDeQFinished(desc, iTransferID);
}
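/*
 * Example (not part of the original sources): the matching consumer-side
 * sequence for an asynchronous dequeue: reserve the data region, copy it
 * out, then mark the transfer finished so the read marker can be retired.
 */
static int demo_buffer_consume(struct _k_pipe_desc *desc,
			       unsigned char *data, int size)
{
	unsigned char *read_ptr;
	int id;

	if (BuffDeQA(desc, size, &read_ptr, &id) == 0) {
		return 0;	/* not enough contiguous data available */
	}
	memcpy(data, read_ptr, size);	/* drain the reserved area */
	BuffDeQA_End(desc, id, size);	/* transfer done; retire the marker */
	return size;
}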
/**
* Buffer intrusion
*/
static bool AreasCheck4Intrusion(unsigned char *pBegin1, int iSize1,
unsigned char *pBegin2, int iSize2)
{
unsigned char *pEnd1;
unsigned char *pEnd2;
pEnd1 = pBegin1 + OCTET_TO_SIZEOFUNIT(iSize1);
pEnd2 = pBegin2 + OCTET_TO_SIZEOFUNIT(iSize2);
/*
* 2 tests are required to determine the status of the 2 areas,
* in terms of their position wrt each other
*/
if (pBegin2 >= pBegin1) {
/* check intrusion of pBegin2 in [pBegin1, pEnd1( */
if (pBegin2 < pEnd1) {
/* intrusion!! */
return true;
}
/*
* pBegin2 lies outside and to the right of the first
* area, intrusion is impossible
*/
return false;
}
/* pBegin2 lies to the left of (pBegin1, pEnd1) */
/* check end pointer: is pEnd2 in (pBegin1, pEnd1( ?? */
if (pEnd2 > pBegin1) {
/* intrusion!! */
return true;
}
/*
* pEnd2 lies outside and to the left of the first area,
* intrusion is impossible
*/
return false;
}
static void pipe_intrusion_check(struct _k_pipe_desc *desc,
unsigned char *begin_ptr,
int size)
{
/*
* check possible collision with all existing data areas,
* both for read and write areas
*/
int index;
struct _k_pipe_marker_list *pMarkerList;
/* write markers */
#ifdef STORE_NBR_MARKERS
/* first a small consistency check */
if (desc->write_markers.num_markers == 0) {
__ASSERT_NO_MSG(desc->write_markers.first_marker == -1);
__ASSERT_NO_MSG(desc->write_markers.last_marker == -1);
__ASSERT_NO_MSG(desc->write_markers.post_wrap_around_marker == -1);
}
#endif
pMarkerList = &desc->write_markers;
index = pMarkerList->first_marker;
while (index != -1) {
struct _k_pipe_marker *pM;
pM = &(pMarkerList->markers[index]);
if (AreasCheck4Intrusion(begin_ptr, size,
pM->pointer, pM->size) != 0) {
__ASSERT_NO_MSG(1 == 0);
}
index = pM->next;
}
/* read markers */
#ifdef STORE_NBR_MARKERS
/* first a small consistency check */
if (desc->read_markers.num_markers == 0) {
__ASSERT_NO_MSG(desc->read_markers.first_marker == -1);
__ASSERT_NO_MSG(desc->read_markers.last_marker == -1);
__ASSERT_NO_MSG(desc->read_markers.post_wrap_around_marker == -1);
}
#endif
pMarkerList = &desc->read_markers;
index = pMarkerList->first_marker;
while (index != -1) {
struct _k_pipe_marker *pM;
pM = &(pMarkerList->markers[index]);
if (AreasCheck4Intrusion(begin_ptr, size,
pM->pointer, pM->size) != 0) {
__ASSERT_NO_MSG(1 == 0);
}
index = pM->next;
}
}


@@ -1,262 +0,0 @@
/* command processing for pipe get operation */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <micro_private.h>
#include <k_pipe_util.h>
#include <toolchain.h>
#include <sections.h>
#include <misc/__assert.h>
/**
*
* @brief Process request command for a pipe get operation
*
* @return N/A
*/
void _k_pipe_get_request(struct k_args *RequestOrig)
{
struct k_args *Request;
struct k_args *RequestProc;
kpipe_t pipeId = RequestOrig->args.pipe_req.req_info.pipe.id;
/* Save the pointer to the task's TCB for rescheduling later */
RequestOrig->Ctxt.task = _k_current_task;
_k_state_bit_set(_k_current_task, TF_RECV);
mycopypacket(&Request, RequestOrig);
/* Now, we need a new packet for processing of the request;
* the Request package is too small b/c of space lost due to possible
* embedded local data
*/
mycopypacket(&RequestProc, Request);
RequestProc->args.pipe_xfer_req.req_info.pipe.ptr =
(struct _k_pipe_struct *)pipeId;
switch (_k_pipe_request_type_get(&RequestProc->args)) {
case _SYNCREQ:
RequestProc->args.pipe_xfer_req.data_ptr =
Request->args.pipe_req.req_type.sync.data_ptr;
RequestProc->args.pipe_xfer_req.total_size =
Request->args.pipe_req.req_type.sync.total_size;
break;
default:
break;
}
RequestProc->args.pipe_xfer_req.status = XFER_IDLE;
RequestProc->args.pipe_xfer_req.num_pending_xfers = 0;
RequestProc->args.pipe_xfer_req.xferred_size = 0;
RequestProc->next = NULL;
RequestProc->head = NULL;
switch (RequestProc->Time.ticks) {
case TICKS_NONE:
_k_pipe_time_type_set(&RequestProc->args, _TIME_NB);
break;
case TICKS_UNLIMITED:
_k_pipe_time_type_set(&RequestProc->args, _TIME_B);
break;
default:
_k_pipe_time_type_set(&RequestProc->args, _TIME_BT);
break;
}
/* start processing */
struct _k_pipe_struct *pipe_ptr;
pipe_ptr = RequestProc->args.pipe_xfer_req.req_info.pipe.ptr;
do {
int iData2ReadFromWriters;
int iAvailBufferData;
int iTotalData2Read;
int32_t ticks;
iData2ReadFromWriters = CalcAvailWriterData(pipe_ptr->writers);
iAvailBufferData =
pipe_ptr->desc.available_data_count +
pipe_ptr->desc.available_data_post_wrap_around;
iTotalData2Read =
iAvailBufferData + iData2ReadFromWriters;
if (iTotalData2Read == 0)
break; /* special case: not even enough data for a 1_TO_N request */
/* (possibly) do some processing */
ticks = RequestProc->Time.ticks;
RequestProc->Time.timer = NULL;
_k_pipe_process(pipe_ptr, NULL /* writer */, RequestProc /* reader */);
RequestProc->Time.ticks = ticks;
/* check if request was processed */
if (TERM_XXX & RequestProc->args.pipe_xfer_req.status) {
RequestProc->Time.timer = NULL; /* not really required */
return; /* not listed anymore --> completely processed */
}
} while (0);
/*
* if we got up to here, we did none or SOME (partial)
* processing on the request
*/
if (_TIME_NB != _k_pipe_time_type_get(&RequestProc->args)) {
/* call is blocking */
INSERT_ELM(pipe_ptr->readers, RequestProc);
/*
* NOTE: It is both faster and simpler to blindly assign the
* PIPE_GET_TIMEOUT microkernel command to the packet even though it
* is only useful in the finite timeout case.
*/
RequestProc->Comm = _K_SVC_PIPE_GET_TIMEOUT;
if (_k_pipe_time_type_get(&RequestProc->args) == _TIME_B) {
/*
* The reader specified TICKS_UNLIMITED, so NULL the timer.
*/
RequestProc->Time.timer = NULL;
return;
}
/* { TIME_BT } */
#ifdef CANCEL_TIMERS
if (RequestProc->args.pipe_xfer_req.xferred_size != 0) {
RequestProc->Time.timer = NULL;
} else
#endif
/* enlist a new timer into the timeout chain */
_k_timeout_alloc(RequestProc);
return;
}
/* call is non-blocking;
* Check if we don't have to queue it b/c it could not
* be processed at once
*/
RequestProc->Time.timer = NULL;
if (RequestProc->args.pipe_xfer_req.status == XFER_BUSY) {
INSERT_ELM(pipe_ptr->readers, RequestProc);
} else {
__ASSERT_NO_MSG(RequestProc->args.pipe_xfer_req.status == XFER_IDLE);
__ASSERT_NO_MSG(RequestProc->args.pipe_xfer_req.xferred_size == 0);
RequestProc->Comm = _K_SVC_PIPE_GET_REPLY;
_k_pipe_get_reply(RequestProc);
}
return;
}
/**
*
* @brief Process timeout command for a pipe get operation
*
* @return N/A
*/
void _k_pipe_get_timeout(struct k_args *ReqProc)
{
__ASSERT_NO_MSG(NULL != ReqProc->Time.timer);
myfreetimer(&(ReqProc->Time.timer));
_k_pipe_request_status_set(&ReqProc->args.pipe_xfer_req, TERM_TMO);
DeListWaiter(ReqProc);
if (ReqProc->args.pipe_xfer_req.num_pending_xfers == 0) {
_k_pipe_get_reply(ReqProc);
}
}
/**
*
* @brief Process reply command for a pipe get operation
*
* @return N/A
*/
void _k_pipe_get_reply(struct k_args *ReqProc)
{
__ASSERT_NO_MSG(
(ReqProc->args.pipe_xfer_req.num_pending_xfers == 0) /* no pending Xfers */
&& (ReqProc->Time.timer == NULL) /* no pending timer */
&& (ReqProc->head == NULL)); /* not in list */
/* orig packet must be sent back, not ReqProc */
struct k_args *ReqOrig = ReqProc->Ctxt.args;
PIPE_REQUEST_STATUS status;
ReqOrig->Comm = _K_SVC_PIPE_GET_ACK;
/* determine return value */
status = ReqProc->args.pipe_xfer_req.status;
if (status == TERM_TMO) {
ReqOrig->Time.rcode = RC_TIME;
} else if ((TERM_XXX | XFER_IDLE) & status) {
K_PIPE_OPTION Option = _k_pipe_option_get(&ReqProc->args);
if (likely(ReqProc->args.pipe_xfer_req.xferred_size ==
ReqProc->args.pipe_xfer_req.total_size)) {
/* All data has been transferred */
ReqOrig->Time.rcode = RC_OK;
} else if (ReqProc->args.pipe_xfer_req.xferred_size != 0) {
/* Some but not all data has been transferred */
ReqOrig->Time.rcode = (Option == _ALL_N) ?
RC_INCOMPLETE : RC_OK;
} else {
/* No data has been transferred */
ReqOrig->Time.rcode = (Option == _0_TO_N) ? RC_OK : RC_FAIL;
}
} else {
/* unknown (invalid) status */
__ASSERT_NO_MSG(1 == 0); /* should not come here */
}
ReqOrig->args.pipe_ack.xferred_size =
ReqProc->args.pipe_xfer_req.xferred_size;
SENDARGS(ReqOrig);
FREEARGS(ReqProc);
}
/**
*
* @brief Process acknowledgment command for a pipe get operation
*
* @return N/A
*/
void _k_pipe_get_ack(struct k_args *Request)
{
struct k_args *LocalReq;
LocalReq = Request->Ctxt.args;
LocalReq->Time.rcode = Request->Time.rcode;
LocalReq->args.pipe_ack = Request->args.pipe_ack;
/* Reschedule the sender task */
_k_state_bit_reset(LocalReq->Ctxt.task, TF_RECV | TF_RECVDATA);
FREEARGS(Request);
}


@@ -1,307 +0,0 @@
/* command processing for pipe put operation */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <micro_private.h>
#include <k_pipe_util.h>
#include <toolchain.h>
#include <sections.h>
#include <misc/__assert.h>
/**
*
* @brief Process request command for a pipe put operation
*
* @return N/A
*/
void _k_pipe_put_request(struct k_args *RequestOrig)
{
struct k_args *Request;
struct k_args *RequestProc;
kpipe_t pipeId = RequestOrig->args.pipe_req.req_info.pipe.id;
bool bAsync;
if (_k_pipe_request_type_get(&RequestOrig->args) == _ASYNCREQ) {
bAsync = true;
} else {
bAsync = false;
}
if (!bAsync) {
/* First save the pointer to the task's TCB for rescheduling later */
RequestOrig->Ctxt.task = _k_current_task;
_k_state_bit_set(_k_current_task, TF_SEND);
} else {
/* No need to put in data about sender, since it's a poster */
RequestOrig->Ctxt.task = NULL;
}
mycopypacket(&Request, RequestOrig);
/* Now, we need a new packet for processing of the request; the
* Request package is too small b/c of space lost due to possible
* embedded local data
*/
mycopypacket(&RequestProc, Request);
RequestProc->args.pipe_xfer_req.req_info.pipe.ptr =
(struct _k_pipe_struct *)pipeId;
switch (_k_pipe_request_type_get(&RequestProc->args)) {
case _SYNCREQ:
RequestProc->args.pipe_xfer_req.data_ptr =
Request->args.pipe_req.req_type.sync.data_ptr;
RequestProc->args.pipe_xfer_req.total_size =
Request->args.pipe_req.req_type.sync.total_size;
break;
case _ASYNCREQ:
RequestProc->args.pipe_xfer_req.data_ptr =
Request->args.pipe_req.req_type.async.block.pointer_to_data;
RequestProc->args.pipe_xfer_req.total_size =
Request->args.pipe_req.req_type.async.total_size;
break;
default:
break;
}
RequestProc->args.pipe_xfer_req.status = XFER_IDLE;
RequestProc->args.pipe_xfer_req.num_pending_xfers = 0;
RequestProc->args.pipe_xfer_req.xferred_size = 0;
RequestProc->next = NULL;
RequestProc->head = NULL;
switch (RequestProc->Time.ticks) {
case TICKS_NONE:
_k_pipe_time_type_set(&RequestProc->args, _TIME_NB);
break;
case TICKS_UNLIMITED:
_k_pipe_time_type_set(&RequestProc->args, _TIME_B);
break;
default:
_k_pipe_time_type_set(&RequestProc->args, _TIME_BT);
break;
}
/* start processing */
struct _k_pipe_struct *pipe_ptr;
pipe_ptr = RequestProc->args.pipe_xfer_req.req_info.pipe.ptr;
do {
int iSpace2WriteinReaders;
int iFreeBufferSpace;
int iTotalSpace2Write;
int32_t ticks;
iSpace2WriteinReaders = CalcFreeReaderSpace(pipe_ptr->readers);
iFreeBufferSpace =
pipe_ptr->desc.free_space_count +
pipe_ptr->desc.free_space_post_wrap_around;
iTotalSpace2Write =
iFreeBufferSpace + iSpace2WriteinReaders;
if (iTotalSpace2Write == 0)
break; /* special case: not even enough space for a 1_TO_N request */
/* (possibly) do some processing */
ticks = RequestProc->Time.ticks;
RequestProc->Time.timer = NULL;
_k_pipe_process(pipe_ptr, RequestProc /* writer */, NULL /* reader */);
RequestProc->Time.ticks = ticks;
/* check if request was processed */
if (TERM_XXX & RequestProc->args.pipe_xfer_req.status) {
/* not really required */
RequestProc->Time.timer = NULL;
/* not listed anymore --> completely processed */
return;
}
} while (0);
/*
* if we got up to here, we did none or SOME (partial)
* processing on the request
*/
if (_k_pipe_time_type_get(&RequestProc->args) != _TIME_NB) {
/* call is blocking */
INSERT_ELM(pipe_ptr->writers, RequestProc);
/*
* NOTE: It is both faster and simpler to blindly assign the
* PIPE_PUT_TIMEOUT microkernel command to the packet even
* though it is only useful in the finite timeout case.
*/
RequestProc->Comm = _K_SVC_PIPE_PUT_TIMEOUT;
if (_k_pipe_time_type_get(&RequestProc->args) == _TIME_B) {
/*
* The writer specified TICKS_UNLIMITED; NULL the timer.
*/
RequestProc->Time.timer = NULL;
return;
}
/* { TIME_BT } */
#ifdef CANCEL_TIMERS
if (RequestProc->args.pipe_xfer_req.xferred_size != 0) {
RequestProc->Time.timer = NULL;
} else
#endif
/* enlist a new timer into the timeout chain */
_k_timeout_alloc(RequestProc);
return;
}
/*
* call is non-blocking;
* Check if we don't have to queue it b/c it could not
* be processed at once
*/
RequestProc->Time.timer = NULL;
if (RequestProc->args.pipe_xfer_req.status == XFER_BUSY) {
INSERT_ELM(pipe_ptr->writers, RequestProc);
} else {
__ASSERT_NO_MSG(RequestProc->args.pipe_xfer_req.status == XFER_IDLE);
__ASSERT_NO_MSG(RequestProc->args.pipe_xfer_req.xferred_size == 0);
RequestProc->Comm = _K_SVC_PIPE_PUT_REPLY;
_k_pipe_put_reply(RequestProc);
}
return;
}
/**
*
* @brief Perform timeout command for a pipe put operation
*
* @return N/A
*/
void _k_pipe_put_timeout(struct k_args *ReqProc)
{
__ASSERT_NO_MSG(ReqProc->Time.timer != NULL);
myfreetimer(&(ReqProc->Time.timer));
_k_pipe_request_status_set(&ReqProc->args.pipe_xfer_req, TERM_TMO);
DeListWaiter(ReqProc);
if (ReqProc->args.pipe_xfer_req.num_pending_xfers == 0) {
_k_pipe_put_reply(ReqProc);
}
}
/**
*
* @brief Process reply command for a pipe put operation
*
* @return N/A
*/
void _k_pipe_put_reply(struct k_args *ReqProc)
{
__ASSERT_NO_MSG(
ReqProc->args.pipe_xfer_req.num_pending_xfers == 0 /* no pending Xfers */
&& ReqProc->Time.timer == NULL /* no pending timer */
&& ReqProc->head == NULL); /* not in list */
/* orig packet must be sent back, not ReqProc */
struct k_args *ReqOrig = ReqProc->Ctxt.args;
PIPE_REQUEST_STATUS status;
ReqOrig->Comm = _K_SVC_PIPE_PUT_ACK;
/* determine return value */
status = ReqProc->args.pipe_xfer_req.status;
if (unlikely(status == TERM_TMO)) {
ReqOrig->Time.rcode = RC_TIME;
} else if ((TERM_XXX | XFER_IDLE) & status) {
K_PIPE_OPTION Option = _k_pipe_option_get(&ReqProc->args);
if (likely(ReqProc->args.pipe_xfer_req.xferred_size ==
ReqProc->args.pipe_xfer_req.total_size)) {
/* All data has been transferred */
ReqOrig->Time.rcode = RC_OK;
} else if (ReqProc->args.pipe_xfer_req.xferred_size != 0) {
/* Some but not all data has been transferred */
ReqOrig->Time.rcode = (Option == _ALL_N) ? RC_INCOMPLETE : RC_OK;
} else {
/* No data has been transferred */
ReqOrig->Time.rcode = (Option == _0_TO_N) ? RC_OK : RC_FAIL;
}
} else {
/* unknown (invalid) status */
__ASSERT_NO_MSG(1 == 0); /* should not come here */
}
if (_k_pipe_request_type_get(&ReqOrig->args) != _ASYNCREQ) {
ReqOrig->args.pipe_ack.xferred_size =
ReqProc->args.pipe_xfer_req.xferred_size;
}
SENDARGS(ReqOrig);
FREEARGS(ReqProc);
}
/**
*
* @brief Process acknowledgment command for a pipe put operation
*
* @return N/A
*/
void _k_pipe_put_ack(struct k_args *Request)
{
if (_k_pipe_request_type_get(&Request->args) == _ASYNCREQ) {
struct _pipe_ack_arg *pipe_ack = &Request->args.pipe_ack;
struct k_args A;
struct k_block *blockptr;
/* invoke command to release block */
blockptr = &pipe_ack->req_type.async.block;
A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE;
A.args.p1.pool_id = blockptr->pool_id;
A.args.p1.req_size = blockptr->req_size;
A.args.p1.rep_poolptr = blockptr->address_in_pool;
A.args.p1.rep_dataptr = blockptr->pointer_to_data;
_k_mem_pool_block_release(&A); /* will return immediately */
if (pipe_ack->req_type.async.sema != (ksem_t)NULL) {
/* invoke command to signal sema */
struct k_args A;
A.Comm = _K_SVC_SEM_SIGNAL;
A.args.s1.sema = pipe_ack->req_type.async.sema;
_k_sem_signal(&A); /* will return immediately */
}
} else {
/* Reschedule the sender task */
struct k_args *LocalReq;
LocalReq = Request->Ctxt.args;
LocalReq->Time.rcode = Request->Time.rcode;
LocalReq->args.pipe_ack = Request->args.pipe_ack;
_k_state_bit_reset(LocalReq->Ctxt.task, TF_SEND | TF_SENDDATA);
}
FREEARGS(Request);
}


@@ -1,136 +0,0 @@
/* k_pipe_util.c */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <micro_private.h>
#include <k_pipe_util.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
#include <misc/__assert.h>
#define _ALL_OPT (0x000000FF)
void DeListWaiter(struct k_args *pReqProc)
{
__ASSERT_NO_MSG(pReqProc->head != NULL);
REMOVE_ELM(pReqProc);
pReqProc->head = NULL;
}
void myfreetimer(struct k_timer **ppTimer)
{
if (*ppTimer) {
_k_timer_delist(*ppTimer);
FREETIMER(*ppTimer);
*ppTimer = NULL;
}
}
/* adapted from mailbox implementation of copypacket() */
void mycopypacket(struct k_args **out, struct k_args *in)
{
GETARGS(*out);
memcpy(*out, in, sizeof(struct k_args));
(*out)->Ctxt.args = in;
}
int CalcFreeReaderSpace(struct k_args *pReaderList)
{
int size = 0;
if (pReaderList) {
struct k_args *reader_ptr = pReaderList;
while (reader_ptr != NULL) {
size += (reader_ptr->args.pipe_xfer_req.total_size -
reader_ptr->args.pipe_xfer_req.xferred_size);
reader_ptr = reader_ptr->next;
}
}
return size;
}
int CalcAvailWriterData(struct k_args *pWriterList)
{
int size = 0;
if (pWriterList) {
struct k_args *writer_ptr = pWriterList;
while (writer_ptr != NULL) {
size += (writer_ptr->args.pipe_xfer_req.total_size -
writer_ptr->args.pipe_xfer_req.xferred_size);
writer_ptr = writer_ptr->next;
}
}
return size;
}
K_PIPE_OPTION _k_pipe_option_get(K_ARGS_ARGS *args)
{
return (K_PIPE_OPTION)(args->pipe_xfer_req.req_info.params & _ALL_OPT);
}
void _k_pipe_option_set(K_ARGS_ARGS *args, K_PIPE_OPTION option)
{
/* Ensure that only the pipe option bits are modified */
args->pipe_xfer_req.req_info.params &= (~_ALL_OPT);
args->pipe_xfer_req.req_info.params |= (option & _ALL_OPT);
}
REQ_TYPE _k_pipe_request_type_get(K_ARGS_ARGS *args)
{
return (REQ_TYPE)(args->pipe_xfer_req.req_info.params & _ALLREQ);
}
void _k_pipe_request_type_set(K_ARGS_ARGS *args, REQ_TYPE req_type)
{
/* Ensure that only the request type bits are modified */
args->pipe_xfer_req.req_info.params &= (~_ALLREQ);
args->pipe_xfer_req.req_info.params |= (req_type & _ALLREQ);
}
TIME_TYPE _k_pipe_time_type_get(K_ARGS_ARGS *args)
{
return (TIME_TYPE)(args->pipe_xfer_req.req_info.params & _ALLTIME);
}
void _k_pipe_time_type_set(K_ARGS_ARGS *args, TIME_TYPE TimeType)
{
/* Ensure that only the time type bits are modified */
args->pipe_xfer_req.req_info.params &= (~_ALLTIME);
args->pipe_xfer_req.req_info.params |= (TimeType & _ALLTIME);
}
void _k_pipe_request_status_set(struct _pipe_xfer_req_arg *pipe_xfer_req,
PIPE_REQUEST_STATUS status)
{
#ifdef CONFIG_OBJECT_MONITOR
/*
* if transition XFER_IDLE --> XFER_BUSY, TERM_XXX
* increment pipe counter
*/
if (pipe_xfer_req->status == XFER_IDLE /* current (old) status */
&& (XFER_BUSY | TERM_XXX) & status /* new status */) {
(pipe_xfer_req->req_info.pipe.ptr->count)++;
}
#endif
pipe_xfer_req->status = status;
}
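/*
 * Example (not part of the original sources): how the packed 'params' field
 * of a pipe request is typically populated, mirroring what task_pipe_get()
 * does for the option and request type and what _k_pipe_get_request() later
 * does for the time type.
 */
static void demo_pipe_request_setup(struct k_args *A)
{
	_k_pipe_option_set(&A->args, _ALL_N);		/* transfer-all semantics */
	_k_pipe_request_type_set(&A->args, _SYNCREQ);	/* synchronous request */
	_k_pipe_time_type_set(&A->args, _TIME_BT);	/* blocking, with timeout */
}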

File diff suppressed because it is too large


@@ -1,473 +0,0 @@
/*
* Copyright (c) 1997-2010, 2012-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Semaphore kernel services.
*/
#include <microkernel.h>
#include <toolchain.h>
#include <sections.h>
#include <micro_private.h>
/**
*
* @brief Update value of semaphore structure
*
* This routine updates the value of the semaphore by 0 or more units, then
* gives the semaphore to any waiting tasks that can now be satisfied.
*
* @param n Number of additional times semaphore has been given.
* @param S Semaphore structure to update.
*
* @return N/A
*/
void _k_sem_struct_value_update(int n, struct _k_sem_struct *S)
{
struct k_args *A, *X, *Y;
#ifdef CONFIG_OBJECT_MONITOR
S->count += n;
#endif
S->level += n;
A = S->waiters;
Y = NULL;
while (A && S->level) {
X = A->next;
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Comm == _K_SVC_SEM_WAIT_REQUEST
|| A->Comm == _K_SVC_SEM_WAIT_REPLY_TIMEOUT)
#else
if (A->Comm == _K_SVC_SEM_WAIT_REQUEST)
#endif
{
S->level--;
if (Y) {
Y->next = X;
} else {
S->waiters = X;
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer) {
_k_timeout_cancel(A);
A->Comm = _K_SVC_SEM_WAIT_REPLY;
} else {
#endif
A->Time.rcode = RC_OK;
_k_state_bit_reset(A->Ctxt.task, TF_SEMA);
#ifdef CONFIG_SYS_CLOCK_EXISTS
}
#endif
} else if (A->Comm == _K_SVC_SEM_GROUP_WAIT_REQUEST) {
S->level--;
A->Comm = _K_SVC_SEM_GROUP_WAIT_READY;
GETARGS(Y);
*Y = *A;
SENDARGS(Y);
Y = A;
} else {
Y = A;
}
A = X;
}
}
void _k_sem_group_wait(struct k_args *R)
{
struct k_args *A = R->Ctxt.args;
FREEARGS(R);
if (--(A->args.s1.nsem) == 0) {
_k_state_bit_reset(A->Ctxt.task, TF_LIST);
}
}
void _k_sem_group_wait_cancel(struct k_args *A)
{
struct _k_sem_struct *S = (struct _k_sem_struct *)A->args.s1.sema;
struct k_args *X = S->waiters;
struct k_args *Y = NULL;
while (X && (X->priority <= A->priority)) {
if (X->Ctxt.args == A->Ctxt.args) {
if (Y) {
Y->next = X->next;
} else {
S->waiters = X->next;
}
if (X->Comm == _K_SVC_SEM_GROUP_WAIT_REQUEST
|| X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) {
if (X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) {
/* obtain struct k_args of waiting task */
struct k_args *waitTaskArgs = X->Ctxt.args;
/*
* Determine if the wait cancellation request is being
* processed after the 'waiters' packet state has been
* updated to _K_SVC_SEM_GROUP_WAIT_READY, but before the
* _K_SVC_SEM_GROUP_WAIT_READY packet itself has been
* processed. This will occur if a
* _K_SVC_SEM_GROUP_WAIT_TIMEOUT timer expiry occurs between
* the update of the packet state and the processing of the
* _K_SVC_SEM_GROUP_WAIT_READY packet.
*/
if (unlikely(waitTaskArgs->args.s1.sema ==
ENDLIST)) {
waitTaskArgs->args.s1.sema = A->args.s1.sema;
} else {
_k_sem_struct_value_update(1, S);
}
}
_k_sem_group_wait(X);
} else {
FREEARGS(X); /* ERROR */
}
FREEARGS(A);
return;
}
Y = X;
X = X->next;
}
A->next = X;
if (Y) {
Y->next = A;
} else {
S->waiters = A;
}
}
void _k_sem_group_wait_accept(struct k_args *A)
{
struct _k_sem_struct *S = (struct _k_sem_struct *)A->args.s1.sema;
struct k_args *X = S->waiters;
struct k_args *Y = NULL;
while (X && (X->priority <= A->priority)) {
if (X->Ctxt.args == A->Ctxt.args) {
if (Y) {
Y->next = X->next;
} else {
S->waiters = X->next;
}
if (X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) {
_k_sem_group_wait(X);
} else {
FREEARGS(X); /* ERROR */
}
FREEARGS(A);
return;
}
Y = X;
X = X->next;
}
/* ERROR */
}
void _k_sem_group_wait_timeout(struct k_args *A)
{
ksem_t *L;
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer) {
FREETIMER(A->Time.timer);
}
#endif
L = A->args.s1.list;
while (*L != ENDLIST) {
struct k_args *R;
GETARGS(R);
R->priority = A->priority;
R->Comm =
((*L == A->args.s1.sema) ?
_K_SVC_SEM_GROUP_WAIT_ACCEPT : _K_SVC_SEM_GROUP_WAIT_CANCEL);
R->Ctxt.args = A;
R->args.s1.sema = *L++;
SENDARGS(R);
}
}
void _k_sem_group_ready(struct k_args *R)
{
struct k_args *A = R->Ctxt.args;
if (A->args.s1.sema == ENDLIST) {
A->args.s1.sema = R->args.s1.sema;
A->Comm = _K_SVC_SEM_GROUP_WAIT_TIMEOUT;
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer) {
_k_timeout_cancel(A);
} else
#endif
_k_sem_group_wait_timeout(A);
}
FREEARGS(R);
}
void _k_sem_wait_reply(struct k_args *A)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer) {
FREETIMER(A->Time.timer);
}
if (A->Comm == _K_SVC_SEM_WAIT_REPLY_TIMEOUT) {
REMOVE_ELM(A);
A->Time.rcode = RC_TIME;
} else
#endif
A->Time.rcode = RC_OK;
_k_state_bit_reset(A->Ctxt.task, TF_SEMA);
}
void _k_sem_wait_reply_timeout(struct k_args *A)
{
_k_sem_wait_reply(A);
}
void _k_sem_group_wait_request(struct k_args *A)
{
struct _k_sem_struct *S = (struct _k_sem_struct *)A->args.s1.sema;
struct k_args *X = S->waiters;
struct k_args *Y = NULL;
while (X && (X->priority <= A->priority)) {
if (X->Ctxt.args == A->Ctxt.args) {
if (Y) {
Y->next = X->next;
} else {
S->waiters = X->next;
}
if (X->Comm == _K_SVC_SEM_GROUP_WAIT_CANCEL) {
_k_sem_group_wait(X);
} else {
FREEARGS(X); /* ERROR */
}
FREEARGS(A);
return;
}
Y = X;
X = X->next;
}
A->next = X;
if (Y) {
Y->next = A;
} else {
S->waiters = A;
}
_k_sem_struct_value_update(0, S);
}
void _k_sem_group_wait_any(struct k_args *A)
{
ksem_t *L;
L = A->args.s1.list;
A->args.s1.sema = ENDLIST;
A->args.s1.nsem = 0;
if (*L == ENDLIST) {
return;
}
while (*L != ENDLIST) {
struct k_args *R;
GETARGS(R);
R->priority = _k_current_task->priority;
R->Comm = _K_SVC_SEM_GROUP_WAIT_REQUEST;
R->Ctxt.args = A;
R->args.s1.sema = *L++;
SENDARGS(R);
(A->args.s1.nsem)++;
}
A->Ctxt.task = _k_current_task;
_k_state_bit_set(_k_current_task, TF_LIST);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.ticks != TICKS_NONE) {
if (A->Time.ticks == TICKS_UNLIMITED) {
A->Time.timer = NULL;
} else {
A->Comm = _K_SVC_SEM_GROUP_WAIT_TIMEOUT;
_k_timeout_alloc(A);
}
}
#endif
}
void _k_sem_wait_request(struct k_args *A)
{
struct _k_sem_struct *S;
uint32_t Sid;
Sid = A->args.s1.sema;
S = (struct _k_sem_struct *)Sid;
if (S->level) {
S->level--;
A->Time.rcode = RC_OK;
} else if (A->Time.ticks != TICKS_NONE) {
A->Ctxt.task = _k_current_task;
A->priority = _k_current_task->priority;
_k_state_bit_set(_k_current_task, TF_SEMA);
INSERT_ELM(S->waiters, A);
#ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.ticks == TICKS_UNLIMITED) {
A->Time.timer = NULL;
} else {
A->Comm = _K_SVC_SEM_WAIT_REPLY_TIMEOUT;
_k_timeout_alloc(A);
}
#endif
return;
} else {
A->Time.rcode = RC_FAIL;
}
}
int task_sem_take(ksem_t sema, int32_t timeout)
{
struct k_args A;
A.Comm = _K_SVC_SEM_WAIT_REQUEST;
A.Time.ticks = timeout;
A.args.s1.sema = sema;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
ksem_t task_sem_group_take(ksemg_t group, int32_t timeout)
{
struct k_args A;
A.Comm = _K_SVC_SEM_GROUP_WAIT_ANY;
A.priority = _k_current_task->priority;
A.Time.ticks = timeout;
A.args.s1.list = group;
KERNEL_ENTRY(&A);
return A.args.s1.sema;
}
void _k_sem_signal(struct k_args *A)
{
uint32_t Sid = A->args.s1.sema;
struct _k_sem_struct *S = (struct _k_sem_struct *)Sid;
_k_sem_struct_value_update(1, S);
}
void _k_sem_group_signal(struct k_args *A)
{
ksem_t *L = A->args.s1.list;
while ((A->args.s1.sema = *L++) != ENDLIST) {
_k_sem_signal(A);
}
}
void task_sem_give(ksem_t sema)
{
struct k_args A;
A.Comm = _K_SVC_SEM_SIGNAL;
A.args.s1.sema = sema;
KERNEL_ENTRY(&A);
}
void task_sem_group_give(ksemg_t group)
{
struct k_args A;
A.Comm = _K_SVC_SEM_GROUP_SIGNAL;
A.args.s1.list = group;
KERNEL_ENTRY(&A);
}
FUNC_ALIAS(isr_sem_give, fiber_sem_give, void);
void isr_sem_give(ksem_t sema)
{
_COMMAND_STACK_SIZE_CHECK();
nano_isr_stack_push(&_k_command_stack,
(uint32_t)sema | KERNEL_CMD_SEMAPHORE_TYPE);
}
void _k_sem_reset(struct k_args *A)
{
uint32_t Sid = A->args.s1.sema;
struct _k_sem_struct *S = (struct _k_sem_struct *)Sid;
S->level = 0;
}
void _k_sem_group_reset(struct k_args *A)
{
ksem_t *L = A->args.s1.list;
while ((A->args.s1.sema = *L++) != ENDLIST) {
_k_sem_reset(A);
}
}
void task_sem_reset(ksem_t sema)
{
struct k_args A;
A.Comm = _K_SVC_SEM_RESET;
A.args.s1.sema = sema;
KERNEL_ENTRY(&A);
}
void task_sem_group_reset(ksemg_t group)
{
struct k_args A;
A.Comm = _K_SVC_SEM_GROUP_RESET;
A.args.s1.list = group;
KERNEL_ENTRY(&A);
}
void _k_sem_inquiry(struct k_args *A)
{
struct _k_sem_struct *S;
uint32_t Sid;
Sid = A->args.s1.sema;
S = (struct _k_sem_struct *)Sid;
A->Time.rcode = S->level;
}
int task_sem_count_get(ksem_t sema)
{
struct k_args A;
A.Comm = _K_SVC_SEM_INQUIRY;
A.args.s1.sema = sema;
KERNEL_ENTRY(&A);
return A.Time.rcode;
}
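/*
 * Usage sketch (hypothetical application code, not kernel sources): a task
 * blocking on a microkernel semaphore with a timeout.  MY_SEM is assumed to
 * be a semaphore defined elsewhere (for example in the project's MDEF file);
 * RC_OK, RC_TIME and the task_sem_take()/task_sem_give() APIs are the ones
 * implemented above.
 */
void consumer_task(void)
{
	for (;;) {
		/* wait up to 100 ticks for the producer's signal */
		if (task_sem_take(MY_SEM, 100) == RC_OK) {
			/* semaphore was given: process one work item */
		} else {
			/* RC_TIME: no signal arrived within 100 ticks */
		}
	}
}

void producer_task(void)
{
	task_sem_give(MY_SEM);	/* wake the consumer; never blocks */
}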

View File

@@ -1,182 +0,0 @@
/*
* Copyright (c) 2010, 2012-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Microkernel server
*
* This module implements the microkernel server, which processes service
* requests from tasks (and, less commonly, fibers and ISRs). The requests are
* serviced by a high-priority fiber, thereby ensuring that requests are
* processed in a timely, single-threaded manner that prevents
* simultaneous requests from interfering with each other.
*/
#include <toolchain.h>
#include <sections.h>
#include <micro_private.h>
#include <nano_private.h>
#include <microkernel.h>
#include <nanokernel.h>
#include <misc/__assert.h>
#include <drivers/system_timer.h>
extern const kernelfunc _k_server_dispatch_table[];
/**
*
* @brief Select task to be executed by microkernel
*
* Locates the highest-priority task queue that is non-empty and chooses the
* task at the head of that queue. It's guaranteed that there will always be
* a non-empty queue, since the idle task is always executable.
*
* @return pointer to selected task
*/
static struct k_task *next_task_select(void)
{
int K_PrioListIdx;
#if (CONFIG_NUM_TASK_PRIORITIES <= 32)
K_PrioListIdx = find_lsb_set(_k_task_priority_bitmap[0]) - 1;
#else
int bit_map;
int set_bit_pos;
K_PrioListIdx = -1;
for (bit_map = 0; ; bit_map++) {
set_bit_pos = find_lsb_set(_k_task_priority_bitmap[bit_map]);
if (set_bit_pos) {
K_PrioListIdx += set_bit_pos;
break;
}
K_PrioListIdx += 32;
}
#endif
return _k_task_priority_list[K_PrioListIdx].head;
}
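/*
 * Worked example: with CONFIG_NUM_TASK_PRIORITIES <= 32 and
 * _k_task_priority_bitmap[0] == 0x14 (priorities 2 and 4 runnable),
 * find_lsb_set() returns 3, K_PrioListIdx becomes 2, and the task at the
 * head of the priority 2 queue is selected.
 */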
/**
*
* @brief The microkernel thread entry point
*
* This function implements the microkernel fiber. It waits for command
* packets to arrive on its command stack. It executes all commands on the
* stack and then sets up the next task that is ready to run, before
* returning to wait for further input on the command stack.
*
* @return Does not return.
*/
FUNC_NORETURN void _k_server(int unused1, int unused2)
{
struct k_args *pArgs;
struct k_task *pNextTask;
ARG_UNUSED(unused1);
ARG_UNUSED(unused2);
/* indicate that failure of this fiber may be fatal to the entire system
*/
_thread_essential_set();
while (1) { /* forever */
(void) nano_fiber_stack_pop(&_k_command_stack, (uint32_t *)&pArgs,
TICKS_UNLIMITED); /* will schedule */
do {
int cmd_type = (int)pArgs & KERNEL_CMD_TYPE_MASK;
if (cmd_type == KERNEL_CMD_PACKET_TYPE) {
/* process command packet */
#ifdef CONFIG_TASK_MONITOR
if (_k_monitor_mask & MON_KSERV) {
_k_task_monitor_args(pArgs);
}
#endif
(*pArgs->Comm)(pArgs);
} else if (cmd_type == KERNEL_CMD_EVENT_TYPE) {
/* give event */
#ifdef CONFIG_TASK_MONITOR
if (_k_monitor_mask & MON_EVENT) {
_k_task_monitor_args(pArgs);
}
#endif
kevent_t event = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;
_k_do_event_signal(event);
} else { /* cmd_type == KERNEL_CMD_SEMAPHORE_TYPE */
/* give semaphore */
#ifdef CONFIG_TASK_MONITOR
/* task monitoring for giving semaphore not implemented */
#endif
ksem_t sem = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;
_k_sem_struct_value_update(1, (struct _k_sem_struct *)sem);
}
/*
* check if another fiber (of equal or greater priority)
* needs to run
*/
if (_nanokernel.fiber) {
fiber_yield();
}
} while (nano_fiber_stack_pop(&_k_command_stack, (uint32_t *)&pArgs,
TICKS_NONE));
pNextTask = next_task_select();
if (_k_current_task != pNextTask) {
/*
* switch from currently selected task to a different
* one
*/
#ifdef CONFIG_WORKLOAD_MONITOR
if (pNextTask->id == 0x00000000) {
_k_workload_monitor_idle_start();
} else if (_k_current_task->id == 0x00000000) {
_k_workload_monitor_idle_end();
}
#endif
_k_current_task = pNextTask;
_nanokernel.task = (struct tcs *)pNextTask->workspace;
#ifdef CONFIG_TASK_MONITOR
if (_k_monitor_mask & MON_TSWAP) {
_k_task_monitor(_k_current_task, 0);
}
#endif
}
}
/*
* Code analyzers may complain that _k_server() uses an infinite loop
* unless we indicate that this is intentional
*/
CODE_UNREACHABLE;
}
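/*
 * Client-side sketch of the command packet protocol serviced above: an API
 * routine fills in a struct k_args, selects the _k_server() handler via the
 * Comm field, and submits the packet with KERNEL_ENTRY(), which blocks the
 * caller until the handler has run.  This example mirrors
 * task_sem_count_get(); any other service follows the same pattern.
 */
int example_sem_count_get(ksem_t sema)
{
	struct k_args A;

	A.Comm = _K_SVC_SEM_INQUIRY;	/* handler run by _k_server() */
	A.args.s1.sema = sema;		/* handler-specific arguments */
	KERNEL_ENTRY(&A);		/* hand off to the microkernel fiber */
	return A.Time.rcode;		/* result filled in by the handler */
}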

View File

@@ -1,510 +0,0 @@
/* task kernel services */
/*
* Copyright (c) 1997-2010, 2013-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <microkernel.h>
#include <nanokernel.h>
#include <arch/cpu.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
#include <micro_private.h>
#include <nano_private.h>
#include <start_task_arch.h>
#include <misc/debug/object_tracing_common.h>
extern ktask_t _k_task_ptr_start[];
extern ktask_t _k_task_ptr_end[];
ktask_t task_id_get(void)
{
return _k_current_task->id;
}
/**
* @brief Reset the specified task state bits
*
* This routine resets the specified task state bits. When a task's state bits
* are zero, the task may be scheduled to run. The task's state bits are a
* bitmask of the TF_xxx bits. Each TF_xxx bit indicates a reason why the task
* must not be scheduled to run.
*
* @param X Pointer to task
* @param bits Bitmask of TF_xxx bits to reset
* @return N/A
*
* @internal
* When operating on microkernel objects, this routine is invoked in the
* context of the microkernel server fiber. However, since microkernel tasks
* may pend/unpend on nanokernel objects, interrupts must be locked to
* prevent data corruption.
* @endinternal
*/
void _k_state_bit_reset(struct k_task *X, uint32_t bits)
{
unsigned int key = irq_lock();
uint32_t f_old = X->state; /* old state bits */
uint32_t f_new = f_old & ~bits; /* new state bits */
X->state = f_new; /* Update task's state bits */
if ((f_old != 0) && (f_new == 0)) {
/*
* The task may now be scheduled to run (but could not
* previously) as all the TF_xxx bits are clear. It must
* be added to the list of schedulable tasks.
*/
struct k_tqhd *H = _k_task_priority_list + X->priority;
X->next = NULL;
H->tail->next = X;
H->tail = X;
_k_task_priority_bitmap[X->priority >> 5] |=
(1 << (X->priority & 0x1F));
}
irq_unlock(key);
#ifdef CONFIG_TASK_MONITOR
f_new ^= f_old;
if ((_k_monitor_mask & MON_STATE) && (f_new)) {
/*
* Task monitoring is enabled and the new state bits are
* different than the old state bits.
*
* <f_new> now contains the bits that are different.
*/
_k_task_monitor(X, f_new | MO_STBIT0);
}
#endif
}
/**
* @brief Set specified task state bits
*
* This routine sets the specified task state bits. When a task's state bits
* are non-zero, the task will not be scheduled to run. The task's state bits
* are a bitmask of the TF_xxx bits. Each TF_xxx bit indicates a reason why
* the task must not be scheduled to run.
* @param task_ptr Task pointer
* @param bits Bitmask of TF_xxx bits to set
* @return N/A
*
* @internal
* When operating on microkernel objects, this routine is invoked in the
* context of the microkernel server fiber. However, since microkernel tasks
* may pend/unpend on nanokernel objects, interrupts must be locked to
* prevent data corruption.
* @endinternal
*/
void _k_state_bit_set(struct k_task *task_ptr, uint32_t bits)
{
unsigned int key = irq_lock();
uint32_t old_state_bits = task_ptr->state;
uint32_t new_state_bits = old_state_bits | bits;
task_ptr->state = new_state_bits;
if ((old_state_bits == 0) && (new_state_bits != 0)) {
/*
* The task could have been scheduled to run ([state] was 0)
* but can no longer be scheduled to run because at least one TF_xxx
* bit has been set. Remove it from the list of schedulable
* tasks.
*/
#if defined(__GNUC__)
#if defined(CONFIG_ARM)
/*
* Avoid bad code generation by certain gcc toolchains for ARM
* when an optimization setting of -O2 or above is used.
*
* Specifically, this issue has been seen with ARM gcc version
* 4.6.3 (Sourcery CodeBench Lite 2012.03-56): The 'volatile'
* attribute is added to the following variable to prevent it
* from being lost--otherwise the register that holds its value
* is reused, but the compiled code uses it later on as if it
* was still that variable.
*/
volatile
#endif
#endif
struct k_tqhd *task_queue = _k_task_priority_list +
task_ptr->priority;
struct k_task *cur_task = (struct k_task *)(&task_queue->head);
/*
* Search in the list for this task priority level,
* and remove the task.
*/
while (cur_task->next != task_ptr) {
cur_task = cur_task->next;
}
cur_task->next = task_ptr->next;
if (task_queue->tail == task_ptr) {
task_queue->tail = cur_task;
}
/*
* If there are no more tasks of this priority that are
* runnable, then clear that bit in the global priority bit map.
*/
if (task_queue->head == NULL) {
_k_task_priority_bitmap[task_ptr->priority >> 5] &=
~(1 << (task_ptr->priority & 0x1F));
}
}
irq_unlock(key);
#ifdef CONFIG_TASK_MONITOR
new_state_bits ^= old_state_bits;
if ((_k_monitor_mask & MON_STATE) && (new_state_bits)) {
/*
* Task monitoring is enabled and the new state bits are
* different than the old state bits.
*
* <new_state_bits> now contains the bits that are different.
*/
_k_task_monitor(task_ptr, new_state_bits | MO_STBIT1);
}
#endif
}
/**
* @brief Initialize and start a task
*
* @param X Pointer to task control block
* @param func Entry point for task
* @return N/A
*/
static void start_task(struct k_task *X, void (*func)(void))
{
unsigned int task_options;
void *parameter1;
/* Note: the field X->worksize now represents the task size in bytes */
task_options = 0;
_START_TASK_ARCH(X, &task_options);
/*
* The 'func' argument to _new_thread() represents the entry point of the
* kernel task. The 'parameter1', 'parameter2', and 'parameter3' arguments
* are not applicable to such tasks. A 'priority' of -1 indicates that the
* thread is a task, rather than a fiber.
*/
#ifdef CONFIG_THREAD_MONITOR
parameter1 = (void *)X;
#else
parameter1 = (void *)0;
#endif
_new_thread((char *)X->workspace, /* pStackMem */
X->worksize, /* stackSize */
X, /* microkernel task pointer */
(_thread_entry_t)func, /* pEntry */
parameter1, /* parameter1 */
(void *)0, /* parameter2 */
(void *)0, /* parameter3 */
-1, /* priority */
task_options /* options */
);
X->fn_abort = NULL;
_k_state_bit_reset(X, TF_STOP | TF_TERM);
}
/**
* @brief Abort a task
*
* This routine aborts the specified task.
* @param X Task pointer
* @return N/A
*/
static void abort_task(struct k_task *X)
{
/* Do normal thread exit cleanup */
_thread_monitor_exit((struct tcs *)X->workspace);
/* Set TF_TERM and TF_STOP state flags */
_k_state_bit_set(X, TF_STOP | TF_TERM);
/* Invoke abort function, if there is one */
if (X->fn_abort != NULL) {
X->fn_abort();
}
}
#ifndef CONFIG_ARCH_HAS_TASK_ABORT
/**
* @brief Microkernel handler for fatal task errors
*
* To be invoked when a task aborts implicitly, either by returning from its
* entry point or due to a software or hardware fault.
*
* @return does not return
*/
FUNC_NORETURN void _TaskAbort(void)
{
_task_ioctl(_k_current_task->id, TASK_ABORT);
/*
* Compiler can't tell that _task_ioctl() won't return and issues
* a warning unless we explicitly tell it that control never gets this
* far.
*/
CODE_UNREACHABLE;
}
#endif
void task_abort_handler_set(void (*func)(void))
{
_k_current_task->fn_abort = func;
}
/**
* @brief Handle a task operation request
*
* This routine handles any one of the following task operation requests:
* starting either a kernel or user task, aborting a task, suspending a task,
* resuming a task, blocking a task or unblocking a task
* @param A Arguments
* @return N/A
*/
void _k_task_op(struct k_args *A)
{
ktask_t Tid = A->args.g1.task;
struct k_task *X = (struct k_task *)Tid;
switch (A->args.g1.opt) {
case TASK_START:
start_task(X, X->fn_start);
SYS_TRACING_OBJ_INIT(micro_task, X);
break;
case TASK_ABORT:
abort_task(X);
break;
case TASK_SUSPEND:
_k_state_bit_set(X, TF_SUSP);
break;
case TASK_RESUME:
_k_state_bit_reset(X, TF_SUSP);
break;
case TASK_BLOCK:
_k_state_bit_set(X, TF_BLCK);
break;
case TASK_UNBLOCK:
_k_state_bit_reset(X, TF_BLCK);
break;
}
}
/**
* @brief Task operations
* @param task Task on which to operate
* @param opt Task operation
* @return N/A
*/
void _task_ioctl(ktask_t task, int opt)
{
struct k_args A;
A.Comm = _K_SVC_TASK_OP;
A.args.g1.task = task;
A.args.g1.opt = opt;
KERNEL_ENTRY(&A);
}
/**
* @brief Handle task group operation request
*
* This routine handles any one of the following task group operation requests:
* starting either kernel or user tasks, aborting tasks, suspending tasks,
* resuming tasks, blocking tasks or unblocking tasks
* @param A Arguments
* @return N/A
*/
void _k_task_group_op(struct k_args *A)
{
ktask_group_t grp = A->args.g1.group;
int opt = A->args.g1.opt;
struct k_task *X;
ktask_t *task_id;
#ifdef CONFIG_TASK_DEBUG
if (opt == TASK_GROUP_BLOCK)
_k_debug_halt = 1;
if (opt == TASK_GROUP_UNBLOCK)
_k_debug_halt = 0;
#endif
for (task_id = _k_task_ptr_start; task_id < _k_task_ptr_end;
task_id++) {
X = (struct k_task *)(*task_id);
if (X->group & grp) {
switch (opt) {
case TASK_GROUP_START:
start_task(X, X->fn_start);
SYS_TRACING_OBJ_INIT(micro_task, X);
break;
case TASK_GROUP_ABORT:
abort_task(X);
break;
case TASK_GROUP_SUSPEND:
_k_state_bit_set(X, TF_SUSP);
break;
case TASK_GROUP_RESUME:
_k_state_bit_reset(X, TF_SUSP);
break;
case TASK_GROUP_BLOCK:
_k_state_bit_set(X, TF_BLCK);
break;
case TASK_GROUP_UNBLOCK:
_k_state_bit_reset(X, TF_BLCK);
break;
}
}
}
}
/**
* @brief Task group operations
* @param group Task group
* @param opt Operation
* @return N/A
*/
void _task_group_ioctl(ktask_group_t group, int opt)
{
struct k_args A;
A.Comm = _K_SVC_TASK_GROUP_OP;
A.args.g1.group = group;
A.args.g1.opt = opt;
KERNEL_ENTRY(&A);
}
kpriority_t task_group_mask_get(void)
{
return _k_current_task->group;
}
void task_group_join(uint32_t groups)
{
_k_current_task->group |= groups;
}
void task_group_leave(uint32_t groups)
{
_k_current_task->group &= ~groups;
}
/**
* @brief Get task priority
*
* @return priority of current task
*/
kpriority_t task_priority_get(void)
{
return _k_current_task->priority;
}
/**
* @brief Handle task set priority request
* @param A Arguments
* @return N/A
*/
void _k_task_priority_set(struct k_args *A)
{
ktask_t Tid = A->args.g1.task;
struct k_task *X = (struct k_task *)Tid;
_k_state_bit_set(X, TF_PRIO);
X->priority = A->args.g1.prio;
_k_state_bit_reset(X, TF_PRIO);
if (A->alloc)
FREEARGS(A);
}
void task_priority_set(ktask_t task, kpriority_t prio)
{
struct k_args A;
A.Comm = _K_SVC_TASK_PRIORITY_SET;
A.args.g1.task = task;
A.args.g1.prio = prio;
KERNEL_ENTRY(&A);
}
/**
* @brief Handle task yield request
*
* @param A Arguments
* @return N/A
*/
void _k_task_yield(struct k_args *A)
{
struct k_tqhd *H = _k_task_priority_list + _k_current_task->priority;
struct k_task *X = _k_current_task->next;
ARG_UNUSED(A);
if (X && H->head == _k_current_task) {
_k_current_task->next = NULL;
H->tail->next = _k_current_task;
H->tail = _k_current_task;
H->head = X;
}
}
void task_yield(void)
{
struct k_args A;
A.Comm = _K_SVC_TASK_YIELD;
KERNEL_ENTRY(&A);
}
void task_entry_set(ktask_t task, void (*func)(void))
{
struct k_task *X = (struct k_task *)task;
X->fn_start = func;
}
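/*
 * Usage sketch (hypothetical application code): suspending and resuming
 * another task through the _task_ioctl() request path defined above.
 * OTHER_TASK is assumed to be a task id defined elsewhere; the public task
 * control APIs are expected to be thin wrappers over _task_ioctl().
 */
void supervisor_task(void)
{
	_task_ioctl(OTHER_TASK, TASK_SUSPEND);	/* stop scheduling it */

	task_priority_set(OTHER_TASK, 20);	/* reprioritize while suspended */

	_task_ioctl(OTHER_TASK, TASK_RESUME);	/* let it run again */
}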

View File

@@ -1,82 +0,0 @@
/* k_task_monitor.c - microkernel task monitoring subsystem */
/*
* Copyright (c) 1997-2010, 2013-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <micro_private.h>
#include <misc/kernel_event_logger.h>
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
int _k_monitor_mask;
#else
const int _k_monitor_mask = CONFIG_TASK_MONITOR_MASK;
#endif
k_task_monitor_hook_t _k_task_switch_callback;
void task_monitor_hook_set(k_task_monitor_hook_t func)
{
_k_task_switch_callback = func;
}
void _k_task_monitor(struct k_task *X, uint32_t D)
{
uint32_t data[3];
#ifdef CONFIG_TASK_DEBUG
if (!_k_debug_halt)
#endif
{
data[0] = _sys_k_get_time();
data[1] = X->id;
data[2] = D;
sys_k_event_logger_put(
KERNEL_EVENT_LOGGER_TASK_MON_TASK_STATE_CHANGE_EVENT_ID,
data, ARRAY_SIZE(data));
}
if ((_k_task_switch_callback != NULL) && (D == 0)) {
(_k_task_switch_callback)(X->id, sys_cycle_get_32());
}
}
void _k_task_monitor_args(struct k_args *A)
{
#ifdef CONFIG_TASK_DEBUG
if (!_k_debug_halt)
#endif
{
int cmd_type;
cmd_type = (int)A & KERNEL_CMD_TYPE_MASK;
if (cmd_type == KERNEL_CMD_EVENT_TYPE) {
uint32_t data[2];
data[0] = _sys_k_get_time();
data[1] = MO_EVENT | (uint32_t)A;
sys_k_event_logger_put(
KERNEL_EVENT_LOGGER_TASK_MON_KEVENT_EVENT_ID,
data, ARRAY_SIZE(data));
} else {
uint32_t data[3];
data[0] = _sys_k_get_time();
data[1] = _k_current_task->id;
data[2] = (uint32_t)A->Comm;
sys_k_event_logger_put(
KERNEL_EVENT_LOGGER_TASK_MON_CMD_PACKET_EVENT_ID,
data, ARRAY_SIZE(data));
}
}
}
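/*
 * Usage sketch: installing a task switch hook.  The callback signature
 * (task id plus a timestamp from sys_cycle_get_32()) is inferred from the
 * invocation in _k_task_monitor() above; my_switch_hook is a hypothetical
 * application function.
 */
static void my_switch_hook(ktask_t task_id, uint32_t timestamp)
{
	/* e.g. record the context switch in an application trace buffer */
	ARG_UNUSED(task_id);
	ARG_UNUSED(timestamp);
}

void install_switch_hook(void)
{
	task_monitor_hook_set(my_switch_hook);
}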

View File

@@ -1,183 +0,0 @@
/*
* Copyright (c) 1997-2010, 2012-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Microkernel tick event handler
*
* This module implements the microkernel's tick event handler.
*/
#include <nanokernel.h>
#include <arch/cpu.h>
#include <micro_private.h>
#include <drivers/system_timer.h>
#include <microkernel.h>
#include <microkernel/ticks.h>
#include <toolchain.h>
#include <sections.h>
#include <init.h>
#ifdef CONFIG_TIMESLICING
static int32_t slice_count = (int32_t)0;
static int32_t slice_time = (int32_t)CONFIG_TIMESLICE_SIZE;
static kpriority_t slice_prio =
(kpriority_t)CONFIG_TIMESLICE_PRIORITY;
#endif /* CONFIG_TIMESLICING */
#ifdef CONFIG_TICKLESS_IDLE
/* Number of ticks elapsed that have not been announced to the microkernel */
int32_t _sys_idle_elapsed_ticks; /* Initial value must be 0 */
#endif
/**
* @internal
* @brief Task level debugging tick handler
*
* If task level debugging is configured, this routine updates the low resolution
* debugging timer and determines if task level processing should be suspended.
*
* @return 0 if task level processing should be halted or 1 if not
*
*/
#ifdef CONFIG_TASK_DEBUG
uint32_t __noinit _k_debug_sys_clock_tick_count;
static inline int _TlDebugUpdate(int32_t ticks)
{
_k_debug_sys_clock_tick_count += ticks;
return !_k_debug_halt;
}
#else
#define _TlDebugUpdate(ticks) 1
#endif
/**
* @internal
* @brief Tick handler time slice logic
*
* This routine checks to see if it is time for the current task
* to relinquish control, and yields the CPU if so.
*
* @return N/A
*
*/
static inline void _TimeSliceUpdate(void)
{
#ifdef CONFIG_TIMESLICING
int yield = slice_time && (_k_current_task->priority >= slice_prio) &&
(++slice_count >= slice_time);
if (yield) {
slice_count = 0;
_k_task_yield(NULL);
}
#else
/* do nothing */
#endif /* CONFIG_TIMESLICING */
}
/**
* @internal
* @brief Get elapsed ticks
*
* If tickless idle support is configured, this routine returns the number
* of ticks since going idle and then resets the global elapsed tick counter back
* to zero indicating all elapsed ticks have been consumed. This is done with
* interrupts locked to prevent the timer ISR from modifying the global elapsed
* tick counter.
* If tickless idle support is not configured, this routine simply returns 1.
*
* @return number of ticks to process
*/
static inline int32_t _SysIdleElapsedTicksGet(void)
{
#ifdef CONFIG_TICKLESS_IDLE
int32_t ticks;
int key;
key = irq_lock();
ticks = _sys_idle_elapsed_ticks;
_sys_idle_elapsed_ticks = 0;
irq_unlock(key);
return ticks;
#else
/* A single tick always elapses when not in tickless mode */
return 1;
#endif
}
/**
*
* @brief Microkernel tick handler
*
* This routine informs other microkernel subsystems that a tick event has
* occurred.
* @param event Event
* @return 1
*/
int _k_ticker(int event)
{
(void)event; /* prevent "unused argument" compiler warning */
int32_t ticks;
ticks = _SysIdleElapsedTicksGet();
_k_workload_monitor_update();
if (_TlDebugUpdate(ticks)) {
_TimeSliceUpdate();
_k_timer_list_update(ticks);
_nano_sys_clock_tick_announce(ticks);
}
return 1;
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
static void _sys_clock_tick_announce_pre_micro(kevent_t e)
{
ARG_UNUSED(e);
/* before _k_server() starts, use the nanokernel tick announce function */
_nano_sys_clock_tick_announce(_SysIdleElapsedTicksGet());
}
void (*_do_sys_clock_tick_announce)(kevent_t) =
_sys_clock_tick_announce_pre_micro;
static int _sys_clock_microkernel_handler_install(struct device *dev)
{
ARG_UNUSED(dev);
extern void (*_do_task_sleep)(int32_t ticks);
extern void _micro_task_sleep(int32_t ticks);
_do_sys_clock_tick_announce = isr_event_send;
_do_task_sleep = _micro_task_sleep;
return 0;
}
SYS_INIT(_sys_clock_microkernel_handler_install, MICROKERNEL, 0);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
#ifdef CONFIG_TIMESLICING
void sys_scheduler_time_slice_set(int32_t t, kpriority_t p)
{
slice_time = t;
slice_prio = p;
}
#endif /* CONFIG_TIMESLICING */
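/*
 * Usage sketch: the time-slice logic above yields the current task once
 * slice_time ticks have elapsed and its priority value is >= slice_prio.
 * The call below therefore round-robins tasks of lower urgency (priority
 * value 10 or greater) every 20 ticks.
 */
void enable_round_robin(void)
{
	sys_scheduler_time_slice_set(20, 10);
}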

View File

@@ -1,423 +0,0 @@
/* timer kernel services */
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <microkernel.h>
#include <toolchain.h>
#include <sections.h>
#include <micro_private.h>
#include <drivers/system_timer.h>
#include <misc/debug/object_tracing_common.h>
extern struct k_timer _k_timer_blocks[];
struct k_timer *_k_timer_list_head;
struct k_timer *_k_timer_list_tail;
/**
* @brief Insert a timer into the timer queue
* @param T Timer
* @return N/A
*/
void _k_timer_enlist(struct k_timer *T)
{
struct k_timer *P = _k_timer_list_head;
struct k_timer *Q = NULL;
while (P && (T->duration > P->duration)) {
T->duration -= P->duration;
Q = P;
P = P->next;
}
if (P) {
P->duration -= T->duration;
P->prev = T;
} else {
_k_timer_list_tail = T;
}
if (Q) {
Q->next = T;
} else {
_k_timer_list_head = T;
}
T->next = P;
T->prev = Q;
}
/**
* @brief Remove a timer from the timer queue
* @param T Timer
* @return N/A
*/
void _k_timer_delist(struct k_timer *T)
{
struct k_timer *P = T->next;
struct k_timer *Q = T->prev;
if (P) {
P->duration += T->duration;
P->prev = Q;
} else
_k_timer_list_tail = Q;
if (Q)
Q->next = P;
else
_k_timer_list_head = P;
T->duration = -1;
}
/**
* @brief Allocate timer used for command packet timeout
*
* Allocates timer for command packet and inserts it into the timer queue.
* @param P Arguments
* @return N/A
*/
void _k_timeout_alloc(struct k_args *P)
{
struct k_timer *T;
GETTIMER(T);
T->duration = P->Time.ticks;
T->period = 0;
T->args = P;
_k_timer_enlist(T);
P->Time.timer = T;
SYS_TRACING_OBJ_INIT_DLL(micro_timer, T);
}
/**
* @brief Cancel timer used for command packet timeout
*
* Cancels timer (if not already expired), then reschedules the command packet
* for further processing.
*
* The command that is processed following cancellation is typically NOT the
* command that would have occurred had the timeout expired on its own.
*
* @return N/A
*/
void _k_timeout_cancel(struct k_args *A)
{
struct k_timer *T = A->Time.timer;
if (T->duration != -1) {
_k_timer_delist(T);
TO_ALIST(&_k_command_stack, A);
}
}
/**
* @brief Free timer used for command packet timeout
*
* Cancels timer (if not already expired), then frees it.
* @param T Timer
* @return N/A
*/
void _k_timeout_free(struct k_timer *T)
{
if (T->duration != -1)
_k_timer_delist(T);
FREETIMER(T);
SYS_TRACING_OBJ_REMOVE_DLL(micro_timer, T);
}
/**
* @brief Handle expired timers
*
* Process the sorted list of timers associated with waiting tasks and
* activate each task whose timer has now expired.
*
* With tickless idle, a tick announcement may encompass multiple ticks.
* Due to limitations of the underlying timer driver, the number of elapsed
* ticks may -- under very rare circumstances -- exceed the first timer's
* remaining tick count, although never by more than a single tick. This means that
* a task timer may occasionally expire one tick later than it was scheduled to,
* and that a periodic timer may exhibit a slow, ever-increasing degree of drift
* from the main system timer over long intervals.
*
* @param ticks Number of ticks
* @return N/A
*/
void _k_timer_list_update(int ticks)
{
struct k_timer *T;
while (_k_timer_list_head != NULL) {
_k_timer_list_head->duration -= ticks;
if (_k_timer_list_head->duration > 0) {
return;
}
T = _k_timer_list_head;
if (T == _k_timer_list_tail) {
_k_timer_list_head = _k_timer_list_tail = NULL;
} else {
_k_timer_list_head = T->next;
_k_timer_list_head->prev = NULL;
}
if (T->period) {
T->duration = T->period;
_k_timer_enlist(T);
} else {
T->duration = -1;
}
TO_ALIST(&_k_command_stack, T->args);
ticks = 0; /* don't decrement duration for subsequent timer(s) */
}
}
/**
* @brief Handle timer allocation request
*
* This routine, called by _k_server(), handles the request for allocating a
* timer.
*
* @param P Pointer to timer allocation request arguments.
*
* @return N/A
*/
void _k_timer_alloc(struct k_args *P)
{
struct k_timer *T;
struct k_args *A;
GETTIMER(T);
P->args.c1.timer = T;
GETARGS(A);
T->args = A;
T->duration = -1; /* -1 indicates that timer is disabled */
SYS_TRACING_OBJ_INIT_DLL(micro_timer, T);
}
ktimer_t task_timer_alloc(void)
{
struct k_args A;
A.Comm = _K_SVC_TIMER_ALLOC;
KERNEL_ENTRY(&A);
return (ktimer_t)A.args.c1.timer;
}
/**
*
* @brief Handle timer deallocation request
*
* This routine, called by _k_server(), handles the request for deallocating a
* timer.
* @param P Pointer to timer deallocation request arguments.
* @return N/A
*/
void _k_timer_dealloc(struct k_args *P)
{
struct k_timer *T = P->args.c1.timer;
struct k_args *A = T->args;
if (T->duration != -1)
_k_timer_delist(T);
FREETIMER(T);
FREEARGS(A);
SYS_TRACING_OBJ_REMOVE_DLL(micro_timer, T);
}
void task_timer_free(ktimer_t timer)
{
struct k_args A;
A.Comm = _K_SVC_TIMER_DEALLOC;
A.args.c1.timer = (struct k_timer *)timer;
KERNEL_ENTRY(&A);
}
/**
* @brief Handle start timer request
*
* This routine, called by _k_server(), handles the start timer request from
* both task_timer_start() and task_timer_restart().
*
* @param P Pointer to timer start request arguments.
*
* @return N/A
*/
void _k_timer_start(struct k_args *P)
{
struct k_timer *T = P->args.c1.timer; /* ptr to the timer to start */
if (T->duration != -1) { /* Stop the timer if it is active */
_k_timer_delist(T);
}
T->duration = (int32_t)P->args.c1.time1; /* Set the initial delay */
T->period = P->args.c1.time2; /* Set the period */
/*
* If the initial delay and/or the period is invalid, mark
* the timer as inactive.
*/
if ((T->duration <= 0) || (T->period < 0)) {
T->duration = -1;
return;
}
/* Track the semaphore to signal when the timer expires. */
if (P->args.c1.sema != _USE_CURRENT_SEM) {
T->args->Comm = _K_SVC_SEM_SIGNAL;
T->args->args.s1.sema = P->args.c1.sema;
}
_k_timer_enlist(T);
}
void task_timer_start(ktimer_t timer, int32_t duration, int32_t period,
ksem_t sema)
{
struct k_args A;
A.Comm = _K_SVC_TIMER_START;
A.args.c1.timer = (struct k_timer *)timer;
A.args.c1.time1 = (int64_t)duration;
A.args.c1.time2 = period;
A.args.c1.sema = sema;
KERNEL_ENTRY(&A);
}
/**
*
* @brief Handle stop timer request
*
* This routine, called by _k_server(), handles the request for stopping a
* timer.
*
* @return N/A
*/
void _k_timer_stop(struct k_args *P)
{
struct k_timer *T = P->args.c1.timer;
if (T->duration != -1)
_k_timer_delist(T);
}
void task_timer_stop(ktimer_t timer)
{
struct k_args A;
A.Comm = _K_SVC_TIMER_STOP;
A.args.c1.timer = (struct k_timer *)timer;
KERNEL_ENTRY(&A);
}
/**
*
* @brief Handle internally issued task wakeup request
*
* This routine, called by _k_server(), handles the request for waking a task
* at the end of its sleep period.
*
* @return N/A
*/
void _k_task_wakeup(struct k_args *P)
{
struct k_timer *T;
struct k_task *X;
X = P->Ctxt.task;
T = P->Time.timer;
FREETIMER(T);
_k_state_bit_reset(X, TF_TIME);
SYS_TRACING_OBJ_REMOVE_DLL(micro_timer, T);
}
/**
*
* @brief Handle task sleep request
*
* This routine, called by _k_server(), handles the request for putting a task
* to sleep.
*
* @param P Pointer to timer sleep request arguments.
* @return N/A
*/
void _k_task_sleep(struct k_args *P)
{
struct k_timer *T;
if ((P->Time.ticks) <= 0) {
return;
}
GETTIMER(T);
T->duration = P->Time.ticks;
T->period = 0;
T->args = P;
P->Comm = _K_SVC_TASK_WAKEUP;
P->Ctxt.task = _k_current_task;
P->Time.timer = T;
_k_timer_enlist(T);
_k_state_bit_set(_k_current_task, TF_TIME);
SYS_TRACING_OBJ_INIT_DLL(micro_timer, T);
}
static inline void _do_micro_task_sleep(int32_t ticks)
{
struct k_args A;
A.Comm = _K_SVC_TASK_SLEEP;
A.Time.ticks = ticks;
KERNEL_ENTRY(&A);
}
#if defined(CONFIG_NANO_TIMEOUTS)
/*
* Enable calling task_sleep() during system initialization.
* When _k_server() starts, _do_task_sleep gets reassigned to
* _micro_task_sleep().
*/
extern void _nano_task_sleep(int32_t timeout_in_ticks);
void _micro_task_sleep(int32_t ticks)
{
if (_IS_IDLE_TASK()) {
_nano_task_sleep(ticks);
} else {
_do_micro_task_sleep(ticks);
}
}
void (*_do_task_sleep)(int32_t ticks) = _nano_task_sleep;
#else
void _micro_task_sleep(int32_t ticks)
{
_do_micro_task_sleep(ticks);
}
void (*_do_task_sleep)(int32_t ticks) = _micro_task_sleep;
#endif
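/*
 * Usage sketch (hypothetical application code): a periodic timer that
 * signals a semaphore.  TICK_SEM is assumed to be a semaphore defined
 * elsewhere; the timer first fires after 50 ticks and then every 100 ticks
 * until it is stopped.
 */
void timer_example(void)
{
	ktimer_t timer = task_timer_alloc();

	task_timer_start(timer, 50, 100, TICK_SEM);

	/* consume one periodic signal */
	task_sem_take(TICK_SEM, TICKS_UNLIMITED);

	task_timer_stop(timer);
	task_timer_free(timer);
}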

View File

@@ -1,157 +0,0 @@
# Kconfig - nanokernel configuration options
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
menu "Nanokernel Options"
config BOOT_BANNER
bool
prompt "Boot banner"
default n
select PRINTK
select EARLY_CONSOLE
help
This option outputs a banner to the console device during boot up. It
also embeds a date & time stamp in the kernel and in each USAP image.
config BUILD_TIMESTAMP
bool
prompt "Build Timestamp"
help
Record the build timestamp and add it to the boot banner.
config INT_LATENCY_BENCHMARK
bool
prompt "Interrupt latency metrics [EXPERIMENTAL]"
default n
depends on ARCH="x86"
help
This option enables the tracking of interrupt latency metrics;
the exact set of metrics being tracked is board-dependent.
Tracking begins when int_latency_init() is invoked by an application.
The metrics are displayed (and a new sampling interval is started)
each time int_latency_show() is called thereafter.
config MAIN_STACK_SIZE
int
prompt "Background task stack size (in bytes)"
default 1024
help
This option specifies the size of the stack used by the kernel's
background task, whose entry point is main().
config ISR_STACK_SIZE
int
prompt "ISR and initialization stack size (in bytes)"
default 2048
help
This option specifies the size of the stack used by interrupt
service routines (ISRs), and during nanokernel initialization.
config THREAD_CUSTOM_DATA
bool
prompt "Task and fiber custom data"
default n
help
This option allows each task and fiber to store 32 bits of custom data,
which can be accessed using the sys_thread_custom_data_xxx() APIs.
config NANO_TIMEOUTS
bool
prompt "Enable timeouts on nanokernel objects"
default n
depends on SYS_CLOCK_EXISTS
help
Allow fibers and tasks to wait on nanokernel objects with a timeout, by
enabling the nano_xxx_wait_timeout APIs, and allow fibers to sleep for a
period of time, by enabling the fiber_sleep API.
config NANO_TIMERS
bool
prompt "Enable nanokernel timers"
default y if NANOKERNEL
default n
depends on SYS_CLOCK_EXISTS
help
Allow fibers and tasks to wait on nanokernel timers, which can be
accessed using the nano_timer_xxx() APIs.
config NANOKERNEL_TICKLESS_IDLE_SUPPORTED
bool
default n
help
To be selected by an architecture if it supports tickless idle in
nanokernel systems.
config ERRNO
bool
prompt "Enable errno support"
default y
help
Enable per-thread errno in the kernel. Application and library code must
include errno.h provided by the C library (libc) to use the errno symbol.
The C library must access the per-thread errno via the _get_errno() symbol.
config NANO_WORKQUEUE
bool "Enable nano workqueue support"
default n
help
Nano workqueues allow scheduling work items to be executed in a fiber
context. Typically such work items are scheduled from ISRs, when the
work cannot be executed in interrupt context.
config SYSTEM_WORKQUEUE
bool "Start a system workqueue"
default y
depends on NANO_WORKQUEUE
help
Start a system-wide nano_workqueue that can be used by any system
component.
config SYSTEM_WORKQUEUE_STACK_SIZE
int "System workqueue stack size"
default 1024
depends on SYSTEM_WORKQUEUE
config SYSTEM_WORKQUEUE_PRIORITY
int "System workqueue priority"
default 10
depends on SYSTEM_WORKQUEUE
config ATOMIC_OPERATIONS_BUILTIN
bool
help
Use the compiler builtin functions for atomic operations. This is
the preferred method. However, support for all arches in GCC is
incomplete.
config ATOMIC_OPERATIONS_CUSTOM
bool
help
Use when there isn't support for compiler built-ins, but you have
written optimized assembly code under arch/ which implements these.
config ATOMIC_OPERATIONS_C
bool
help
Use atomic operation routines that are implemented entirely
in C by locking interrupts. Selected by architectures that either
do not support atomic operations in their instruction set, have
not yet implemented them during bring-up, or whose compiler does
not support the atomic __sync_* builtins.
endmenu

View File

@@ -1,24 +0,0 @@
ccflags-y +=-I$(srctree)/kernel/nanokernel/include
ccflags-y +=-I$(srctree)/kernel/microkernel/include
asflags-y := ${ccflags-y}
obj-y = nano_fiber.o nano_lifo.o \
nano_fifo.o nano_stack.o nano_sys_clock.o \
nano_context.o nano_init.o nano_sema.o \
version.o device.o wait_q.o
obj-$(CONFIG_INT_LATENCY_BENCHMARK) += int_latency_bench.o
obj-$(CONFIG_NANO_TIMEOUTS) += nano_sleep.o
obj-$(CONFIG_STACK_CANARIES) += compiler_stack_protect.o
obj-$(CONFIG_SYS_POWER_MANAGEMENT) += idle.o
obj-$(CONFIG_NANO_TIMERS) += nano_timer.o
obj-$(CONFIG_KERNEL_EVENT_LOGGER) += event_logger.o
obj-$(CONFIG_KERNEL_EVENT_LOGGER) += kernel_event_logger.o
obj-$(CONFIG_ATOMIC_OPERATIONS_C) += atomic_c.o
obj-$(CONFIG_ERRNO) += errno.o
obj-$(CONFIG_NANO_WORKQUEUE) += nano_work.o
ifneq (,$(filter y,$(CONFIG_NANO_TIMERS) $(CONFIG_NANO_TIMEOUTS)))
obj-y += timeout_q.o
endif

View File

@@ -1 +0,0 @@
#include "../unified/atomic_c.c"

View File

@@ -1 +0,0 @@
#include "../unified/compiler_stack_protect.c"

View File

@@ -1 +0,0 @@
#include "../unified/device.c"

View File

@@ -1 +0,0 @@
#include "../unified/errno.c"

View File

@@ -1 +0,0 @@
#include "../unified/event_logger.c"

View File

@@ -1,92 +0,0 @@
/*
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Nanokernel idle support
*
* This module provides routines to set the idle field in the nanokernel
* data structure.
*/
#include <nanokernel.h>
#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
#include <drivers/system_timer.h>
#include <wait_q.h>
/**
*
* @brief Indicate that nanokernel is idling in tickless mode
*
* Sets the nanokernel data structure idle field to a non-zero value.
*
* @param ticks the number of ticks to idle
*
* @return N/A
*/
void nano_cpu_set_idle(int32_t ticks)
{
extern tNANO _nanokernel;
_nanokernel.idle = ticks;
}
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
int32_t _sys_idle_ticks_threshold = CONFIG_TICKLESS_IDLE_THRESH;
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
static inline int32_t get_next_tick_expiry(void)
{
return _nano_get_earliest_timeouts_deadline();
}
#else
#define get_next_tick_expiry(void) TICKS_UNLIMITED
#endif
static inline int was_in_tickless_idle(void)
{
return (_nanokernel.idle == TICKS_UNLIMITED) ||
(_nanokernel.idle >= _sys_idle_ticks_threshold);
}
static inline int must_enter_tickless_idle(void)
{
/* uses same logic as was_in_tickless_idle() */
return was_in_tickless_idle();
}
void _power_save_idle(void)
{
_nanokernel.idle = get_next_tick_expiry();
if (must_enter_tickless_idle()) {
_timer_idle_enter((uint32_t)_nanokernel.idle);
}
}
void _power_save_idle_exit(void)
{
if (was_in_tickless_idle()) {
_timer_idle_exit();
_nanokernel.idle = 0;
}
}
#endif /* CONFIG_NANOKERNEL && CONFIG_TICKLESS_IDLE */
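/*
 * Conceptual sketch of how the two routines above pair up around an
 * architecture's low-power wait (the real call sites live in
 * architecture-specific idle code, not in this file): program the timer for
 * the next known deadline before sleeping, and restore normal ticking on
 * wakeup.
 */
static void example_idle_sequence(void)
{
	_power_save_idle();		/* may call _timer_idle_enter() */

	/* architecture-specific "wait for interrupt" would go here */

	_power_save_idle_exit();	/* calls _timer_idle_exit() if needed */
}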

View File

@@ -1,94 +0,0 @@
/*
* Copyright (c) 2010, 2012, 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Macros to generate structure member offset definitions
*
* This header contains macros to allow a nanokernel implementation to
* generate absolute symbols whose values represents the member offsets for
* various nanokernel structures. These absolute symbols are typically
* utilized by assembly source files rather than hardcoding the values in
* some local header file.
*
* WARNING: Absolute symbols can potentially be utilized by external tools --
* for example, to locate a specific field within a data structure.
* Consequently, changes made to such symbols may require modifications to the
* associated tool(s). Typically, relocating a member of a structure merely
* requires that a tool be rebuilt; however, moving a member to another
* structure (or to a new sub-structure within an existing structure) may
* require that the tool itself be modified. Likewise, deleting, renaming, or
* changing the meaning of an absolute symbol may require modifications to a
* tool.
*
* The macro "GEN_OFFSET_SYM(structure, member)" is used to generate a single
* absolute symbol. The absolute symbol will appear in the object module
* generated from the source file that utilizes the GEN_OFFSET_SYM() macro.
* Absolute symbols representing a structure member offset have the following
* form:
*
* __<structure>_<member>_OFFSET
*
* This header also defines the GEN_ABSOLUTE_SYM macro to simply define an
* absolute symbol, irrespective of whether the value represents a structure
* or offset.
*
* The following sample file illustrates the usage of the macros available
* in this file:
*
* <START of sample source file: offsets.c>
*
* #include <gen_offset.h>
* /@ include struct definitions for which offsets symbols are to be
* generated @/
*
* #include <nano_private.h>
* GEN_ABS_SYM_BEGIN (_OffsetAbsSyms) /@ the name parameter is arbitrary @/
* /@ tNANO structure member offsets @/
*
* GEN_OFFSET_SYM (tNANO, fiber);
* GEN_OFFSET_SYM (tNANO, task);
* GEN_OFFSET_SYM (tNANO, current);
* GEN_OFFSET_SYM (tNANO, nested);
* GEN_OFFSET_SYM (tNANO, common_isp);
*
* GEN_ABSOLUTE_SYM (__tNANO_SIZEOF, sizeof(tNANO));
*
* GEN_ABS_SYM_END
* <END of sample source file: offsets.c>
*
* Compiling the sample offsets.c results in the following symbols in offsets.o:
*
* $ nm offsets.o
* 00000010 A __tNANO_common_isp_OFFSET
* 00000008 A __tNANO_current_OFFSET
* 0000000c A __tNANO_nested_OFFSET
* 00000000 A __tNANO_fiber_OFFSET
* 00000004 A __tNANO_task_OFFSET
*/
#ifndef _GEN_OFFSET_H
#define _GEN_OFFSET_H
#include <toolchain.h>
#include <stddef.h>
/* definition of the GEN_OFFSET_SYM() macros is toolchain independent */
#define GEN_OFFSET_SYM(S, M) \
GEN_ABSOLUTE_SYM(__##S##_##M##_##OFFSET, offsetof(S, M))
#endif /* _GEN_OFFSET_H */

View File

@@ -1,142 +0,0 @@
/*
* Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Architecture-independent private nanokernel APIs
*
* This file contains private nanokernel APIs that are not
* architecture-specific.
*/
#ifndef _NANO_INTERNAL__H_
#define _NANO_INTERNAL__H_
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
#include <nanokernel.h>
/* Early boot functions */
void _bss_zero(void);
#ifdef CONFIG_XIP
void _data_copy(void);
#else
static inline void _data_copy(void)
{
/* Do nothing */
}
#endif
FUNC_NORETURN void _Cstart(void);
/* helper type alias for thread control structure */
typedef struct tcs tTCS;
/* thread entry point declarations */
typedef void *_thread_arg_t;
typedef void (*_thread_entry_t)(_thread_arg_t arg1,
_thread_arg_t arg2,
_thread_arg_t arg3);
extern void _thread_entry(_thread_entry_t,
_thread_arg_t,
_thread_arg_t,
_thread_arg_t);
extern void _new_thread(char *pStack, unsigned stackSize,
void *uk_task_ptr, _thread_entry_t pEntry,
_thread_arg_t arg1, _thread_arg_t arg2,
_thread_arg_t arg3,
int prio, unsigned options);
/* context switching and scheduling-related routines */
extern void _nano_fiber_ready(struct tcs *tcs);
extern void _nano_fiber_swap(void);
extern unsigned int _Swap(unsigned int);
/* set and clear essential fiber/task flag */
extern void _thread_essential_set(void);
extern void _thread_essential_clear(void);
/* clean up when a thread is aborted */
#if defined(CONFIG_THREAD_MONITOR)
extern void _thread_monitor_exit(struct tcs *tcs);
#else
#define _thread_monitor_exit(tcs) \
do {/* nothing */ \
} while (0)
#endif /* CONFIG_THREAD_MONITOR */
/* special nanokernel object APIs */
struct nano_lifo;
extern void *_nano_fiber_lifo_get_panic(struct nano_lifo *lifo);
#ifdef CONFIG_MICROKERNEL
extern void _task_nop(void);
extern void _nano_nop(void);
extern int _nano_unpend_tasks(struct _nano_queue *queue);
extern void _nano_task_ready(void *ptr);
extern void _nano_timer_task_ready(void *task);
#define _TASK_PENDQ_INIT(queue) _nano_wait_q_init(queue)
#define _NANO_UNPEND_TASKS(queue) \
do { \
if (_nano_unpend_tasks(queue) != 0) { \
_nano_nop(); \
} \
} while (0)
#define _TASK_NANO_UNPEND_TASKS(queue) \
do { \
if (_nano_unpend_tasks(queue) != 0) { \
_task_nop(); \
} \
} while (0)
#define _NANO_TASK_READY(tcs) _nano_task_ready(tcs->uk_task_ptr)
#define _NANO_TIMER_TASK_READY(tcs) _nano_timer_task_ready(tcs->uk_task_ptr)
#define _IS_MICROKERNEL_TASK(tcs) ((tcs)->uk_task_ptr != NULL)
#define _IS_IDLE_TASK() \
(task_priority_get() == (CONFIG_NUM_TASK_PRIORITIES - 1))
#else
#define _TASK_PENDQ_INIT(queue) do { } while (0)
#define _NANO_UNPEND_TASKS(queue) do { } while (0)
#define _TASK_NANO_UNPEND_TASKS(queue) do { } while (0)
#define _NANO_TASK_READY(tcs) do { } while (0)
#define _NANO_TIMER_TASK_READY(tcs) do { } while (0)
#define _IS_MICROKERNEL_TASK(tcs) (0)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* _NANO_INTERNAL__H_ */

View File

@@ -1,67 +0,0 @@
/* nano_offsets.h - nanokernel structure member offset definitions */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <device.h>
#ifndef _NANO_OFFSETS__H_
#define _NANO_OFFSETS__H_
/*
* The final link step uses the symbol _OffsetAbsSyms to force the linkage of
* offsets.o into the ELF image.
*/
GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)
/* arch-agnostic tNANO structure member offsets */
GEN_OFFSET_SYM(tNANO, fiber);
GEN_OFFSET_SYM(tNANO, task);
GEN_OFFSET_SYM(tNANO, current);
#if defined(CONFIG_THREAD_MONITOR)
GEN_OFFSET_SYM(tNANO, threads);
#endif
#ifdef CONFIG_FP_SHARING
GEN_OFFSET_SYM(tNANO, current_fp);
#endif
/* size of the entire tNANO structure */
GEN_ABSOLUTE_SYM(__tNANO_SIZEOF, sizeof(tNANO));
/* arch-agnostic struct tcs structure member offsets */
GEN_OFFSET_SYM(tTCS, link);
GEN_OFFSET_SYM(tTCS, prio);
GEN_OFFSET_SYM(tTCS, flags);
GEN_OFFSET_SYM(tTCS, coopReg); /* start of coop register set */
GEN_OFFSET_SYM(tTCS, preempReg); /* start of preempt register set */
#if defined(CONFIG_THREAD_MONITOR)
GEN_OFFSET_SYM(tTCS, next_thread);
#endif
/* size of the entire struct tcs structure */
GEN_ABSOLUTE_SYM(__tTCS_SIZEOF, sizeof(tTCS));
/* size of the device structure. Used by linker scripts */
GEN_ABSOLUTE_SYM(__DEVICE_STR_SIZEOF, sizeof(struct device));
#endif /* _NANO_OFFSETS__H_ */

View File

@@ -1,118 +0,0 @@
/** @file
* @brief timeout queue for fibers on nanokernel objects
*
* This file is meant to be included by nanokernel/include/wait_q.h only
*/
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _kernel_nanokernel_include_timeout_q__h_
#define _kernel_nanokernel_include_timeout_q__h_
#include <misc/dlist.h>
#ifdef __cplusplus
extern "C" {
#endif
int _do_nano_timeout_abort(struct _nano_timeout *t);
void _do_nano_timeout_add(struct tcs *tcs, struct _nano_timeout *t,
struct _nano_queue *wait_q, int32_t timeout);
static inline void _nano_timeout_init(struct _nano_timeout *t,
_nano_timeout_func_t func)
{
/*
* Must be initialized here and when dequeueing a timeout so that code
* not dealing with timeouts does not have to handle this, such as when
* waiting forever on a semaphore.
*/
t->delta_ticks_from_prev = -1;
/*
* Must be initialized here so that the _fiber_wakeup family of APIs can
* verify the fiber is not on a wait queue before aborting a timeout.
*/
t->wait_q = NULL;
/*
* Must be initialized here, so the _nano_timeout_handle_one_timeout()
* routine can check if there is a fiber waiting on this timeout
*/
t->tcs = NULL;
/*
* Set callback function
*/
t->func = func;
}
#if defined(CONFIG_NANO_TIMEOUTS)
/* initialize the nano timeouts part of TCS when enabled in the kernel */
static inline void _nano_timeout_tcs_init(struct tcs *tcs)
{
_nano_timeout_init(&tcs->nano_timeout, NULL);
/*
* These are initialized when enqueing on the timeout queue:
*
* tcs->nano_timeout.node.next
* tcs->nano_timeout.node.prev
*/
}
/* abort a timeout for a specified fiber */
static inline int _nano_timeout_abort(struct tcs *tcs)
{
return _do_nano_timeout_abort(&tcs->nano_timeout);
}
/* put a fiber on the timeout queue and record its wait queue */
static inline void _nano_timeout_add(struct tcs *tcs,
struct _nano_queue *wait_q,
int32_t timeout)
{
_do_nano_timeout_add(tcs, &tcs->nano_timeout, wait_q, timeout);
}
#else
#define _nano_timeout_object_dequeue(tcs, t) do { } while (0)
#endif /* CONFIG_NANO_TIMEOUTS */
void _nano_timeout_handle_timeouts(void);
static inline int _nano_timer_timeout_abort(struct _nano_timeout *t)
{
return _do_nano_timeout_abort(t);
}
static inline void _nano_timer_timeout_add(struct _nano_timeout *t,
struct _nano_queue *wait_q,
int32_t timeout)
{
_do_nano_timeout_add(NULL, t, wait_q, timeout);
}
uint32_t _nano_get_earliest_timeouts_deadline(void);
#ifdef __cplusplus
}
#endif
#endif /* _kernel_nanokernel_include_timeout_q__h_ */

View File

@@ -1,120 +0,0 @@
/* wait queue for multiple fibers on nanokernel objects */
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _kernel_nanokernel_include_wait_q__h_
#define _kernel_nanokernel_include_wait_q__h_
#include <nano_private.h>
#ifdef __cplusplus
extern "C" {
#endif
/* reset a wait queue, call during operation */
static inline void _nano_wait_q_reset(struct _nano_queue *wait_q)
{
wait_q->head = (void *)0;
wait_q->tail = (void *)&(wait_q->head);
}
/* initialize a wait queue: call only during object initialization */
static inline void _nano_wait_q_init(struct _nano_queue *wait_q)
{
_nano_wait_q_reset(wait_q);
}
struct tcs *_nano_wait_q_remove(struct _nano_queue *wait_q);
/* put current fiber on specified wait queue */
static inline void _nano_wait_q_put(struct _nano_queue *wait_q)
{
((struct tcs *)wait_q->tail)->link = _nanokernel.current;
wait_q->tail = _nanokernel.current;
}
#if defined(CONFIG_NANO_TIMEOUTS)
#include <timeout_q.h>
#define _NANO_TIMEOUT_TICK_GET() sys_tick_get()
#define _NANO_TIMEOUT_ADD(pq, ticks) \
do { \
if ((ticks) != TICKS_UNLIMITED) { \
_nano_timeout_add(_nanokernel.current, (pq), (ticks)); \
} \
} while (0)
#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) \
_nanokernel.task_timeout = (ticks)
#define _NANO_TIMEOUT_UPDATE(timeout, limit, cur_ticks) \
do { \
if ((timeout) != TICKS_UNLIMITED) { \
(timeout) = (int32_t)((limit) - (cur_ticks)); \
} \
} while (0)
#elif defined(CONFIG_NANO_TIMERS)
#include <timeout_q.h>
#define _nano_timeout_tcs_init(tcs) do { } while ((0))
#define _nano_timeout_abort(tcs) do { } while ((0))
#define _NANO_TIMEOUT_TICK_GET() 0
#define _NANO_TIMEOUT_ADD(pq, ticks) do { } while (0)
#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while ((0))
#define _NANO_TIMEOUT_UPDATE(timeout, limit, cur_ticks) do { } while (0)
#else
#define _nano_timeout_tcs_init(tcs) do { } while ((0))
#define _nano_timeout_abort(tcs) do { } while ((0))
#define _nano_get_earliest_timeouts_deadline() ((uint32_t)TICKS_UNLIMITED)
#define _NANO_TIMEOUT_TICK_GET() 0
#define _NANO_TIMEOUT_ADD(pq, ticks) do { } while (0)
#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while ((0))
#define _NANO_TIMEOUT_UPDATE(timeout, limit, cur_ticks) do { } while (0)
#endif
#ifdef CONFIG_MICROKERNEL
extern void _task_nano_pend_task(struct _nano_queue *, int32_t);
extern uint32_t task_priority_get(void);
#define _NANO_OBJECT_WAIT(queue, data, timeout, key) \
do { \
if (_IS_IDLE_TASK()) { \
_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout); \
nano_cpu_atomic_idle(key); \
key = irq_lock(); \
} else { \
_task_nano_pend_task(queue, timeout); \
} \
} while (0)
#else
#define _NANO_OBJECT_WAIT(queue, data, timeout, key) \
do { \
_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout); \
nano_cpu_atomic_idle(key); \
key = irq_lock(); \
} while (0)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _kernel_nanokernel_include_wait_q__h_ */

View File

@@ -1 +0,0 @@
#include "../unified/int_latency_bench.c"

View File

@@ -1 +0,0 @@
#include "../unified/kernel_event_logger.c"

View File

@@ -1,245 +0,0 @@
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Nanokernel thread support
*
* This module provides general purpose thread support, which applies to both
* tasks and fibers.
*/
#include <toolchain.h>
#include <sections.h>
#include <nano_private.h>
#include <misc/printk.h>
#include <sys_clock.h>
#include <drivers/system_timer.h>
nano_thread_id_t sys_thread_self_get(void)
{
return _nanokernel.current;
}
nano_context_type_t sys_execution_context_type_get(void)
{
if (_is_in_isr())
return NANO_CTX_ISR;
if ((_nanokernel.current->flags & TASK) == TASK)
return NANO_CTX_TASK;
return NANO_CTX_FIBER;
}
/**
*
* @brief Mark thread as essential to system
*
* This function tags the running fiber or task as essential to system
* operation; exceptions raised by this thread will be treated as a fatal
* system error.
*
* @return N/A
*/
void _thread_essential_set(void)
{
_nanokernel.current->flags |= K_ESSENTIAL;
}
/**
*
* @brief Mark thread as not essential to system
*
* This function tags the running fiber or task as not essential to system
* operation; exceptions raised by this thread may be recoverable.
* (This is the default tag for a thread.)
*
* @return N/A
*/
void _thread_essential_clear(void)
{
_nanokernel.current->flags &= ~K_ESSENTIAL;
}
/**
*
* @brief Is the specified thread essential?
*
* This routine indicates if the running fiber or task is an essential system
* thread.
*
* @return Non-zero if current thread is essential, zero if it is not
*/
int _is_thread_essential(void)
{
return _nanokernel.current->flags & K_ESSENTIAL;
}
void sys_thread_busy_wait(uint32_t usec_to_wait)
{
/* use 64-bit math to prevent overflow when multiplying */
uint32_t cycles_to_wait = (uint32_t)(
(uint64_t)usec_to_wait *
(uint64_t)sys_clock_hw_cycles_per_sec /
(uint64_t)USEC_PER_SEC
);
uint32_t start_cycles = sys_cycle_get_32();
for (;;) {
uint32_t current_cycles = sys_cycle_get_32();
/* this handles the rollover on an unsigned 32-bit value */
if ((current_cycles - start_cycles) >= cycles_to_wait) {
break;
}
}
}
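/*
 * Illustrative usage sketch (the 100 microsecond value is arbitrary): on a
 * platform whose hardware cycle counter runs at 32 MHz, the conversion above
 * yields (100 * 32000000) / 1000000 = 3200 cycles to spin before returning.
 */
static void example_busy_wait_100us(void)
{
	sys_thread_busy_wait(100);
}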
#ifdef CONFIG_THREAD_CUSTOM_DATA
/**
*
* @brief Set thread's custom data
*
* This routine sets the custom data value for the current task or fiber.
* Custom data is not used by the kernel itself, and is freely available
* for the thread to use as it sees fit.
*
* @param value New value for the thread's custom data.
*
* @return N/A
*/
void sys_thread_custom_data_set(void *value)
{
_nanokernel.current->custom_data = value;
}
/**
*
* @brief Get thread's custom data
*
* This function returns the custom data value for the current task or fiber.
*
* @return current handle value
*/
void *sys_thread_custom_data_get(void)
{
return _nanokernel.current->custom_data;
}
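/*
 * Illustrative sketch: stash a per-thread context pointer and read it back
 * later from the same thread. The struct and its values are hypothetical.
 */
struct example_ctx {
	int id;
};
static struct example_ctx example_ctx_instance = { .id = 42 };

static void example_custom_data_usage(void)
{
	struct example_ctx *ctx;

	sys_thread_custom_data_set(&example_ctx_instance);
	ctx = sys_thread_custom_data_get();
	/* ctx->id is 42 for this thread */
	(void)ctx;
}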
#endif /* CONFIG_THREAD_CUSTOM_DATA */
#if defined(CONFIG_THREAD_MONITOR)
/*
* Remove a thread from the kernel's list of active threads.
*/
void _thread_monitor_exit(struct tcs *thread)
{
unsigned int key = irq_lock();
if (thread == _nanokernel.threads) {
_nanokernel.threads = _nanokernel.threads->next_thread;
} else {
struct tcs *prev_thread;
prev_thread = _nanokernel.threads;
while (thread != prev_thread->next_thread) {
prev_thread = prev_thread->next_thread;
}
prev_thread->next_thread = thread->next_thread;
}
irq_unlock(key);
}
#endif /* CONFIG_THREAD_MONITOR */
/**
*
* @brief Common thread entry point function
*
* This function serves as the entry point for _all_ threads, i.e. both
* tasks and fibers are instantiated such that initial execution starts
* here.
*
* This routine invokes the actual task or fiber entry point function and
* passes it three arguments. It also handles graceful termination of the
* task or fiber if the entry point function ever returns.
*
* @param pEntry address of the app entry point function
* @param parameter1 1st arg to the app entry point function
* @param parameter2 2nd arg to the app entry point function
* @param parameter3 3rd arg to the app entry point function
*
* @internal
* The 'noreturn' attribute is applied to this function so that the compiler
* can dispense with generating the usual preamble that is only required for
* functions that actually return.
*
* @return Does not return
*
*/
FUNC_NORETURN void _thread_entry(_thread_entry_t pEntry,
_thread_arg_t parameter1,
_thread_arg_t parameter2,
_thread_arg_t parameter3)
{
/* Execute the "application" entry point function */
pEntry(parameter1, parameter2, parameter3);
/* Determine if thread can legally terminate itself via "return" */
if (_is_thread_essential()) {
#ifdef CONFIG_NANOKERNEL
/*
* Nanokernel's background task must always be present,
* so if it has nothing left to do just let it idle forever
*/
while (((_nanokernel.current)->flags & TASK) == TASK) {
nano_cpu_idle();
}
#endif /* CONFIG_NANOKERNEL */
/* Loss of essential thread is a system fatal error */
_NanoFatalErrorHandler(_NANO_ERR_INVALID_TASK_EXIT,
&_default_esf);
}
/* Gracefully terminate the currently executing thread */
#ifdef CONFIG_MICROKERNEL
if (((_nanokernel.current)->flags & TASK) == TASK) {
extern FUNC_NORETURN void _TaskAbort(void);
_TaskAbort();
} else
#endif /* CONFIG_MICROKERNEL */
{
fiber_abort();
}
/*
* Compiler can't tell that fiber_abort() won't return and issues
* a warning unless we explicitly tell it that control never gets this
* far.
*/
CODE_UNREACHABLE;
}

View File

@@ -1,222 +0,0 @@
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Nanokernel fiber support primitives
*
* This module provides various nanokernel fiber related primitives,
* either in the form of an actual function or an alias to a function.
*/
#include <nano_private.h>
#include <nano_internal.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
/**
*
* @brief Add a fiber to the list of runnable fibers
*
* The list of runnable fibers is maintained via a singly linked list
* in priority order. Numerically lower priorities represent higher priority
* fibers.
*
* Interrupts must already be locked to ensure list cannot change
* while this routine is executing!
*
* @return N/A
*/
void _nano_fiber_ready(struct tcs *tcs)
{
struct tcs *pQ = (struct tcs *)&_nanokernel.fiber;
/*
* Search until end of list or until a fiber with numerically
* higher priority is located.
*/
while (pQ->link && (tcs->prio >= pQ->link->prio)) {
pQ = pQ->link;
}
/* Insert fiber, following any equal priority fibers */
tcs->link = pQ->link;
pQ->link = tcs;
}
/* currently the fiber and task implementations are identical */
FUNC_ALIAS(_fiber_start, fiber_fiber_start, nano_thread_id_t);
FUNC_ALIAS(_fiber_start, task_fiber_start, nano_thread_id_t);
FUNC_ALIAS(_fiber_start, fiber_start, nano_thread_id_t);
nano_thread_id_t _fiber_start(char *pStack,
unsigned stackSize, /* stack size in bytes */
nano_fiber_entry_t pEntry,
int parameter1,
int parameter2,
unsigned priority,
unsigned options)
{
struct tcs *tcs;
unsigned int imask;
tcs = (struct tcs *) pStack;
_new_thread(pStack,
stackSize,
NULL,
(_thread_entry_t)pEntry,
(void *)parameter1,
(void *)parameter2,
(void *)0,
priority,
options);
/*
* _new_thread() has already set the flags depending on the 'options'
* and 'priority' parameters passed to it
*/
/* lock interrupts to prevent corruption of the runnable fiber list */
imask = irq_lock();
/* make the newly crafted TCS a runnable fiber */
_nano_fiber_ready(tcs);
/*
* Simply return to the caller if the current thread is FIBER,
* otherwise swap into the newly created fiber
*/
if ((_nanokernel.current->flags & TASK) == TASK) {
_Swap(imask);
} else {
irq_unlock(imask);
}
return tcs;
}
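/*
 * Illustrative sketch: spawn a fiber at priority 7 with a 512-byte stack
 * (both values arbitrary). fiber_start() is the alias defined above.
 */
static char __stack example_fiber_stack[512];

static void example_fiber_entry(int arg1, int arg2)
{
	/* fiber body; arg1 and arg2 come from fiber_start() below */
}

static void example_spawn_fiber(void)
{
	fiber_start(example_fiber_stack, sizeof(example_fiber_stack),
		    example_fiber_entry, 0, 0, 7, 0);
}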
void fiber_yield(void)
{
unsigned int imask = irq_lock();
if ((_nanokernel.fiber != (struct tcs *)NULL) &&
(_nanokernel.current->prio >= _nanokernel.fiber->prio)) {
/*
* Reinsert current thread into the list of runnable threads,
* and then swap to the thread at the head of the fiber list.
*/
_nano_fiber_ready(_nanokernel.current);
_Swap(imask);
} else {
irq_unlock(imask);
}
}
/**
*
* @brief Pass control from the currently executing fiber
*
* This routine is used when a fiber voluntarily gives up control of the CPU.
*
* This routine can only be called from a fiber.
*
* @return This function never returns
*/
FUNC_NORETURN void _nano_fiber_swap(void)
{
unsigned int imask;
/*
* Since the currently running fiber is not queued onto the runnable
* fiber list, simply performing a _Swap() shall initiate a context
* switch to the highest priority fiber, or the highest priority task
* if there are no runnable fibers.
*/
imask = irq_lock();
_Swap(imask);
/*
* Compiler can't know that _Swap() won't return and will issue a
* warning unless we explicitly tell it that control never gets this
* far.
*/
CODE_UNREACHABLE;
}
#ifndef CONFIG_ARCH_HAS_NANO_FIBER_ABORT
FUNC_NORETURN void fiber_abort(void)
{
/* Do normal thread exit cleanup, then give up CPU control */
_thread_monitor_exit(_nanokernel.current);
_nano_fiber_swap();
}
#endif
#ifdef CONFIG_NANO_TIMEOUTS
#include <wait_q.h>
FUNC_ALIAS(fiber_delayed_start, fiber_fiber_delayed_start, nano_thread_id_t);
FUNC_ALIAS(fiber_delayed_start, task_fiber_delayed_start, nano_thread_id_t);
nano_thread_id_t fiber_delayed_start(char *stack,
unsigned int stack_size_in_bytes,
nano_fiber_entry_t entry_point, int param1,
int param2, unsigned int priority,
unsigned int options, int32_t timeout_in_ticks)
{
unsigned int key;
struct tcs *tcs;
tcs = (struct tcs *)stack;
_new_thread(stack, stack_size_in_bytes, NULL, (_thread_entry_t)entry_point,
(void *)param1, (void *)param2, (void *)0, priority, options);
key = irq_lock();
_nano_timeout_add(tcs, NULL, timeout_in_ticks);
irq_unlock(key);
return tcs;
}
FUNC_ALIAS(fiber_delayed_start_cancel, fiber_fiber_delayed_start_cancel, void);
FUNC_ALIAS(fiber_delayed_start_cancel, task_fiber_delayed_start_cancel, void);
void fiber_delayed_start_cancel(nano_thread_id_t handle)
{
struct tcs *cancelled_tcs = handle;
int key = irq_lock();
_nano_timeout_abort(cancelled_tcs);
_thread_monitor_exit(cancelled_tcs);
irq_unlock(key);
}
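/*
 * Illustrative sketch: schedule a fiber to start after 50 ticks, then cancel
 * it before the delay elapses (stack size, priority and delay are arbitrary).
 */
static char __stack example_delayed_stack[512];

static void example_delayed_entry(int arg1, int arg2)
{
	/* never runs if the delayed start is cancelled in time */
}

static void example_delayed_start_and_cancel(void)
{
	nano_thread_id_t id;

	id = task_fiber_delayed_start(example_delayed_stack,
				      sizeof(example_delayed_stack),
				      example_delayed_entry, 0, 0, 7, 0, 50);
	task_fiber_delayed_start_cancel(id);
}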
#endif /* CONFIG_NANO_TIMEOUTS */

View File

@@ -1,366 +0,0 @@
/*
* Copyright (c) 2010-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Nanokernel dynamic-size FIFO queue object.
*
* This module provides the nanokernel FIFO object implementation, including
* the following APIs:
*
* nano_fifo_init
* nano_fiber_fifo_put, nano_task_fifo_put, nano_isr_fifo_put
* nano_fiber_fifo_get, nano_task_fifo_get, nano_isr_fifo_get
* nano_fifo_get
*/
/*
* INTERNAL
* In some cases the compiler "alias" attribute is used to map two or more
* APIs to the same function, since they have identical implementations.
*/
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <misc/__assert.h>
struct fifo_node {
void *next;
};
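/*
 * Illustrative sketch: the first word of every item placed on a fifo is
 * reserved for the implementation's link pointer (see struct fifo_node), so
 * application data structures set aside a leading pointer field. The message
 * type and field names here are hypothetical.
 */
struct example_fifo_msg {
	void *fifo_reserved;	/* used internally as the 'next' link */
	int payload;
};

static struct nano_fifo example_fifo;
static struct example_fifo_msg example_fifo_item = { .payload = 1 };

static void example_fifo_usage(void)
{
	struct example_fifo_msg *msg;

	nano_fifo_init(&example_fifo);
	nano_task_fifo_put(&example_fifo, &example_fifo_item);
	msg = nano_task_fifo_get(&example_fifo, TICKS_NONE);
	/* msg now points back at example_fifo_item */
	(void)msg;
}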
/**
* @brief Internal routine to initialize a fifo's data queue
*
* @return N/A
*/
static inline void data_q_init(struct _nano_queue *q)
{
q->head = NULL;
q->tail = &q->head;
}
/**
* @brief Internal routine to test if queue is empty
*
* @return 1 if the queue is empty, 0 otherwise
*/
static inline int is_q_empty(struct _nano_queue *q)
{
return q->head == NULL;
}
/*
* INTERNAL
* Although the existing implementation will support invocation from an ISR
* context, for future flexibility, this API will be restricted from ISR
* level invocation.
*/
void nano_fifo_init(struct nano_fifo *fifo)
{
_nano_wait_q_init(&fifo->wait_q);
data_q_init(&fifo->data_q);
_TASK_PENDQ_INIT(&fifo->task_q);
SYS_TRACING_OBJ_INIT(nano_fifo, fifo);
}
FUNC_ALIAS(_fifo_put_non_preemptible, nano_isr_fifo_put, void);
FUNC_ALIAS(_fifo_put_non_preemptible, nano_fiber_fifo_put, void);
/**
*
* @brief Internal routine to append data to a fifo
*
* @return N/A
*/
static inline void enqueue_data(struct nano_fifo *fifo, void *data)
{
struct fifo_node *node = data;
struct fifo_node *tail = fifo->data_q.tail;
tail->next = node;
fifo->data_q.tail = node;
node->next = NULL;
}
/**
*
* @brief Append an element to a fifo (no context switch)
*
* This routine adds an element to the end of a fifo object; it may be called
* from either a fiber or an ISR context. A fiber pending on the fifo
* object will be made ready, but will NOT be scheduled to execute.
*
* If a fiber is waiting on the fifo, the address of the element is returned to
* the waiting fiber. Otherwise, the element is linked to the end of the list.
*
* @param fifo FIFO on which to interact.
* @param data Data to send.
*
* @return N/A
*
* INTERNAL
* This function is capable of supporting invocations from both a fiber and an
* ISR context. However, the nano_isr_fifo_put and nano_fiber_fifo_put aliases
* are created to support any required implementation differences in the future
* without introducing a source code migration issue.
*/
void _fifo_put_non_preemptible(struct nano_fifo *fifo, void *data)
{
struct tcs *tcs;
unsigned int key;
key = irq_lock();
tcs = _nano_wait_q_remove(&fifo->wait_q);
if (tcs) {
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int)data);
} else {
enqueue_data(fifo, data);
_NANO_UNPEND_TASKS(&fifo->task_q);
}
irq_unlock(key);
}
void nano_task_fifo_put(struct nano_fifo *fifo, void *data)
{
struct tcs *tcs;
unsigned int key;
key = irq_lock();
tcs = _nano_wait_q_remove(&fifo->wait_q);
if (tcs) {
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int)data);
_Swap(key);
return;
}
enqueue_data(fifo, data);
_TASK_NANO_UNPEND_TASKS(&fifo->task_q);
irq_unlock(key);
}
void nano_fifo_put(struct nano_fifo *fifo, void *data)
{
static void (*func[3])(struct nano_fifo *fifo, void *data) = {
nano_isr_fifo_put,
nano_fiber_fifo_put,
nano_task_fifo_put
};
func[sys_execution_context_type_get()](fifo, data);
}
static void enqueue_list(struct nano_fifo *fifo, void *head, void *tail)
{
struct fifo_node *q_tail = fifo->data_q.tail;
q_tail->next = head;
fifo->data_q.tail = tail;
}
void _fifo_put_list_non_preemptible(struct nano_fifo *fifo,
void *head, void *tail)
{
__ASSERT(head && tail, "invalid head or tail");
unsigned int key = irq_lock();
struct tcs *fiber;
while (head && ((fiber = _nano_wait_q_remove(&fifo->wait_q)))) {
_nano_timeout_abort(fiber);
fiberRtnValueSet(fiber, (unsigned int)head);
head = *(void **)head;
}
if (head) {
enqueue_list(fifo, head, tail);
_NANO_UNPEND_TASKS(&fifo->task_q);
}
irq_unlock(key);
}
void _fifo_put_slist_non_preemptible(struct nano_fifo *fifo,
sys_slist_t *list)
{
__ASSERT(!sys_slist_is_empty(list), "list must not be empty");
_fifo_put_list_non_preemptible(fifo, list->head, list->tail);
}
FUNC_ALIAS(_fifo_put_list_non_preemptible, nano_isr_fifo_put_list, void);
FUNC_ALIAS(_fifo_put_list_non_preemptible, nano_fiber_fifo_put_list, void);
FUNC_ALIAS(_fifo_put_slist_non_preemptible, nano_isr_fifo_put_slist, void);
FUNC_ALIAS(_fifo_put_slist_non_preemptible, nano_fiber_fifo_put_slist, void);
void nano_task_fifo_put_list(struct nano_fifo *fifo, void *head, void *tail)
{
__ASSERT(head && tail, "invalid head or tail");
__ASSERT(*(void **)tail == NULL, "list is not NULL-terminated");
unsigned int key = irq_lock();
struct tcs *fiber, *first_fiber;
first_fiber = fifo->wait_q.head;
while (head && ((fiber = _nano_wait_q_remove(&fifo->wait_q)))) {
_nano_timeout_abort(fiber);
fiberRtnValueSet(fiber, (unsigned int)head);
head = *(void **)head;
}
if (head) {
enqueue_list(fifo, head, tail);
_NANO_UNPEND_TASKS(&fifo->task_q);
}
if (first_fiber) {
_Swap(key);
} else {
irq_unlock(key);
}
}
void nano_task_fifo_put_slist(struct nano_fifo *fifo, sys_slist_t *list)
{
__ASSERT(!sys_slist_is_empty(list), "list must not be empty");
nano_task_fifo_put_list(fifo, list->head, list->tail);
}
void nano_fifo_put_list(struct nano_fifo *fifo, void *head, void *tail)
{
static void (*func[3])(struct nano_fifo *, void *, void *) = {
nano_isr_fifo_put_list,
nano_fiber_fifo_put_list,
nano_task_fifo_put_list
};
func[sys_execution_context_type_get()](fifo, head, tail);
}
void nano_fifo_put_slist(struct nano_fifo *fifo, sys_slist_t *list)
{
static void (*func[3])(struct nano_fifo *, sys_slist_t *) = {
nano_isr_fifo_put_slist,
nano_fiber_fifo_put_slist,
nano_task_fifo_put_slist
};
func[sys_execution_context_type_get()](fifo, list);
}
/**
*
* @brief Internal routine to remove data from a fifo
*
* @return The data item removed
*/
static inline void *dequeue_data(struct nano_fifo *fifo)
{
struct fifo_node *head = fifo->data_q.head;
fifo->data_q.head = head->next;
if (fifo->data_q.tail == head) {
fifo->data_q.tail = &fifo->data_q.head;
}
return head;
}
FUNC_ALIAS(_fifo_get, nano_isr_fifo_get, void *);
FUNC_ALIAS(_fifo_get, nano_fiber_fifo_get, void *);
void *_fifo_get(struct nano_fifo *fifo, int32_t timeout_in_ticks)
{
unsigned int key;
void *data = NULL;
key = irq_lock();
if (likely(!is_q_empty(&fifo->data_q))) {
data = dequeue_data(fifo);
} else if (timeout_in_ticks != TICKS_NONE) {
_NANO_TIMEOUT_ADD(&fifo->wait_q, timeout_in_ticks);
_nano_wait_q_put(&fifo->wait_q);
data = (void *)_Swap(key);
return data;
}
irq_unlock(key);
return data;
}
void *nano_task_fifo_get(struct nano_fifo *fifo, int32_t timeout_in_ticks)
{
int64_t cur_ticks;
int64_t limit = 0x7fffffffffffffffll;
unsigned int key;
key = irq_lock();
cur_ticks = _NANO_TIMEOUT_TICK_GET();
if (timeout_in_ticks != TICKS_UNLIMITED) {
limit = cur_ticks + timeout_in_ticks;
}
do {
/*
* Predict that the branch will be taken to break out of the
* loop. There is little cost to a misprediction since that
* leads to idle.
*/
if (likely(!is_q_empty(&fifo->data_q))) {
void *data = dequeue_data(fifo);
irq_unlock(key);
return data;
}
if (timeout_in_ticks != TICKS_NONE) {
_NANO_OBJECT_WAIT(&fifo->task_q, &fifo->data_q.head,
timeout_in_ticks, key);
cur_ticks = _NANO_TIMEOUT_TICK_GET();
_NANO_TIMEOUT_UPDATE(timeout_in_ticks,
limit, cur_ticks);
}
} while (cur_ticks < limit);
irq_unlock(key);
return NULL;
}
void *nano_fifo_get(struct nano_fifo *fifo, int32_t timeout)
{
static void *(*func[3])(struct nano_fifo *, int32_t) = {
nano_isr_fifo_get,
nano_fiber_fifo_get,
nano_task_fifo_get
};
return func[sys_execution_context_type_get()](fifo, timeout);
}

View File

@@ -1,355 +0,0 @@
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Nanokernel initialization module
*
* This module contains routines that are used to initialize the nanokernel.
*/
#include <offsets.h>
#include <nanokernel.h>
#include <misc/printk.h>
#include <drivers/rand32.h>
#include <sections.h>
#include <toolchain.h>
#include <nano_private.h>
#include <device.h>
#include <init.h>
#include <linker-defs.h>
/* kernel build timestamp items */
#define BUILD_TIMESTAMP "BUILD: " __DATE__ " " __TIME__
#ifdef CONFIG_BUILD_TIMESTAMP
const char * const build_timestamp = BUILD_TIMESTAMP;
#endif
/* boot banner items */
#define BOOT_BANNER "BOOTING ZEPHYR OS"
#if !defined(CONFIG_BOOT_BANNER)
#define PRINT_BOOT_BANNER() do { } while (0)
#elif !defined(CONFIG_BUILD_TIMESTAMP)
#define PRINT_BOOT_BANNER() printk("***** " BOOT_BANNER " *****\n")
#else
#define PRINT_BOOT_BANNER() printk("***** " BOOT_BANNER " - %s *****\n", build_timestamp)
#endif
/* boot time measurement items */
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
uint64_t __noinit __start_tsc; /* timestamp when kernel starts */
uint64_t __noinit __main_tsc; /* timestamp when main task starts */
uint64_t __noinit __idle_tsc; /* timestamp when CPU goes idle */
#endif
/* random number generator items */
#if defined(CONFIG_TEST_RANDOM_GENERATOR) || \
defined(CONFIG_RANDOM_GENERATOR)
#define RAND32_INIT() sys_rand32_init()
#else
#define RAND32_INIT()
#endif
/* stack space for the background (or idle) task */
#if CONFIG_MAIN_STACK_SIZE & (STACK_ALIGN - 1)
#error "MAIN_STACK_SIZE must be a multiple of the stack alignment"
#endif
char __noinit __stack main_task_stack[CONFIG_MAIN_STACK_SIZE];
/*
* storage space for the interrupt stack
*
* Note: This area is used as the system stack during nanokernel initialization,
* since the nanokernel hasn't yet set up its own stack areas. The dual
* purposing of this area is safe since interrupts are disabled until the
* nanokernel context switches to the background (or idle) task.
*/
#if CONFIG_ISR_STACK_SIZE & (STACK_ALIGN - 1)
#error "ISR_STACK_SIZE must be a multiple of the stack alignment"
#endif
char __noinit __stack _interrupt_stack[CONFIG_ISR_STACK_SIZE];
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
#include <misc/dlist.h>
#define initialize_nano_timeouts() do { \
sys_dlist_init(&_nanokernel.timeout_q); \
_nanokernel.task_timeout = TICKS_UNLIMITED; \
} while ((0))
#else
#define initialize_nano_timeouts() do { } while ((0))
#endif
#ifdef CONFIG_NANOKERNEL
#define MICROKERNEL_IDLE_TASK_PTR (NULL)
/**
*
* @brief Mainline for nanokernel's background task
*
* This routine completes kernel initialization by invoking the remaining
* init functions, then invokes the application's main() routine.
*
* @return N/A
*/
static void _main(void)
{
_sys_device_do_config_level(_SYS_INIT_LEVEL_SECONDARY);
_sys_device_do_config_level(_SYS_INIT_LEVEL_NANOKERNEL);
_sys_device_do_config_level(_SYS_INIT_LEVEL_APPLICATION);
#ifdef CONFIG_CPLUSPLUS
/* Process the .ctors and .init_array sections */
extern void __do_global_ctors_aux(void);
extern void __do_init_array_aux(void);
__do_global_ctors_aux();
__do_init_array_aux();
#endif
extern void main(void);
main();
}
#else
extern ktask_t _k_task_ptr_idle _GENERIC_SECTION(_k_task_list);
#define MICROKERNEL_IDLE_TASK_PTR ((void *) _k_task_ptr_idle)
/* microkernel has its own implementation of _main() */
extern void _main(void);
#endif
/**
*
* @brief Clear BSS
*
* This routine clears the BSS region, so all bytes are 0.
*
* @return N/A
*/
void _bss_zero(void)
{
uint32_t *pos = (uint32_t *)&__bss_start;
for ( ; pos < (uint32_t *)&__bss_end; pos++) {
*pos = 0;
}
}
#ifdef CONFIG_XIP
/**
*
* @brief Copy the data section from ROM to RAM
*
* This routine copies the data section from ROM to RAM.
*
* @return N/A
*/
void _data_copy(void)
{
uint32_t *pROM, *pRAM;
pROM = (uint32_t *)&__data_rom_start;
pRAM = (uint32_t *)&__data_ram_start;
for ( ; pRAM < (uint32_t *)&__data_ram_end; pROM++, pRAM++) {
*pRAM = *pROM;
}
}
#endif
/**
*
* @brief Initializes nanokernel data structures
*
* This routine initializes various nanokernel data structures, including
* the background (or idle) task and any architecture-specific initialization.
*
* Note that all fields of "_nanokernel" are set to zero on entry, which may
* be all the initialization many of them require.
*
* @return N/A
*/
static void nano_init(struct tcs *dummyOutContext)
{
/*
* Initialize the current execution thread to permit a level of
* debugging output if an exception should happen during nanokernel
* initialization.
* However, don't waste effort initializing the fields of the dummy
* thread beyond those needed to identify it as a dummy thread.
*/
_nanokernel.current = dummyOutContext;
/*
* Do not insert dummy execution context in the list of fibers, so that
* it does not get scheduled back in once context-switched out.
*/
dummyOutContext->link = (struct tcs *)NULL;
dummyOutContext->flags = FIBER | K_ESSENTIAL;
dummyOutContext->prio = 0;
/*
* The interrupt library needs to be initialized early since a series of
* handlers are installed into the interrupt table to catch spurious
* interrupts. This must be performed before other nanokernel subsystems
* install bona fide handlers, or before hardware device drivers are
* initialized.
*/
_IntLibInit();
/*
* Initialize the thread control block (TCS) for the main task (either
* background or idle task). The entry point for this thread is '_main'.
*/
_nanokernel.task = (struct tcs *) main_task_stack;
_new_thread(main_task_stack, /* pStackMem */
CONFIG_MAIN_STACK_SIZE, /* stackSize */
MICROKERNEL_IDLE_TASK_PTR, /* ptr to idle task */
(_thread_entry_t)_main, /* pEntry */
(_thread_arg_t)0, /* parameter1 */
(_thread_arg_t)0, /* parameter2 */
(_thread_arg_t)0, /* parameter3 */
-1, /* priority */
0 /* options */
);
/* indicate that failure of this task may be fatal to the entire
* system
*
* Warning: _thread_essential_set() doesn't do the same thing. That
* operates on _nanokernel.current, not _nanokernel.task ...
*/
_nanokernel.task->flags |= K_ESSENTIAL;
initialize_nano_timeouts();
/* perform any architecture-specific initialization */
nanoArchInit();
}
#ifdef CONFIG_STACK_CANARIES
/**
*
* @brief Initialize the kernel's stack canary
*
* This macro initializes the kernel's stack canary global variable,
* __stack_chk_guard, with a random value.
*
* INTERNAL
* Depending upon the compiler, modifying __stack_chk_guard directly at runtime
* may generate a build error. In-line assembly is used as a workaround.
*/
extern void *__stack_chk_guard;
#if defined(CONFIG_X86)
#define _MOVE_INSTR "movl "
#elif defined(CONFIG_ARM)
#define _MOVE_INSTR "str "
#elif defined(CONFIG_ARC)
#define _MOVE_INSTR "st "
#else
#error "Unknown Architecture type"
#endif /* CONFIG_X86 */
#define STACK_CANARY_INIT() \
do { \
register void *tmp; \
tmp = (void *)sys_rand32_get(); \
__asm__ volatile(_MOVE_INSTR "%1, %0;\n\t" \
: "=m"(__stack_chk_guard) \
: "r"(tmp)); \
} while (0)
#else /* !CONFIG_STACK_CANARIES */
#define STACK_CANARY_INIT()
#endif /* CONFIG_STACK_CANARIES */
/**
*
* @brief Initialize nanokernel
*
* This routine is invoked when the system is ready to run C code. The
* processor must be running in 32-bit mode, and the BSS must have been
* cleared/zeroed.
*
* @return Does not return
*/
FUNC_NORETURN void _Cstart(void)
{
/* floating point operations are NOT performed during nanokernel init */
char dummyTCS[__tTCS_NOFLOAT_SIZEOF];
/*
* Initialize nanokernel data structures. This step includes
* initializing the interrupt subsystem, which must be performed
* before the hardware initialization phase.
*/
nano_init((struct tcs *)&dummyTCS);
/* perform basic hardware initialization */
_sys_device_do_config_level(_SYS_INIT_LEVEL_PRIMARY);
/*
* Initialize random number generator
* As a platform may implement it in hardware, it has to be
* initialized after rest of hardware initialization and
* before stack canaries that use it
*/
RAND32_INIT();
/* initialize stack canaries */
STACK_CANARY_INIT();
/* display boot banner */
PRINT_BOOT_BANNER();
/* context switch to main task (entry function is _main()) */
_nano_fiber_swap();
/*
* Compiler can't tell that the above routines won't return and issues
* a warning unless we explicitly tell it that control never gets this
* far.
*/
CODE_UNREACHABLE;
}

View File

@@ -1,215 +0,0 @@
/*
* Copyright (c) 2010-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file
*
* @brief Nanokernel dynamic-size LIFO queue object
*
* This module provides the nanokernel LIFO object implementation, including
* the following APIs:
*
* nano_lifo_init
* nano_fiber_lifo_put, nano_task_lifo_put, nano_isr_lifo_put
* nano_fiber_lifo_get, nano_task_lifo_get, nano_isr_lifo_get
* nano_lifo_get
*/
/** INTERNAL
*
* In some cases the compiler "alias" attribute is used to map two or more
* APIs to the same function, since they have identical implementations.
*/
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
/** INTERNAL
*
* Although the existing implementation will support invocation from an ISR
* context, for future flexibility, this API will be restricted from ISR
* level invocation.
*/
void nano_lifo_init(struct nano_lifo *lifo)
{
lifo->list = (void *) 0;
_nano_wait_q_init(&lifo->wait_q);
SYS_TRACING_OBJ_INIT(nano_lifo, lifo);
_TASK_PENDQ_INIT(&lifo->task_q);
}
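/*
 * Illustrative sketch: as with fifos, the first word of each element pushed
 * onto a lifo is used internally as the link field. Names are hypothetical.
 */
struct example_lifo_node {
	void *lifo_reserved;	/* used internally as the link pointer */
	int value;
};

static struct nano_lifo example_lifo;
static struct example_lifo_node example_lifo_elem = { .value = 7 };

static void example_lifo_usage(void)
{
	struct example_lifo_node *node;

	nano_lifo_init(&example_lifo);
	nano_task_lifo_put(&example_lifo, &example_lifo_elem);
	node = nano_task_lifo_get(&example_lifo, TICKS_NONE);
	/* node now points back at example_lifo_elem */
	(void)node;
}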
FUNC_ALIAS(_lifo_put_non_preemptible, nano_isr_lifo_put, void);
FUNC_ALIAS(_lifo_put_non_preemptible, nano_fiber_lifo_put, void);
/** INTERNAL
*
* This function is capable of supporting invocations from both a fiber and an
* ISR context. However, the nano_isr_lifo_put and nano_fiber_lifo_put aliases
* are created to support any required implementation differences in the future
* without introducing a source code migration issue.
*/
void _lifo_put_non_preemptible(struct nano_lifo *lifo, void *data)
{
struct tcs *tcs;
unsigned int imask;
imask = irq_lock();
tcs = _nano_wait_q_remove(&lifo->wait_q);
if (tcs) {
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int) data);
} else {
*(void **) data = lifo->list;
lifo->list = data;
_NANO_UNPEND_TASKS(&lifo->task_q);
}
irq_unlock(imask);
}
void nano_task_lifo_put(struct nano_lifo *lifo, void *data)
{
struct tcs *tcs;
unsigned int imask;
imask = irq_lock();
tcs = _nano_wait_q_remove(&lifo->wait_q);
if (tcs) {
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int) data);
_Swap(imask);
return;
}
*(void **) data = lifo->list;
lifo->list = data;
_TASK_NANO_UNPEND_TASKS(&lifo->task_q);
irq_unlock(imask);
}
void nano_lifo_put(struct nano_lifo *lifo, void *data)
{
static void (*func[3])(struct nano_lifo *, void *) = {
nano_isr_lifo_put,
nano_fiber_lifo_put,
nano_task_lifo_put
};
func[sys_execution_context_type_get()](lifo, data);
}
FUNC_ALIAS(_lifo_get, nano_isr_lifo_get, void *);
FUNC_ALIAS(_lifo_get, nano_fiber_lifo_get, void *);
void *_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
{
void *data = NULL;
unsigned int imask;
imask = irq_lock();
if (likely(lifo->list != NULL)) {
data = lifo->list;
lifo->list = *(void **) data;
} else if (timeout_in_ticks != TICKS_NONE) {
_NANO_TIMEOUT_ADD(&lifo->wait_q, timeout_in_ticks);
_nano_wait_q_put(&lifo->wait_q);
data = (void *) _Swap(imask);
return data;
}
irq_unlock(imask);
return data;
}
void *nano_task_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
{
int64_t cur_ticks;
int64_t limit = 0x7fffffffffffffffll;
unsigned int imask;
imask = irq_lock();
cur_ticks = _NANO_TIMEOUT_TICK_GET();
if (timeout_in_ticks != TICKS_UNLIMITED) {
limit = cur_ticks + timeout_in_ticks;
}
do {
/*
* Predict that the branch will be taken to break out of the
* loop. There is little cost to a misprediction since that
* leads to idle.
*/
if (likely(lifo->list != NULL)) {
void *data = lifo->list;
lifo->list = *(void **) data;
irq_unlock(imask);
return data;
}
if (timeout_in_ticks != TICKS_NONE) {
_NANO_OBJECT_WAIT(&lifo->task_q, &lifo->list,
timeout_in_ticks, imask);
cur_ticks = _NANO_TIMEOUT_TICK_GET();
_NANO_TIMEOUT_UPDATE(timeout_in_ticks,
limit, cur_ticks);
}
} while (cur_ticks < limit);
irq_unlock(imask);
return NULL;
}
void *nano_lifo_get(struct nano_lifo *lifo, int32_t timeout)
{
static void *(*func[3])(struct nano_lifo *, int32_t) = {
nano_isr_lifo_get,
nano_fiber_lifo_get,
nano_task_lifo_get
};
return func[sys_execution_context_type_get()](lifo, timeout);
}
/*
* @brief Get first element from lifo and panic if NULL
*
* Get the first element from the specified lifo but generate a fatal error
* if the element is NULL.
*
* @param lifo LIFO from which to receive.
*
* @return Pointer to first element in the list
*/
void *_nano_fiber_lifo_get_panic(struct nano_lifo *lifo)
{
void *element;
element = nano_fiber_lifo_get(lifo, TICKS_NONE);
if (element == NULL) {
_NanoFatalErrorHandler(_NANO_ERR_ALLOCATION_FAIL, &_default_esf);
}
return element;
}

View File

@@ -1,204 +0,0 @@
/*
* Copyright (c) 2010-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Nanokernel semaphore object.
*
* This module provides the nanokernel semaphore object implementation,
* including the following APIs:
*
* nano_sem_init
* nano_fiber_sem_give, nano_task_sem_give, nano_isr_sem_give
* nano_fiber_sem_take, nano_task_sem_take, nano_isr_sem_take
* nano_sem_take
*
* The semaphores are of the 'counting' type, i.e. each 'give' operation will
* increment the internal count by 1, if no fiber is pending on it. The 'init'
* call initializes the count to 0. Following multiple 'give' operations, the
* same number of 'take' operations can be performed without the calling fiber
* having to pend on the semaphore, or the calling task having to poll.
*/
/**
* INTERNAL
* In some cases the compiler "alias" attribute is used to map two or more
* APIs to the same function, since they have identical implementations.
*/
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
/**
* INTERNAL
* Although the existing implementation will support invocation from an ISR
* context, for future flexibility, this API will be restricted from ISR
* level invocation.
*/
void nano_sem_init(struct nano_sem *sem)
{
sem->nsig = 0;
_nano_wait_q_init(&sem->wait_q);
SYS_TRACING_OBJ_INIT(nano_sem, sem);
_TASK_PENDQ_INIT(&sem->task_q);
}
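/*
 * Illustrative sketch of the counting behavior described above: two gives
 * allow two non-blocking takes; a third take with TICKS_NONE returns 0.
 */
static struct nano_sem example_sem;

static void example_sem_usage(void)
{
	nano_sem_init(&example_sem);
	nano_task_sem_give(&example_sem);
	nano_task_sem_give(&example_sem);
	(void)nano_task_sem_take(&example_sem, TICKS_NONE);	/* returns 1 */
	(void)nano_task_sem_take(&example_sem, TICKS_NONE);	/* returns 1 */
	(void)nano_task_sem_take(&example_sem, TICKS_NONE);	/* returns 0 */
}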
FUNC_ALIAS(_sem_give_non_preemptible, nano_isr_sem_give, void);
FUNC_ALIAS(_sem_give_non_preemptible, nano_fiber_sem_give, void);
#ifdef CONFIG_NANO_TIMEOUTS
#define set_sem_available(tcs) fiberRtnValueSet(tcs, 1)
#else
#define set_sem_available(tcs) do { } while ((0))
#endif
/**
* INTERNAL
* This function is capable of supporting invocations from both a fiber and an
* ISR context. However, the nano_isr_sem_give and nano_fiber_sem_give aliases
* are created to support any required implementation differences in the future
* without introducing a source code migration issue.
*/
void _sem_give_non_preemptible(struct nano_sem *sem)
{
struct tcs *tcs;
unsigned int imask;
imask = irq_lock();
tcs = _nano_wait_q_remove(&sem->wait_q);
if (!tcs) {
sem->nsig++;
_NANO_UNPEND_TASKS(&sem->task_q);
} else {
_nano_timeout_abort(tcs);
set_sem_available(tcs);
}
irq_unlock(imask);
}
void nano_task_sem_give(struct nano_sem *sem)
{
struct tcs *tcs;
unsigned int imask;
imask = irq_lock();
tcs = _nano_wait_q_remove(&sem->wait_q);
if (tcs) {
_nano_timeout_abort(tcs);
set_sem_available(tcs);
_Swap(imask);
return;
}
sem->nsig++;
_TASK_NANO_UNPEND_TASKS(&sem->task_q);
irq_unlock(imask);
}
void nano_sem_give(struct nano_sem *sem)
{
static void (*func[3])(struct nano_sem *sem) = {
nano_isr_sem_give,
nano_fiber_sem_give,
nano_task_sem_give
};
func[sys_execution_context_type_get()](sem);
}
FUNC_ALIAS(_sem_take, nano_isr_sem_take, int);
FUNC_ALIAS(_sem_take, nano_fiber_sem_take, int);
int _sem_take(struct nano_sem *sem, int32_t timeout_in_ticks)
{
unsigned int key = irq_lock();
if (likely(sem->nsig > 0)) {
sem->nsig--;
irq_unlock(key);
return 1;
}
if (timeout_in_ticks != TICKS_NONE) {
_NANO_TIMEOUT_ADD(&sem->wait_q, timeout_in_ticks);
_nano_wait_q_put(&sem->wait_q);
return _Swap(key);
}
irq_unlock(key);
return 0;
}
/**
* INTERNAL
* Since a task cannot pend on a nanokernel object, it polls the
* semaphore object.
*/
int nano_task_sem_take(struct nano_sem *sem, int32_t timeout_in_ticks)
{
int64_t cur_ticks;
int64_t limit = 0x7fffffffffffffffll;
unsigned int key;
key = irq_lock();
cur_ticks = _NANO_TIMEOUT_TICK_GET();
if (timeout_in_ticks != TICKS_UNLIMITED) {
limit = cur_ticks + timeout_in_ticks;
}
do {
/*
* Predict that the branch will be taken to break out of the
* loop. There is little cost to a misprediction since that
* leads to idle.
*/
if (likely(sem->nsig > 0)) {
sem->nsig--;
irq_unlock(key);
return 1;
}
if (timeout_in_ticks != TICKS_NONE) {
_NANO_OBJECT_WAIT(&sem->task_q, &sem->nsig,
timeout_in_ticks, key);
cur_ticks = _NANO_TIMEOUT_TICK_GET();
_NANO_TIMEOUT_UPDATE(timeout_in_ticks,
limit, cur_ticks);
}
} while (cur_ticks < limit);
irq_unlock(key);
return 0;
}
int nano_sem_take(struct nano_sem *sem, int32_t timeout)
{
static int (*func[3])(struct nano_sem *, int32_t) = {
nano_isr_sem_take,
nano_fiber_sem_take,
nano_task_sem_take
};
return func[sys_execution_context_type_get()](sem, timeout);
}

View File

@@ -1,106 +0,0 @@
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Nanokernel sleep routines
*
* This module provides various nanokernel related sleep routines.
*/
#include <nano_private.h>
#include <nano_internal.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
void fiber_sleep(int32_t timeout_in_ticks)
{
int key;
if (timeout_in_ticks == TICKS_NONE) {
fiber_yield();
return;
}
key = irq_lock();
_nano_timeout_add(_nanokernel.current, NULL, timeout_in_ticks);
_Swap(key);
}
FUNC_ALIAS(_fiber_wakeup, isr_fiber_wakeup, void);
FUNC_ALIAS(_fiber_wakeup, fiber_fiber_wakeup, void);
void _fiber_wakeup(nano_thread_id_t fiber)
{
int key = irq_lock();
/* verify first if fiber is not waiting on an object */
if (!fiber->nano_timeout.wait_q && (_nano_timeout_abort(fiber) == 0)) {
_nano_fiber_ready(fiber);
}
irq_unlock(key);
}
void task_fiber_wakeup(nano_thread_id_t fiber)
{
int key = irq_lock();
/* verify first if fiber is not waiting on an object */
if ((fiber->nano_timeout.wait_q) || (_nano_timeout_abort(fiber) < 0)) {
irq_unlock(key);
} else {
_nano_fiber_ready(fiber);
_Swap(key);
}
}
void fiber_wakeup(nano_thread_id_t fiber)
{
static void (*func[3])(nano_thread_id_t) = {
isr_fiber_wakeup,
fiber_fiber_wakeup,
task_fiber_wakeup
};
func[sys_execution_context_type_get()](fiber);
}
#ifndef CONFIG_MICROKERNEL
FUNC_ALIAS(_nano_task_sleep, task_sleep, void);
#endif
void _nano_task_sleep(int32_t timeout_in_ticks)
{
int64_t cur_ticks, limit;
int key;
key = irq_lock();
cur_ticks = sys_tick_get();
limit = cur_ticks + timeout_in_ticks;
while (cur_ticks < limit) {
_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
nano_cpu_atomic_idle(key);
key = irq_lock();
cur_ticks = sys_tick_get();
_NANO_TIMEOUT_UPDATE(timeout_in_ticks, limit, cur_ticks);
}
irq_unlock(key);
}
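/*
 * Illustrative sketch: a fiber that sleeps for up to 10 ticks and a task that
 * wakes it early. Assumes the fiber has already called fiber_sleep() by the
 * time the task runs; the tick count and names are arbitrary.
 */
static nano_thread_id_t example_sleeper_id;

static void example_sleeper_entry(int unused1, int unused2)
{
	example_sleeper_id = sys_thread_self_get();
	fiber_sleep(10);	/* returns early if woken up */
}

static void example_wake_sleeper_from_task(void)
{
	task_fiber_wakeup(example_sleeper_id);
}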

View File

@@ -1,230 +0,0 @@
/*
* Copyright (c) 2010-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief Nanokernel fixed-size stack object
*
* This module provides the nanokernel stack object implementation, including
* the following APIs:
*
* nano_stack_init
* nano_fiber_stack_push, nano_task_stack_push, nano_isr_stack_push
* nano_fiber_stack_pop, nano_task_stack_pop, nano_isr_stack_pop
*
* @param stack the stack to initialize
* @param data pointer to the container for the stack
*
* @internal
* In some cases the compiler "alias" attribute is used to map two or more
* APIs to the same function, since they have identical implementations.
* @endinternal
*
*/
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
void nano_stack_init(struct nano_stack *stack, uint32_t *data)
{
stack->next = stack->base = data;
stack->fiber = (struct tcs *)0;
SYS_TRACING_OBJ_INIT(nano_stack, stack);
}
FUNC_ALIAS(_stack_push_non_preemptible, nano_isr_stack_push, void);
FUNC_ALIAS(_stack_push_non_preemptible, nano_fiber_stack_push, void);
/**
*
* @brief Push data onto a stack (no context switch)
*
* This routine pushes a data item onto a stack object; it may be called from
* either a fiber or ISR context. A fiber pending on the stack object will be
* made ready, but will NOT be scheduled to execute.
*
* @param stack Stack on which to interact
* @param data Data to push on stack
* @return N/A
*
* @internal
* This function is capable of supporting invocations from both a fiber and an
* ISR context. However, the nano_isr_stack_push and nano_fiber_stack_push
* aliases are created to support any required implementation differences in
* the future without introducing a source code migration issue.
* @endinternal
*/
void _stack_push_non_preemptible(struct nano_stack *stack, uint32_t data)
{
struct tcs *tcs;
unsigned int imask;
imask = irq_lock();
tcs = stack->fiber;
if (tcs) {
stack->fiber = 0;
fiberRtnValueSet(tcs, data);
_nano_fiber_ready(tcs);
} else {
*(stack->next) = data;
stack->next++;
}
irq_unlock(imask);
}
void nano_task_stack_push(struct nano_stack *stack, uint32_t data)
{
struct tcs *tcs;
unsigned int imask;
imask = irq_lock();
tcs = stack->fiber;
if (tcs) {
stack->fiber = 0;
fiberRtnValueSet(tcs, data);
_nano_fiber_ready(tcs);
_Swap(imask);
return;
}
*(stack->next) = data;
stack->next++;
irq_unlock(imask);
}
void nano_stack_push(struct nano_stack *stack, uint32_t data)
{
static void (*func[3])(struct nano_stack *, uint32_t) = {
nano_isr_stack_push,
nano_fiber_stack_push,
nano_task_stack_push
};
func[sys_execution_context_type_get()](stack, data);
}
FUNC_ALIAS(_stack_pop, nano_isr_stack_pop, int);
FUNC_ALIAS(_stack_pop, nano_fiber_stack_pop, int);
/**
*
* @brief Pop data from a nanokernel stack
*
* Pop the first data word from a nanokernel stack object; it may be called
* from either a fiber or ISR context.
*
* If the stack is not empty, a data word is popped and copied to the provided
* address <pData> and a non-zero value is returned. If the stack is empty,
* it waits until data is ready.
*
* @param stack Stack to operate on
* @param pData Container for data to pop
* @param timeout_in_ticks Affects the action taken should the stack be empty.
* If TICKS_NONE, then return immediately. If TICKS_UNLIMITED, then wait as
* long as necessary. No other value is currently supported as this routine
* does not support CONFIG_NANO_TIMEOUTS.
*
* @return 1 popped data from the stack; 0 otherwise
*/
int _stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeout_in_ticks)
{
unsigned int imask;
imask = irq_lock();
if (likely(stack->next > stack->base)) {
stack->next--;
*pData = *(stack->next);
irq_unlock(imask);
return 1;
}
if (timeout_in_ticks != TICKS_NONE) {
stack->fiber = _nanokernel.current;
*pData = (uint32_t) _Swap(imask);
return 1;
}
irq_unlock(imask);
return 0;
}
int nano_task_stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeout_in_ticks)
{
unsigned int imask;
imask = irq_lock();
while (1) {
/*
* Predict that the branch will be taken to break out of the
* loop. There is little cost to a misprediction since that
* leads to idle.
*/
if (likely(stack->next > stack->base)) {
stack->next--;
*pData = *(stack->next);
irq_unlock(imask);
return 1;
}
if (timeout_in_ticks == TICKS_NONE) {
break;
}
/*
* Invoke nano_cpu_atomic_idle() with interrupts still disabled
* to prevent the scenario where an interrupt fires after
* re-enabling interrupts and before executing the "halt"
* instruction. If the ISR performs a nano_isr_stack_push() on
* the same stack object, the subsequent execution of the "halt"
* instruction will result in the queued data being ignored
* until the next interrupt, if any.
*
* Thus it should be clear that an architecture's implementation
* of nano_cpu_atomic_idle() must be able to atomically
* re-enable interrupts and enter a low-power mode.
*
* This explanation is valid for all nanokernel objects: stacks,
* FIFOs, LIFOs, and semaphores, for their
* nano_task_<object>_<get>() routines.
*/
nano_cpu_atomic_idle(imask);
imask = irq_lock();
}
irq_unlock(imask);
return 0;
}
int nano_stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeout_in_ticks)
{
static int (*func[3])(struct nano_stack *, uint32_t *, int32_t) = {
nano_isr_stack_pop,
nano_fiber_stack_pop,
nano_task_stack_pop,
};
return func[sys_execution_context_type_get()](stack, pData, timeout_in_ticks);
}
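/*
 * Illustrative sketch: a nano stack backed by a four-word array (size chosen
 * arbitrarily; the implementation does not check for overflow).
 */
static uint32_t example_stack_storage[4];
static struct nano_stack example_stack_obj;

static void example_stack_usage(void)
{
	uint32_t value;

	nano_stack_init(&example_stack_obj, example_stack_storage);
	nano_task_stack_push(&example_stack_obj, 0x1234);
	if (nano_task_stack_pop(&example_stack_obj, &value, TICKS_NONE)) {
		/* value is 0x1234 */
	}
}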

View File

@@ -1,198 +0,0 @@
/* system clock support for nanokernel-only systems */
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>
#ifdef CONFIG_SYS_CLOCK_EXISTS
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/* don't initialize to avoid division-by-zero error */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif
#ifdef CONFIG_NANOKERNEL
/* updated by timer driver for tickless, stays at 1 for non-tickless */
int32_t _sys_idle_elapsed_ticks = 1;
#endif /* CONFIG_NANOKERNEL */
int64_t _sys_clock_tick_count;
/**
*
* @brief Return the lower part of the current system tick count
*
* @return the current system tick count
*
*/
uint32_t sys_tick_get_32(void)
{
return (uint32_t)_sys_clock_tick_count;
}
/**
*
* @brief Return the current system tick count
*
* @return the current system tick count
*
*/
int64_t sys_tick_get(void)
{
int64_t tmp_sys_clock_tick_count;
/*
* Lock the interrupts when reading _sys_clock_tick_count 64-bit
* variable. Some architectures (x86) do not handle 64-bit atomically,
* so we have to lock the timer interrupt that causes change of
* _sys_clock_tick_count
*/
unsigned int imask = irq_lock();
tmp_sys_clock_tick_count = _sys_clock_tick_count;
irq_unlock(imask);
return tmp_sys_clock_tick_count;
}
/**
*
* @brief Return number of ticks since a reference time
*
* This function is meant to be used in contained fragments of code. The first
* call to it in a particular code fragment fills in a reference time variable
* which then gets passed and updated every time the function is called. From
* the second call on, the delta between the value passed to it and the current
* tick count is the return value. Since the first call is meant to only fill in
* the reference time, its return value should be discarded.
*
* Since a code fragment that wants to use sys_tick_delta() passes in its
* own reference time variable, multiple code fragments can make use of this
* function concurrently.
*
* e.g.
* uint64_t reftime;
* (void) sys_tick_delta(&reftime); /# prime it #/
* [do stuff]
* x = sys_tick_delta(&reftime); /# how long since priming #/
* [do more stuff]
* y = sys_tick_delta(&reftime); /# how long since [do stuff] #/
*
* @return tick count since reference time; undefined for first invocation
*
* NOTE: We use inline function for both 64-bit and 32-bit functions.
* Compiler optimizes out 64-bit result handling in 32-bit version.
*/
static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
{
int64_t delta;
int64_t saved;
/*
* Lock the interrupts when reading _sys_clock_tick_count 64-bit
* variable. Some architectures (x86) do not handle 64-bit atomically,
* so we have to lock the timer interrupt that causes change of
* _sys_clock_tick_count
*/
unsigned int imask = irq_lock();
saved = _sys_clock_tick_count;
irq_unlock(imask);
delta = saved - (*reftime);
*reftime = saved;
return delta;
}
/**
*
* @brief Return number of ticks since a reference time
*
* @return tick count since reference time; undefined for first invocation
*/
int64_t sys_tick_delta(int64_t *reftime)
{
return _nano_tick_delta(reftime);
}
uint32_t sys_tick_delta_32(int64_t *reftime)
{
return (uint32_t)_nano_tick_delta(reftime);
}
/* handle the expired timeouts in the nano timeout queue */
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
#include <wait_q.h>
static inline void handle_expired_nano_timeouts(int32_t ticks)
{
struct _nano_timeout *head =
(struct _nano_timeout *)sys_dlist_peek_head(&_nanokernel.timeout_q);
_nanokernel.task_timeout = TICKS_UNLIMITED;
if (head) {
head->delta_ticks_from_prev -= ticks;
_nano_timeout_handle_timeouts();
}
}
#else
#define handle_expired_nano_timeouts(ticks) do { } while ((0))
#endif
/**
*
* @brief Announce a tick to the nanokernel
*
* This function is only to be called by the system clock timer driver when a
* tick is to be announced to the nanokernel. It takes care of dequeuing the
* timers that have expired and waking up the fibers pending on them.
*
* @return N/A
*/
void _nano_sys_clock_tick_announce(int32_t ticks)
{
unsigned int key;
key = irq_lock();
_sys_clock_tick_count += ticks;
handle_expired_nano_timeouts(ticks);
irq_unlock(key);
}
/*
* Get the closest nano timeout/timer deadline expiry, or
* (uint32_t)TICKS_UNLIMITED if there is none.
*/
uint32_t _nano_get_earliest_deadline(void)
{
return _nano_get_earliest_timeouts_deadline();
}

View File

@@ -1,288 +0,0 @@
/* nano_timer.c - timer for nanokernel-only systems */
/*
* Copyright (c) 1997-2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <wait_q.h>
void nano_timer_init(struct nano_timer *timer, void *data)
{
/* initialize timeout_data */
_nano_timeout_init(&timer->timeout_data, NULL);
/* nano_timer_test() returns NULL on timer that was not started */
timer->user_data = NULL;
timer->user_data_backup = data;
SYS_TRACING_OBJ_INIT(nano_timer, timer);
}
FUNC_ALIAS(_timer_start, nano_isr_timer_start, void);
FUNC_ALIAS(_timer_start, nano_fiber_timer_start, void);
FUNC_ALIAS(_timer_start, nano_task_timer_start, void);
FUNC_ALIAS(_timer_start, nano_timer_start, void);
/**
*
* @brief Start a nanokernel timer (generic implementation)
*
* This function starts a previously initialized nanokernel timer object.
* The timer will expire in <ticks> system clock ticks.
*
* @param timer The Timer to start
* @param ticks The number of system ticks before expiration
*
* @return N/A
*/
void _timer_start(struct nano_timer *timer, int ticks)
{
int key = irq_lock();
/*
* Once timer is started nano_timer_test() returns
* the pointer to user data
*/
timer->user_data = timer->user_data_backup;
_nano_timer_timeout_add(&timer->timeout_data,
NULL, ticks);
irq_unlock(key);
}
FUNC_ALIAS(_timer_stop_non_preemptible, nano_isr_timer_stop, void);
FUNC_ALIAS(_timer_stop_non_preemptible, nano_fiber_timer_stop, void);
void _timer_stop_non_preemptible(struct nano_timer *timer)
{
struct _nano_timeout *t = &timer->timeout_data;
struct tcs *tcs = t->tcs;
int key = irq_lock();
/*
* Verify first that the fiber is not waiting on an object, that the
* timer has not expired, and that a fiber is actually waiting on the
* timer
*/
if (!t->wait_q && (_nano_timer_timeout_abort(t) == 0) &&
tcs != NULL) {
if (_IS_MICROKERNEL_TASK(tcs)) {
_NANO_TIMER_TASK_READY(tcs);
} else {
_nano_fiber_ready(tcs);
}
}
/*
* After timer gets aborted nano_timer_test() should
* return NULL until timer gets restarted
*/
timer->user_data = NULL;
irq_unlock(key);
}
#ifdef CONFIG_MICROKERNEL
extern void _task_nano_timer_task_ready(void *uk_task_ptr);
#define _TASK_NANO_TIMER_TASK_READY(tcs) \
_task_nano_timer_task_ready(tcs->uk_task_ptr)
#else
#define _TASK_NANO_TIMER_TASK_READY(tcs) do { } while (0)
#endif
void nano_task_timer_stop(struct nano_timer *timer)
{
struct _nano_timeout *t = &timer->timeout_data;
struct tcs *tcs = t->tcs;
int key = irq_lock();
timer->user_data = NULL;
/*
* Verify first that the fiber is not waiting on an object, that the
* timer has not expired, and that a fiber is actually waiting on the
* timer
*/
if (!t->wait_q && (_nano_timer_timeout_abort(t) == 0) &&
tcs != NULL) {
if (!_IS_MICROKERNEL_TASK(tcs)) {
_nano_fiber_ready(tcs);
_Swap(key);
return;
}
_TASK_NANO_TIMER_TASK_READY(tcs);
}
irq_unlock(key);
}
void nano_timer_stop(struct nano_timer *timer)
{
static void (*func[3])(struct nano_timer *) = {
nano_isr_timer_stop,
nano_fiber_timer_stop,
nano_task_timer_stop,
};
func[sys_execution_context_type_get()](timer);
}
/**
*
* @brief Test nano timer for cases when the calling thread does not wait
*
* @param timer Timer to check
* @param timeout_in_ticks Determines the action to take when the timer has
* not expired.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* @param user_data_ptr Output pointer for the user data: set to
* timer->user_data if the timer has expired, otherwise set to NULL
*
* @return 1 if the calling thread must wait for the timer to expire, 0 otherwise
*/
static int _nano_timer_expire_wait(struct nano_timer *timer,
int32_t timeout_in_ticks,
void **user_data_ptr)
{
struct _nano_timeout *t = &timer->timeout_data;
/* check if the timer has expired */
if (t->delta_ticks_from_prev == -1) {
*user_data_ptr = timer->user_data;
timer->user_data = NULL;
/* if the thread should not wait, return immediately */
} else if (timeout_in_ticks == TICKS_NONE) {
*user_data_ptr = NULL;
} else {
return 1;
}
return 0;
}
void *nano_isr_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
{
int key = irq_lock();
void *user_data;
if (_nano_timer_expire_wait(timer, timeout_in_ticks, &user_data)) {
/* an ISR cannot wait, so return NULL */
user_data = NULL;
}
irq_unlock(key);
return user_data;
}
void *nano_fiber_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
{
int key = irq_lock();
struct _nano_timeout *t = &timer->timeout_data;
void *user_data;
if (_nano_timer_expire_wait(timer, timeout_in_ticks, &user_data)) {
t->tcs = _nanokernel.current;
_Swap(key);
key = irq_lock();
user_data = timer->user_data;
timer->user_data = NULL;
}
irq_unlock(key);
return user_data;
}
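/*
* Pend the idle task on the timer: record the remaining ticks in
* _nanokernel.task_timeout (consulted by _nano_get_earliest_timeouts_deadline())
* and idle the CPU atomically until the next interrupt wakes it
*/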
#define IDLE_TASK_TIMER_PEND(timer, key) \
do { \
_nanokernel.task_timeout = nano_timer_ticks_remain(timer); \
nano_cpu_atomic_idle(key); \
key = irq_lock(); \
} while (0)
#ifdef CONFIG_MICROKERNEL
extern void _task_nano_timer_pend_task(struct nano_timer *timer);
#define NANO_TASK_TIMER_PEND(timer, key) \
do { \
if (_IS_IDLE_TASK()) { \
IDLE_TASK_TIMER_PEND(timer, key); \
} else { \
_task_nano_timer_pend_task(timer); \
} \
} while (0)
#else
#define NANO_TASK_TIMER_PEND(timer, key) IDLE_TASK_TIMER_PEND(timer, key)
#endif
void *nano_task_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
{
int key = irq_lock();
struct _nano_timeout *t = &timer->timeout_data;
void *user_data;
if (_nano_timer_expire_wait(timer, timeout_in_ticks, &user_data)) {
/* the task busy-waits until the timer expires */
while (t->delta_ticks_from_prev != -1) {
NANO_TASK_TIMER_PEND(timer, key);
}
user_data = timer->user_data;
timer->user_data = NULL;
}
irq_unlock(key);
return user_data;
}
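/* Test the timer, dispatching on the caller's execution context (ISR, fiber or task) */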
void *nano_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
{
static void *(*func[3])(struct nano_timer *, int32_t) = {
nano_isr_timer_test,
nano_fiber_timer_test,
nano_task_timer_test,
};
return func[sys_execution_context_type_get()](timer, timeout_in_ticks);
}
int32_t nano_timer_ticks_remain(struct nano_timer *timer)
{
int key = irq_lock();
int32_t remaining_ticks;
struct _nano_timeout *t = &timer->timeout_data;
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
struct _nano_timeout *iterator;
if (t->delta_ticks_from_prev == -1) {
remaining_ticks = 0;
} else {
/*
* As nanokernel timeouts are stored in a linked list with
* delta_ticks_from_prev, to get the actual number of ticks
* remaining for the timer, walk through the timeouts list
* and accumulate all the delta_ticks_from_prev values up to
* the timer.
*/
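/*
* For example, timeouts expiring 3, 5 and 9 ticks from now are stored
* as deltas 3, 2 and 4; summing the deltas up to and including this
* timer gives its remaining ticks.
*/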
iterator =
(struct _nano_timeout *)sys_dlist_peek_head(timeout_q);
remaining_ticks = iterator->delta_ticks_from_prev;
while (iterator != t) {
iterator = (struct _nano_timeout *)sys_dlist_peek_next(
timeout_q, &iterator->node);
remaining_ticks += iterator->delta_ticks_from_prev;
}
}
irq_unlock(key);
return remaining_ticks;
}
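
For context, a minimal sketch of how a fiber would typically use this timer API (hypothetical caller: the <nanokernel.h> include, the fiber name and the 10-tick delay are illustrative, not taken from this file):

#include <nanokernel.h>	/* assumed public nanokernel API header */

static struct nano_timer my_timer;
static uint32_t my_timer_data;	/* handed back by nano_fiber_timer_test() */

static void my_fiber(int arg1, int arg2)
{
	void *data;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	/* nano_timer_test() returns NULL until the timer is started */
	nano_timer_init(&my_timer, &my_timer_data);

	/* expire 10 system clock ticks from now */
	nano_fiber_timer_start(&my_timer, 10);

	/* block until expiration; returns &my_timer_data */
	data = nano_fiber_timer_test(&my_timer, TICKS_UNLIMITED);

	/* passing TICKS_NONE instead would poll without blocking */
	(void)data;
}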

View File

@@ -1,190 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* Workqueue support functions
*/
#include <nano_private.h>
#include <wait_q.h>
#include <errno.h>
#include <misc/nano_work.h>
static void workqueue_fiber_main(int arg1, int arg2)
{
struct nano_workqueue *wq = (struct nano_workqueue *)arg1;
ARG_UNUSED(arg2);
while (1) {
struct nano_work *work;
work_handler_t handler;
work = nano_fiber_fifo_get(&wq->fifo, TICKS_UNLIMITED);
handler = work->handler;
/* Reset pending state so it can be resubmitted by handler */
if (atomic_test_and_clear_bit(work->flags,
NANO_WORK_STATE_PENDING)) {
handler(work);
}
/* Make sure we don't hog the CPU if the FIFO never (or
* very rarely) gets empty.
*/
fiber_yield();
}
}
void nano_fiber_workqueue_start(struct nano_workqueue *wq,
const struct fiber_config *config)
{
nano_fifo_init(&wq->fifo);
fiber_fiber_start_config(config, workqueue_fiber_main,
(int)wq, 0, 0);
}
void nano_task_workqueue_start(struct nano_workqueue *wq,
const struct fiber_config *config)
{
nano_fifo_init(&wq->fifo);
task_fiber_start_config(config, workqueue_fiber_main,
(int)wq, 0, 0);
}
void nano_workqueue_start(struct nano_workqueue *wq,
const struct fiber_config *config)
{
nano_fifo_init(&wq->fifo);
fiber_start_config(config, workqueue_fiber_main,
(int)wq, 0, 0);
}
static void work_timeout(struct _nano_timeout *t)
{
struct nano_delayed_work *w = CONTAINER_OF(t, struct nano_delayed_work,
timeout);
/* submit work to workqueue */
nano_work_submit_to_queue(w->wq, &w->work);
}
void nano_delayed_work_init(struct nano_delayed_work *work,
work_handler_t handler)
{
nano_work_init(&work->work, handler);
_nano_timeout_init(&work->timeout, work_timeout);
work->wq = NULL;
}
int nano_delayed_work_submit_to_queue(struct nano_workqueue *wq,
struct nano_delayed_work *work,
int ticks)
{
int key = irq_lock();
int err;
/* Work cannot be active in multiple queues */
if (work->wq && work->wq != wq) {
err = -EADDRINUSE;
goto done;
}
/* Cancel if work has been submitted */
if (work->wq == wq) {
err = nano_delayed_work_cancel(work);
if (err < 0) {
goto done;
}
}
/* Attach workqueue so the timeout callback can submit it */
work->wq = wq;
if (!ticks) {
/* Submit the work immediately if the delay is zero ticks */
nano_work_submit_to_queue(wq, &work->work);
} else {
/* Add timeout */
_do_nano_timeout_add(NULL, &work->timeout, NULL, ticks);
}
err = 0;
done:
irq_unlock(key);
return err;
}
int nano_delayed_work_cancel(struct nano_delayed_work *work)
{
int key = irq_lock();
if (nano_work_pending(&work->work)) {
irq_unlock(key);
return -EINPROGRESS;
}
if (!work->wq) {
irq_unlock(key);
return -EINVAL;
}
/* Abort the timeout; if it has already expired this does nothing */
_do_nano_timeout_abort(&work->timeout);
/* Detach from workqueue */
work->wq = NULL;
irq_unlock(key);
return 0;
}
#ifdef CONFIG_SYSTEM_WORKQUEUE
#include <init.h>
static char __stack sys_wq_stack[CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE];
static const struct fiber_config sys_wq_config = {
.stack = sys_wq_stack,
.stack_size = sizeof(sys_wq_stack),
.prio = CONFIG_SYSTEM_WORKQUEUE_PRIORITY,
};
struct nano_workqueue sys_workqueue;
static int sys_workqueue_init(struct device *dev)
{
ARG_UNUSED(dev);
nano_workqueue_start(&sys_workqueue, &sys_wq_config);
return 0;
}
SYS_INIT(sys_workqueue_init, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif
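
For context, a minimal sketch of how work items were typically queued with this API (hypothetical handler name and 5-tick delay; assumes CONFIG_SYSTEM_WORKQUEUE is enabled so sys_workqueue exists, and that misc/nano_work.h declares it):

#include <misc/nano_work.h>

static void my_work_handler(struct nano_work *work)
{
	/* runs in the context of the workqueue fiber */
	ARG_UNUSED(work);
}

static struct nano_work my_work;
static struct nano_delayed_work my_delayed_work;

static void submit_examples(void)
{
	/* immediate work: run the next time the workqueue fiber is scheduled */
	nano_work_init(&my_work, my_work_handler);
	nano_work_submit_to_queue(&sys_workqueue, &my_work);

	/* delayed work: handed to the workqueue after 5 ticks */
	nano_delayed_work_init(&my_delayed_work, my_work_handler);
	nano_delayed_work_submit_to_queue(&sys_workqueue, &my_delayed_work, 5);
}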

View File

@@ -1,207 +0,0 @@
/** @file
* @brief timeout queue for fibers on nanokernel objects
*
* This file is meant to be included by nanokernel/include/wait_q.h only
*/
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wait_q.h>
#if defined(CONFIG_NANO_TIMEOUTS)
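/*
* Unlink a thread from the nanokernel object wait queue it is pending on,
* fixing up the queue's head/tail pointers, and clear the thread's
* timeout wait_q reference
*/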
static void _nano_timeout_remove_tcs_from_wait_q(
struct tcs *tcs, struct _nano_queue *wait_q)
{
if (wait_q->head == tcs) {
if (wait_q->tail == wait_q->head) {
_nano_wait_q_reset(wait_q);
} else {
wait_q->head = tcs->link;
}
} else {
struct tcs *prev = wait_q->head;
while (prev->link != tcs) {
prev = prev->link;
}
prev->link = tcs->link;
if (wait_q->tail == tcs) {
wait_q->tail = prev;
}
}
tcs->nano_timeout.wait_q = NULL;
}
/**
* @brief Remove the thread from nanokernel object wait queue
*
* If a thread waits on a nanokernel object with timeout,
* remove the thread from the wait queue
*
* @param tcs Waiting thread
* @param t Nanokernel timeout of the waiting thread
*
* @return N/A
*/
static void _nano_timeout_object_dequeue(
struct tcs *tcs, struct _nano_timeout *t)
{
if (t->wait_q) {
_nano_timeout_remove_tcs_from_wait_q(tcs, t->wait_q);
fiberRtnValueSet(tcs, 0);
}
}
#else
#define _nano_timeout_object_dequeue(tcs, t) do { } while (0)
#endif /* CONFIG_NANO_TIMEOUTS */
/*
* Handle one expired timeout.
* This removes the fiber from the timeout queue head, and also removes it
* from the wait queue it is on if waiting for an object. In that case, it
* also sets the return value to 0/NULL.
*/
static struct _nano_timeout *_nano_timeout_handle_one_timeout(
sys_dlist_t *timeout_q)
{
struct _nano_timeout *t = (void *)sys_dlist_get(timeout_q);
struct tcs *tcs = t->tcs;
if (tcs != NULL) {
_nano_timeout_object_dequeue(tcs, t);
if (_IS_MICROKERNEL_TASK(tcs)) {
_NANO_TASK_READY(tcs);
} else {
_nano_fiber_ready(tcs);
}
} else if (t->func) {
t->func(t);
}
t->delta_ticks_from_prev = -1;
return (struct _nano_timeout *)sys_dlist_peek_head(timeout_q);
}
/* loop over all expired timeouts and handle them one by one */
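/*
* Expired timeouts sit at the head of the delta queue with a delta of 0:
* e.g. with deltas [0, 0, 4], the first two entries are handled and the
* queue is left as [4]
*/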
void _nano_timeout_handle_timeouts(void)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
struct _nano_timeout *next;
next = (struct _nano_timeout *)sys_dlist_peek_head(timeout_q);
while (next && next->delta_ticks_from_prev == 0) {
next = _nano_timeout_handle_one_timeout(timeout_q);
}
}
/**
*
* @brief Abort a timeout
*
* @param t Timeout to abort
*
* @return 0 on success, -1 if the timeout has already expired
*/
int _do_nano_timeout_abort(struct _nano_timeout *t)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
if (-1 == t->delta_ticks_from_prev) {
return -1;
}
if (!sys_dlist_is_tail(timeout_q, &t->node)) {
struct _nano_timeout *next =
(struct _nano_timeout *)sys_dlist_peek_next(timeout_q,
&t->node);
next->delta_ticks_from_prev += t->delta_ticks_from_prev;
}
sys_dlist_remove(&t->node);
t->delta_ticks_from_prev = -1;
return 0;
}
/*
* callback for sys_dlist_insert_at():
*
* Returns 1 if the timeout to insert is less than or equal to the next
* timeout in the queue, signifying that it should be inserted before the
* next one.
* Returns 0 if it is greater.
*
* If it is greater, the timeout to insert is decremented by the next
* timeout, since the timeout queue is a delta queue. If it is less than
* or equal, the timeout of the insert point is decremented to update its
* delta queue value, since the current timeout will be inserted before it.
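*
* For example, inserting a 7-tick timeout into the delta queue [3, 5]
* (absolute deadlines 3 and 8): 7 > 3, so it is decremented to 4 and the
* walk continues; then 4 <= 5, so the 5 is decremented to 1 and the new
* timeout is inserted before it, yielding [3, 4, 1] (deadlines 3, 7, 8).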
*/
static int _nano_timeout_insert_point_test(sys_dnode_t *test, void *timeout)
{
struct _nano_timeout *t = (void *)test;
int32_t *timeout_to_insert = timeout;
if (*timeout_to_insert > t->delta_ticks_from_prev) {
*timeout_to_insert -= t->delta_ticks_from_prev;
return 0;
}
t->delta_ticks_from_prev -= *timeout_to_insert;
return 1;
}
/**
*
* @brief Put timeout on the timeout queue, record waiting fiber and wait queue
*
* @param tcs Fiber waiting on a timeout
* @param t Timeout structure to be added to the nanokernel queue
* @param wait_q Nanokernel object wait queue
* @param timeout Timeout in ticks
*
* @return N/A
*/
void _do_nano_timeout_add(struct tcs *tcs,
struct _nano_timeout *t,
struct _nano_queue *wait_q,
int32_t timeout)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
t->tcs = tcs;
t->delta_ticks_from_prev = timeout;
t->wait_q = wait_q;
sys_dlist_insert_at(timeout_q, (void *)t,
_nano_timeout_insert_point_test,
&t->delta_ticks_from_prev);
}
/* find the closest deadline in the timeout queue */
uint32_t _nano_get_earliest_timeouts_deadline(void)
{
sys_dlist_t *q = &_nanokernel.timeout_q;
struct _nano_timeout *t =
(struct _nano_timeout *)sys_dlist_peek_head(q);
return t ? min((uint32_t)t->delta_ticks_from_prev,
(uint32_t)_nanokernel.task_timeout)
: (uint32_t)_nanokernel.task_timeout;
}

View File

@@ -1 +0,0 @@
#include "../unified/version.c"

View File

@@ -1,51 +0,0 @@
/* wait queue for multiple fibers on nanokernel objects */
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wait_q.h>
/*
* Remove first fiber from a wait queue and put it on the ready queue, knowing
* that the wait queue is not empty.
*/
static struct tcs *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q)
{
struct tcs *tcs = wait_q->head;
if (wait_q->tail == wait_q->head) {
_nano_wait_q_reset(wait_q);
} else {
wait_q->head = tcs->link;
}
tcs->link = 0;
_nano_fiber_ready(tcs);
return tcs;
}
/*
* Remove first fiber from a wait queue and put it on the ready queue.
* Abort and return NULL if the wait queue is empty.
*/
struct tcs *_nano_wait_q_remove(struct _nano_queue *wait_q)
{
return wait_q->head ? _nano_wait_q_remove_no_check(wait_q) : NULL;
}