lib, kernel: use single evaluation min/max/clamp

Replace all in-function instances of MIN/MAX/CLAMP with the
single-evaluation versions min/max/clamp.

There are probably no race conditions in these files, but the
single-evaluation versions save a couple of instructions each, so they
should shave a few code bytes and potentially perform better; they
should be preferred in general (see the illustrative sketch below).

Signed-off-by: Fabio Baltieri <fabiobaltieri@google.com>
Author:    Fabio Baltieri
Date:      2025-10-21 16:50:55 +01:00
Committer: Johan Hedberg
Parent:    3d93fa23e0
Commit:    700a1a5a28

21 changed files with 39 additions and 39 deletions
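
For context on what the conversion buys: the classic MIN/MAX/CLAMP macros expand each argument twice, so an argument with side effects runs twice and a non-trivial argument is computed twice. The single-evaluation forms evaluate each argument exactly once into a typed temporary. The sketch below illustrates the difference with stand-in definitions; the lowercase min here is written Linux-style with a GCC/Clang statement expression and is illustrative only, not Zephyr's actual util.h code.

#include <stdio.h>

/* Classic macro: each argument appears twice in the expansion. */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

/*
 * Single-evaluation stand-in: each argument is evaluated exactly once
 * into a local. Illustrative, not Zephyr's real definition.
 */
#define min(a, b) ({                    \
	__typeof__(a) _a = (a);         \
	__typeof__(b) _b = (b);         \
	_a < _b ? _a : _b;              \
})

static int calls;

static int next(void)
{
	calls++; /* side effect, used to count evaluations */
	return calls;
}

int main(void)
{
	calls = 0;
	int a = MIN(next(), 10); /* next() runs twice: a == 2, calls == 2 */
	printf("MIN: a=%d calls=%d\n", a, calls);

	calls = 0;
	int b = min(next(), 10); /* next() runs once: b == 1, calls == 1 */
	printf("min: b=%d calls=%d\n", b, calls);

	return 0;
}

Even without side effects, the temporaries mean an expensive argument (a function call, a volatile load) is computed once instead of twice, which is where the per-call-site instruction and code-size savings described above come from.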

@@ -515,7 +515,7 @@ void __weak z_early_rand_get(uint8_t *buf, size_t length)
state = state + k_cycle_get_32();
state = state * 2862933555777941757ULL + 3037000493ULL;
val = (uint32_t)(state >> 32);
-rc = MIN(length, sizeof(val));
+rc = min(length, sizeof(val));
arch_early_memcpy((void *)buf, &val, rc);
length -= rc;

@@ -237,7 +237,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
"slab corruption detected");
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
-slab->info.max_used = MAX(slab->info.num_used,
+slab->info.max_used = max(slab->info.num_used,
slab->info.max_used);
#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */

@@ -274,8 +274,8 @@ static void virt_region_free(void *vaddr, size_t size)
(vaddr_u8 < Z_VIRT_REGION_END_ADDR)) ||
(((vaddr_u8 + size - 1) >= Z_VIRT_REGION_START_ADDR) &&
((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) {
-uint8_t *adjusted_start = MAX(vaddr_u8, Z_VIRT_REGION_START_ADDR);
-uint8_t *adjusted_end = MIN(vaddr_u8 + size,
+uint8_t *adjusted_start = max(vaddr_u8, Z_VIRT_REGION_START_ADDR);
+uint8_t *adjusted_end = min(vaddr_u8 + size,
Z_VIRT_REGION_END_ADDR);
size_t adjusted_sz = adjusted_end - adjusted_start;
@@ -930,8 +930,8 @@ void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32
IN_RANGE(aligned_phys + aligned_size - 1,
(uintptr_t)K_MEM_VIRT_RAM_START,
(uintptr_t)(K_MEM_VIRT_RAM_END - 1))) {
-uint8_t *adjusted_start = MAX(dest_addr, K_MEM_VIRT_RAM_START);
-uint8_t *adjusted_end = MIN(dest_addr + aligned_size,
+uint8_t *adjusted_start = max(dest_addr, K_MEM_VIRT_RAM_START);
+uint8_t *adjusted_end = min(dest_addr + aligned_size,
K_MEM_VIRT_RAM_END);
size_t adjusted_sz = adjusted_end - adjusted_start;

@@ -113,7 +113,7 @@ static size_t copy_to_pending_readers(struct k_pipe *pipe, bool *need_resched,
}
reader_buf = reader->base.swap_data;
-copy_size = MIN(len - written,
+copy_size = min(len - written,
reader_buf->len - reader_buf->used);
memcpy(&reader_buf->data[reader_buf->used],
&data[written], copy_size);

@@ -1005,7 +1005,7 @@ void z_impl_k_thread_absolute_deadline_set(k_tid_t tid, int deadline)
void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
{
-deadline = CLAMP(deadline, 0, INT_MAX);
+deadline = clamp(deadline, 0, INT_MAX);
int32_t newdl = k_cycle_get_32() + deadline;
@@ -1139,7 +1139,7 @@ int32_t z_impl_k_sleep(k_timeout_t timeout)
/* k_sleep() still returns 32 bit milliseconds for compatibility */
int64_t ms = K_TIMEOUT_EQ(timeout, K_FOREVER) ? K_TICKS_FOREVER :
-CLAMP(k_ticks_to_ms_ceil64(ticks), 0, INT_MAX);
+clamp(k_ticks_to_ms_ceil64(ticks), 0, INT_MAX);
SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ms);
return (int32_t) ms;

@@ -217,7 +217,7 @@ static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t s
{
size_t bytes_to_copy;
-bytes_to_copy = MIN(dest_size, src_size);
+bytes_to_copy = min(dest_size, src_size);
memcpy(dest, src, bytes_to_copy);
return bytes_to_copy;

@@ -90,7 +90,7 @@ static int32_t next_timeout(int32_t ticks_elapsed)
((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
ret = SYS_CLOCK_MAX_WAIT;
} else {
-ret = MAX(0, to->dticks - ticks_elapsed);
+ret = max(0, to->dticks - ticks_elapsed);
}
return ret;
@@ -124,7 +124,7 @@ k_ticks_t z_add_timeout(struct _timeout *to, _timeout_func_t fn, k_timeout_t tim
} else {
k_ticks_t dticks = Z_TICK_ABS(timeout.ticks) - curr_tick;
-to->dticks = MAX(1, dticks);
+to->dticks = max(1, dticks);
ticks = timeout.ticks;
}
@@ -322,7 +322,7 @@ k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
k_ticks_t dt = timeout.ticks;
if (Z_IS_TIMEOUT_RELATIVE(timeout)) {
-timepoint.tick = sys_clock_tick_get() + MAX(1, dt);
+timepoint.tick = sys_clock_tick_get() + max(1, dt);
} else {
timepoint.tick = Z_TICK_ABS(dt);
}

@@ -57,7 +57,7 @@ void z_timer_expiration_handler(struct _timeout *t)
k_timeout_t next = timer->period;
/* see note about z_add_timeout() in z_impl_k_timer_start() */
-next.ticks = MAX(next.ticks - 1, 0);
+next.ticks = max(next.ticks - 1, 0);
#ifdef CONFIG_TIMEOUT_64BIT
/* Exploit the fact that uptime during a kernel
@@ -171,7 +171,7 @@ void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
* is consistent for both 32-bit k_ticks_t which are unsigned
* and 64-bit k_ticks_t which are signed.
*/
-duration.ticks = MAX(1, duration.ticks);
+duration.ticks = max(1, duration.ticks);
duration.ticks = duration.ticks - 1;
}

@@ -17,7 +17,7 @@
static inline void increase_allocated_bytes(struct z_heap *h, size_t num_bytes)
{
h->allocated_bytes += num_bytes;
-h->max_allocated_bytes = MAX(h->max_allocated_bytes, h->allocated_bytes);
+h->max_allocated_bytes = max(h->max_allocated_bytes, h->allocated_bytes);
}
#endif
@@ -321,7 +321,7 @@ void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
rew = align & -align;
if (align != rew) {
align -= rew;
-gap = MIN(rew, chunk_header_bytes(h));
+gap = min(rew, chunk_header_bytes(h));
} else {
if (align <= chunk_header_bytes(h)) {
return sys_heap_alloc(heap, bytes);
@@ -482,7 +482,7 @@ void *sys_heap_realloc(struct sys_heap *heap, void *ptr, size_t bytes)
if (ptr2 != NULL) {
size_t prev_size = sys_heap_usable_size(heap, ptr);
-memcpy(ptr2, ptr, MIN(prev_size, bytes));
+memcpy(ptr2, ptr, min(prev_size, bytes));
sys_heap_free(heap, ptr);
}
return ptr2;
@@ -516,7 +516,7 @@ void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
if (ptr2 != NULL) {
size_t prev_size = sys_heap_usable_size(heap, ptr);
-memcpy(ptr2, ptr, MIN(prev_size, bytes));
+memcpy(ptr2, ptr, min(prev_size, bytes));
sys_heap_free(heap, ptr);
}
return ptr2;

@@ -245,7 +245,7 @@ static ALWAYS_INLINE chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes,
size_t oddments = ((bytes % CHUNK_UNIT) + (extra % CHUNK_UNIT) +
chunk_header_bytes(h) + CHUNK_UNIT - 1U) / CHUNK_UNIT;
-return (chunksz_t)MIN(chunks + oddments, h->end_chunk);
+return (chunksz_t)min(chunks + oddments, h->end_chunk);
}
static inline chunksz_t min_chunk_size(struct z_heap *h)

@@ -32,7 +32,7 @@ static void heap_print_info(struct z_heap *h, bool dump_chunks)
do {
count++;
-largest = MAX(largest, chunk_size(h, curr));
+largest = max(largest, chunk_size(h, curr));
curr = next_free_chunk(h, curr);
} while (curr != first);
}

@@ -120,7 +120,7 @@ void *sys_multi_heap_aligned_realloc(struct sys_multi_heap *mheap, void *cfg,
/* Otherwise, allocate a new block and copy the data */
new_ptr = sys_multi_heap_aligned_alloc(mheap, cfg, align, bytes);
if (new_ptr != NULL) {
-memcpy(new_ptr, ptr, MIN(old_size, bytes));
+memcpy(new_ptr, ptr, min(old_size, bytes));
sys_multi_heap_free(mheap, ptr);
}

@@ -121,7 +121,7 @@ static int malloc_prepare(void)
#ifdef USE_MALLOC_PREPARE
#ifdef CONFIG_MMU
-max_heap_size = MIN(CONFIG_NEWLIB_LIBC_MAX_MAPPED_REGION_SIZE,
+max_heap_size = min(CONFIG_NEWLIB_LIBC_MAX_MAPPED_REGION_SIZE,
k_mem_free_get());
if (max_heap_size != 0) {

@@ -354,7 +354,7 @@ success:
#if defined(CONFIG_NET_BUF_POOL_USAGE)
atomic_dec(&pool->avail_count);
__ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0);
-pool->max_used = MAX(pool->max_used,
+pool->max_used = max(pool->max_used,
pool->buf_count - atomic_get(&pool->avail_count));
#endif
return buf;
@@ -630,7 +630,7 @@ size_t net_buf_linearize(void *dst, size_t dst_len, const struct net_buf *src,
size_t to_copy;
size_t copied;
-len = MIN(len, dst_len);
+len = min(len, dst_len);
frag = src;
@@ -643,7 +643,7 @@ size_t net_buf_linearize(void *dst, size_t dst_len, const struct net_buf *src,
/* traverse the fragment chain until len bytes are copied */
copied = 0;
while (frag && len > 0) {
-to_copy = MIN(len, frag->len - offset);
+to_copy = min(len, frag->len - offset);
memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);
copied += to_copy;
@@ -673,7 +673,7 @@ size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
size_t max_size;
do {
-uint16_t count = MIN(len, net_buf_tailroom(frag));
+uint16_t count = min(len, net_buf_tailroom(frag));
net_buf_add_mem(frag, value8, count);
len -= count;
@@ -695,7 +695,7 @@ size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
pool = net_buf_pool_get(buf->pool_id);
max_size = pool->alloc->max_alloc_size;
frag = net_buf_alloc_len(pool,
-max_size ? MIN(len, max_size) : len,
+max_size ? min(len, max_size) : len,
timeout);
}
@@ -729,7 +729,7 @@ size_t net_buf_data_match(const struct net_buf *buf, size_t offset, const void *
while (buf && len > 0) {
bptr = buf->data + offset;
-to_compare = MIN(len, buf->len - offset);
+to_compare = min(len, buf->len - offset);
for (size_t i = 0; i < to_compare; ++i) {
if (dptr[compared] != bptr[i]) {

@@ -112,7 +112,7 @@ static inline void max_utilization_update(struct mpsc_pbuf_buffer *buffer)
return;
}
-buffer->max_usage = MAX(buffer->max_usage, get_usage(buffer));
+buffer->max_usage = max(buffer->max_usage, get_usage(buffer));
}
static inline bool is_valid(union mpsc_pbuf_generic *item)

@@ -192,7 +192,7 @@ int spsc_pbuf_alloc(struct spsc_pbuf *pb, uint16_t len, char **buf)
free_space = rd_idx - wr_idx - FREE_SPACE_DISTANCE;
}
-len = MIN(len, MAX(free_space - (int32_t)LEN_SZ, 0));
+len = min(len, max(free_space - (int32_t)LEN_SZ, 0));
*buf = &data_loc[wr_idx + LEN_SZ];
return len;

@@ -39,7 +39,7 @@ int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result)
return -rc;
}
-strncpy(entry->d_name, de.name, MIN(sizeof(entry->d_name), sizeof(de.name)));
+strncpy(entry->d_name, de.name, min(sizeof(entry->d_name), sizeof(de.name)));
entry->d_name[sizeof(entry->d_name) - 1] = '\0';
if (entry->d_name[0] == '\0') {

@@ -211,7 +211,7 @@ static ssize_t shm_rw(struct shm_obj *shm, void *buf, size_t size, bool is_write
if (offset >= shm->size) {
size = 0;
} else {
-size = MIN(size, shm->size - offset);
+size = min(size, shm->size - offset);
}
if (size > 0) {

@@ -21,5 +21,5 @@ uint32_t timespec_to_timeoutms(int clock_id, const struct timespec *abstime)
return 0;
}
-return CLAMP(tp_diff(abstime, &curtime) / NSEC_PER_MSEC, 0, UINT32_MAX);
+return clamp(tp_diff(abstime, &curtime) / NSEC_PER_MSEC, 0, UINT32_MAX);
}

@@ -20,7 +20,7 @@ uint32_t ring_buf_area_claim(struct ring_buf *buf, struct ring_buf_index *ring,
head_offset -= buf->size;
}
wrap_size = buf->size - head_offset;
-size = MIN(size, wrap_size);
+size = min(size, wrap_size);
*data = &buf->buffer[head_offset];
ring->head += size;

@@ -55,7 +55,7 @@ void sys_winstream_write(struct sys_winstream *ws,
/* Make room in the buffer by advancing start first (note same
* len-1 from above)
*/
-len = MIN(len, ws->len);
+len = min(len, ws->len);
if (seq != 0) {
uint32_t avail = (ws->len - 1) - idx_sub(ws, end, start);
@@ -71,7 +71,7 @@ void sys_winstream_write(struct sys_winstream *ws,
data += len0 - len;
}
-suffix = MIN(len, ws->len - end);
+suffix = min(len, ws->len - end);
MEMCPY(&ws->data[end], data, suffix);
if (len > suffix) {
MEMCPY(&ws->data[0], data + suffix, len - suffix);
@@ -109,8 +109,8 @@ uint32_t sys_winstream_read(struct sys_winstream *ws,
/* Copy data */
copy = idx_sub(ws, ws->end, behind);
-len = MIN(buflen, behind);
-suffix = MIN(len, ws->len - copy);
+len = min(buflen, behind);
+suffix = min(len, ws->len - copy);
MEMCPY(buf, &ws->data[copy], suffix);
if (len > suffix) {
MEMCPY(buf + suffix, &ws->data[0], len - suffix);