drivers: dma: silabs: add dma block splitting for high transfer size

The LDMA driver currently has a 1:1 mapping between hardware LDMA
descriptors and struct dma_block_config. This patch allows multiple
hardware descriptors to be allocated for a single struct dma_block_config
if the block size exceeds the transfer capacity of a single hardware
LDMA descriptor. This benefits peripheral drivers, which no longer need
to split their payloads to fit the transfer capacity of a single
hardware LDMA descriptor.
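
To illustrate what this enables on the consumer side, here is a minimal sketch against the generic Zephyr DMA API (the buffer size and the per-descriptor capacity are assumptions, not values from this commit): a single dma_block_config larger than one descriptor can now be handed to the driver as-is, where previously the caller had to chain several smaller blocks.

#include <stdint.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Hypothetical buffers larger than one LDMA descriptor can move in a
 * single transfer (the exact per-descriptor capacity is an assumption here).
 */
static uint8_t src_buf[6000];
static uint8_t dst_buf[6000];

static int start_large_copy(const struct device *dma_dev, uint32_t channel)
{
        struct dma_block_config blk = {
                .source_address = (uint32_t)src_buf,
                .dest_address = (uint32_t)dst_buf,
                .block_size = sizeof(src_buf), /* no manual splitting needed anymore */
        };
        struct dma_config cfg = {
                .channel_direction = MEMORY_TO_MEMORY,
                .source_data_size = 1,
                .dest_data_size = 1,
                .block_count = 1,              /* still a single logical block */
                .head_block = &blk,
        };
        int ret = dma_config(dma_dev, channel, &cfg);

        if (ret < 0) {
                return ret;
        }

        return dma_start(dma_dev, channel);
}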

Signed-off-by: Martin Hoff <martin.hoff@silabs.com>
Author:    Martin Hoff <martin.hoff@silabs.com>
Date:      2025-07-09 17:28:00 +02:00
Committer: Maureen Helm
Parent:    3d2f8b1cd8
Commit:    9ed0ded09d
@@ -89,9 +89,12 @@ static int dma_silabs_get_blocksize(uint32_t src_blen, uint32_t dst_blen, uint32
 static int dma_silabs_block_to_descriptor(struct dma_config *config,
                                           struct dma_silabs_channel *chan_conf,
-                                          struct dma_block_config *block, LDMA_Descriptor_t *desc)
+                                          struct dma_block_config *block, LDMA_Descriptor_t *desc,
+                                          int *offset)
 {
-        int ret, src_size, xfer_count;
+        int ret, src_size, xfer_count, loc_offset, mod, rem_bsize;
+
+        loc_offset = *offset;
 
         if (block->dest_scatter_count || block->source_gather_count ||
             block->source_gather_interval || block->dest_scatter_interval ||
@@ -123,17 +126,27 @@ static int dma_silabs_block_to_descriptor(struct dma_config *config,
         src_size = config->source_data_size;
         desc->xfer.size = LOG2(src_size);
 
-        if (block->block_size % config->source_data_size) {
-                xfer_count = block->block_size / config->source_data_size;
+        if (loc_offset) {
+                rem_bsize = block->block_size - loc_offset * config->source_data_size;
         } else {
-                xfer_count = block->block_size / config->source_data_size - 1;
+                rem_bsize = block->block_size;
         }
 
+        xfer_count = rem_bsize / config->source_data_size;
+        mod = rem_bsize % config->source_data_size;
+
         if (xfer_count > LDMA_DESCRIPTOR_MAX_XFER_SIZE) {
-                return -ENOTSUP;
-        }
-
-        desc->xfer.xferCnt = xfer_count;
+                desc->xfer.xferCnt = LDMA_DESCRIPTOR_MAX_XFER_SIZE - 1;
+                *offset = loc_offset + LDMA_DESCRIPTOR_MAX_XFER_SIZE;
+        } else {
+                if (!mod || xfer_count == LDMA_DESCRIPTOR_MAX_XFER_SIZE) {
+                        xfer_count--;
+                }
+                desc->xfer.xferCnt = xfer_count;
+                *offset = 0;
+        }
 
         /* Warning : High LDMA blockSize (high burst) mean a large transfer
          * without LDMA controller re-arbitration.
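
For reference, the splitting arithmetic introduced above works out as in the following standalone sketch (not driver code; LDMA_DESCRIPTOR_MAX_XFER_SIZE = 2048 is an assumption, and block_size is taken to be a multiple of the data size):

#include <stdio.h>

#define LDMA_DESCRIPTOR_MAX_XFER_SIZE 2048 /* assumed; the driver takes it from the HAL */

/* Mirror of the per-descriptor computation: print how one block splits
 * across hardware descriptors. The hardware xferCnt field stores
 * "number of transfers - 1", which is why the driver decrements it.
 */
static void show_split(unsigned int block_size, unsigned int data_size)
{
        unsigned int offset = 0;

        do {
                unsigned int rem_bsize = block_size - offset * data_size;
                unsigned int xfer_count = rem_bsize / data_size;

                if (xfer_count > LDMA_DESCRIPTOR_MAX_XFER_SIZE) {
                        xfer_count = LDMA_DESCRIPTOR_MAX_XFER_SIZE;
                        offset += LDMA_DESCRIPTOR_MAX_XFER_SIZE;
                } else {
                        offset = 0;
                }
                printf("descriptor: %u transfers of %u byte(s)\n", xfer_count, data_size);
        } while (offset != 0);
}

int main(void)
{
        show_split(5000, 1); /* -> 2048 + 2048 + 904 transfers */
        return 0;
}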
@@ -195,8 +208,8 @@ static int dma_silabs_block_to_descriptor(struct dma_config *config,
                 LOG_WRN("dest_buffer address is null.");
         }
 
-        desc->xfer.srcAddr = block->source_address;
-        desc->xfer.dstAddr = block->dest_address;
+        desc->xfer.srcAddr = block->source_address + loc_offset * config->source_data_size;
+        desc->xfer.dstAddr = block->dest_address + loc_offset * config->dest_data_size;
 
         return 0;
 }
@@ -229,7 +242,7 @@ static int dma_silabs_configure_descriptor(struct dma_config *config, struct dma
         struct dma_block_config *head_block = config->head_block;
         struct dma_block_config *block = config->head_block;
         LDMA_Descriptor_t *desc, *prev_desc;
-        int ret;
+        int ret, offset;
 
         /* Descriptors configuration
          * block refers to user configured block (dma_block_config structure from dma.h)
@@ -237,13 +250,14 @@ static int dma_silabs_configure_descriptor(struct dma_config *config, struct dma
          * hal)
          */
         prev_desc = NULL;
+        offset = 0;
 
         while (block) {
                 ret = sys_mem_blocks_alloc(data->dma_desc_pool, 1, (void **)&desc);
                 if (ret) {
                         goto err;
                 }
-                ret = dma_silabs_block_to_descriptor(config, chan_conf, block, desc);
+                ret = dma_silabs_block_to_descriptor(config, chan_conf, block, desc, &offset);
                 if (ret) {
                         goto err;
                 }
@@ -257,13 +271,15 @@ static int dma_silabs_configure_descriptor(struct dma_config *config, struct dma
                 }
 
                 prev_desc = desc;
-                block = block->next_block;
-                if (block == head_block) {
-                        block = NULL;
-                        prev_desc->xfer.linkAddr =
-                                LDMA_DESCRIPTOR_LINKABS_ADDR_TO_LINKADDR(chan_conf->desc);
-                        prev_desc->xfer.linkMode = ldmaLinkModeAbs;
-                        prev_desc->xfer.link = 1;
+                if (!offset) {
+                        block = block->next_block;
+                        if (block == head_block) {
+                                block = NULL;
+                                prev_desc->xfer.linkAddr =
+                                        LDMA_DESCRIPTOR_LINKABS_ADDR_TO_LINKADDR(chan_conf->desc);
+                                prev_desc->xfer.linkMode = ldmaLinkModeAbs;
+                                prev_desc->xfer.link = 1;
+                        }
                 }
         }
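
With the guarded advance above, the configure loop stays on the same dma_block_config and keeps allocating descriptors from the pool until offset wraps back to 0. The number of pool entries consumed per logical block is roughly the following (a sketch; 2048 is an assumed capacity and block_size is assumed to be a multiple of the data size):

#define LDMA_DESCRIPTOR_MAX_XFER_SIZE 2048 /* assumed; the driver takes it from the HAL */

/* Ceiling division: each hardware descriptor moves at most
 * LDMA_DESCRIPTOR_MAX_XFER_SIZE transfers of data_size bytes.
 */
static unsigned int descriptors_per_block(unsigned int block_size, unsigned int data_size)
{
        unsigned int xfers = block_size / data_size;

        return (xfers + LDMA_DESCRIPTOR_MAX_XFER_SIZE - 1) / LDMA_DESCRIPTOR_MAX_XFER_SIZE;
}

A chain of several large blocks therefore draws proportionally more entries from dma_desc_pool than before.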
@@ -525,7 +541,9 @@ int silabs_ldma_append_block(const struct device *dev, uint32_t channel, struct
         struct dma_block_config *block_config = config->head_block;
         LDMA_Descriptor_t *desc = data->dma_chan_table[channel].desc;
         unsigned int key;
-        int ret;
+        int ret, offset;
+
+        offset = 0;
 
         __ASSERT(!((uintptr_t)desc & ~_LDMA_CH_LINK_LINKADDR_MASK),
                  "DMA Descriptor is not 32 bits aligned");
@@ -552,9 +570,15 @@ int silabs_ldma_append_block(const struct device *dev, uint32_t channel, struct
                 return -EINVAL;
         }
 
-        ret = dma_silabs_block_to_descriptor(config, chan_conf, block_config, desc);
+        ret = dma_silabs_block_to_descriptor(config, chan_conf, block_config, desc, &offset);
         if (ret) {
                 return ret;
+        } else if (offset) {
+                /* If the offset is not 0, it means that the block size is larger than the transfer
+                 * capacity of a single hardware LDMA descriptor. It is not supported with the
+                 * append function.
+                 */
+                return -EINVAL;
         }
 
         key = irq_lock();
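
Since the append path rejects blocks that would need splitting, a caller can fall back to a full reconfiguration when that happens. A hypothetical sketch (queue_block is illustrative, the prototype is redeclared because its public header is not part of this diff, and -EINVAL may also be returned for other invalid configurations):

#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Prototype as in the hunk above; the header exporting it is not shown here. */
int silabs_ldma_append_block(const struct device *dev, uint32_t channel,
                             struct dma_config *config);

static int queue_block(const struct device *dma_dev, uint32_t channel,
                       struct dma_config *cfg)
{
        int ret = silabs_ldma_append_block(dma_dev, channel, cfg);

        if (ret == -EINVAL) {
                /* Block too large for a single descriptor (or otherwise not
                 * appendable): reconfigure the channel instead, where the new
                 * splitting logic applies.
                 */
                ret = dma_config(dma_dev, channel, cfg);
                if (ret == 0) {
                        ret = dma_start(dma_dev, channel);
                }
        }

        return ret;
}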