Merge tag 'dmaengine-fix-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "A bunch of driver fixes for:

   - dma mask fix for mmp pdma driver

   - Xilinx fixes for the regmap max register and an uninitialized addr_width

   - device leak fixes for a bunch of drivers in the subsystem

   - stm32 dmamux and TI crossbar driver fixes for device and OF node
     leaks, plus route allocation cleanup (the shared put_device()
     pattern is sketched after this message)

   - Tegra use-after-free fix

   - Memory leak fixes in the Qualcomm gpi and omap-dma drivers

   - compatible fix for apple driver"
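
Most of the device-leak fixes in this batch share one shape: of_find_device_by_node(), used by the dmamux and crossbar route-allocate callbacks to look up the mux platform device, returns that device with its reference count raised, and the early error returns were skipping the matching put_device(). A minimal sketch of the corrected structure, with illustrative names rather than any one driver's:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_map {
	u32 req_line;			/* request line programmed into the mux */
};

static void *example_route_allocate(struct of_phandle_args *dma_spec,
				    struct of_dma *ofdma)
{
	/* Takes a reference on the mux device; drop it on every exit path. */
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct example_map *map;
	int ret = -EINVAL;

	if (!pdev)
		return ERR_PTR(-ENODEV);

	if (dma_spec->args_count != 3)
		goto err_put_pdev;	/* a bare return here would leak pdev */

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto err_put_pdev;
	}
	map->req_line = dma_spec->args[0];

	/* ... validate the request, parse "dma-masters", program the mux ... */

	put_device(&pdev->dev);		/* the success path drops it as well */
	return map;

err_put_pdev:
	put_device(&pdev->dev);
	return ERR_PTR(ret);
}

The stm32, TI crossbar, lpc18xx/lpc32xx and cv1800 hunks below all converge on this single-exit-label form.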

* tag 'dmaengine-fix-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (25 commits)
  dmaengine: apple-admac: Add "apple,t8103-admac" compatible
  dmaengine: omap-dma: fix dma_pool resource leak in error paths
  dmaengine: qcom: gpi: Fix memory leak in gpi_peripheral_config()
  dmaengine: sh: rz-dmac: Fix rz_dmac_terminate_all()
  dmaengine: xilinx_dma: Fix uninitialized addr_width when "xlnx,addrwidth" property is missing
  dmaengine: tegra-adma: Fix use-after-free
  dmaengine: fsl-edma: Fix clk leak on alloc_chan_resources failure
  dmaengine: mmp_pdma: Fix race condition in mmp_pdma_residue()
  dmaengine: ti: k3-udma: fix device leak on udma lookup
  dmaengine: ti: dma-crossbar: clean up dra7x route allocation error paths
  dmaengine: ti: dma-crossbar: fix device leak on am335x route allocation
  dmaengine: ti: dma-crossbar: fix device leak on dra7x route allocation
  dmaengine: stm32: dmamux: clean up route allocation error labels
  dmaengine: stm32: dmamux: fix OF node leak on route allocation failure
  dmaengine: stm32: dmamux: fix device leak on route allocation
  dmaengine: sh: rz-dmac: fix device leak on probe failure
  dmaengine: lpc32xx-dmamux: fix device leak on route allocation
  dmaengine: lpc18xx-dmamux: fix device leak on route allocation
  dmaengine: idxd: fix device leaks on compat bind and unbind
  dmaengine: dw: dmamux: fix OF node leak on route allocation failure
  ...
Linus Torvalds
2026-01-18 13:38:31 -08:00
20 changed files with 168 additions and 73 deletions

View File

@@ -936,6 +936,7 @@ static void admac_remove(struct platform_device *pdev)
}
static const struct of_device_id admac_of_match[] = {
{ .compatible = "apple,t8103-admac", },
{ .compatible = "apple,admac", },
{ }
};

View File

@@ -1765,6 +1765,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
static void atc_free_chan_resources(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave;
BUG_ON(atc_chan_is_enabled(atchan));
@@ -1774,8 +1775,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
/*
* Free atslave allocated in at_dma_xlate()
*/
kfree(chan->private);
atslave = chan->private;
if (atslave) {
put_device(atslave->dma_dev);
kfree(atslave);
chan->private = NULL;
}
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

View File

@@ -1699,7 +1699,7 @@ static int sba_probe(struct platform_device *pdev)
/* Prealloc channel resource */
ret = sba_prealloc_channel_resources(sba);
if (ret)
goto fail_free_mchan;
goto fail_put_mbox;
/* Check availability of debugfs */
if (!debugfs_initialized())
@@ -1729,6 +1729,8 @@ skip_debugfs:
fail_free_resources:
debugfs_remove_recursive(sba->root);
sba_freeup_channel_resources(sba);
fail_put_mbox:
put_device(sba->mbox_dev);
fail_free_mchan:
mbox_free_channel(sba->mchan);
return ret;
@@ -1744,6 +1746,8 @@ static void sba_remove(struct platform_device *pdev)
sba_freeup_channel_resources(sba);
put_device(sba->mbox_dev);
mbox_free_channel(sba->mchan);
}

View File

@@ -102,11 +102,11 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
struct llist_node *node;
unsigned long flags;
unsigned int chid, devid, cpuid;
int ret;
int ret = -EINVAL;
if (dma_spec->args_count != DMAMUX_NCELLS) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
devid = dma_spec->args[0];
@@ -115,18 +115,18 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (devid > MAX_DMA_MAPPING_ID) {
dev_err(&pdev->dev, "invalid device id: %u\n", devid);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
if (cpuid > MAX_DMA_CPU_ID) {
dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@@ -136,8 +136,6 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (map->peripheral == devid && map->cpu == cpuid)
goto found;
}
ret = -EINVAL;
goto failed;
} else {
node = llist_del_first(&dmamux->free_maps);
@@ -171,12 +169,17 @@ found:
dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
chid, devid, cpuid);
put_device(&pdev->dev);
return map;
failed:
spin_unlock_irqrestore(&dmamux->lock, flags);
of_node_put(dma_spec->np);
dev_err(&pdev->dev, "errno %d\n", ret);
err_put_pdev:
put_device(&pdev->dev);
return ERR_PTR(ret);
}

View File

@@ -90,7 +90,7 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
ret = -EBUSY;
goto free_map;
goto put_dma_spec_np;
}
mask = BIT(map->req_idx);
@@ -103,6 +103,8 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
clear_bitmap:
clear_bit(map->req_idx, dmamux->used_chans);
put_dma_spec_np:
of_node_put(dma_spec->np);
free_map:
kfree(map);
put_device:

View File

@@ -873,6 +873,7 @@ err_errirq:
free_irq(fsl_chan->txirq, fsl_chan);
err_txirq:
dma_pool_destroy(fsl_chan->tcd_pool);
clk_disable_unprepare(fsl_chan->clk);
return ret;
}
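
The fsl-edma change is the usual alloc_chan_resources unwinding rule: whatever was enabled earlier has to be torn down in reverse order once a later step fails, and here the clock enabled at the top of the function was left running. A hedged sketch of that shape (structure and names are illustrative, not fsl-edma's own):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>

struct example_edma_chan {
	struct device *dev;
	struct clk *clk;
	struct dma_pool *pool;
	int irq;
};

static irqreturn_t example_edma_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_alloc_chan_resources(struct example_edma_chan *chan)
{
	int ret;

	ret = clk_prepare_enable(chan->clk);
	if (ret)
		return ret;

	chan->pool = dma_pool_create("example", chan->dev, 64, 32, 0);
	if (!chan->pool) {
		ret = -ENOMEM;
		goto err_clk;
	}

	ret = request_irq(chan->irq, example_edma_irq, 0, "example", chan);
	if (ret)
		goto err_pool;

	return 0;

err_pool:
	dma_pool_destroy(chan->pool);
err_clk:
	clk_disable_unprepare(chan->clk);	/* the step this fix adds */
	return ret;
}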

View File

@@ -20,11 +20,16 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t c
int rc = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver) {
if (!dev)
return -ENODEV;
if (dev->driver) {
device_driver_detach(dev);
rc = count;
}
put_device(dev);
return rc;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
@@ -38,9 +43,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t cou
struct idxd_dev *idxd_dev;
dev = bus_find_device_by_name(bus, NULL, buf);
if (!dev || dev->driver || drv != &dsa_drv.drv)
if (!dev)
return -ENODEV;
if (dev->driver || drv != &dsa_drv.drv)
goto err_put_dev;
idxd_dev = confdev_to_idxd_dev(dev);
if (is_idxd_dev(idxd_dev)) {
alt_drv = driver_find("idxd", bus);
@@ -53,13 +61,20 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t cou
alt_drv = driver_find("user", bus);
}
if (!alt_drv)
return -ENODEV;
goto err_put_dev;
rc = device_driver_attach(alt_drv, dev);
if (rc < 0)
return rc;
goto err_put_dev;
put_device(dev);
return count;
err_put_dev:
put_device(dev);
return rc;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
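
The idxd change handles the same kind of reference, but from bus_find_device_by_name(): the device it returns comes with a reference held, and the bind/unbind store handlers used to return early without dropping it. A sketch of an unbind handler with the reference released on every path (illustrative, not the idxd code itself):

#include <linux/device.h>

/*
 * Hypothetical driver-attribute store: bus_find_device_by_name() returns
 * the device with a reference held, so put_device() must run on every
 * path out of the function.
 */
static ssize_t example_unbind_store(struct device_driver *drv,
				    const char *buf, size_t count)
{
	struct device *dev = bus_find_device_by_name(drv->bus, NULL, buf);
	ssize_t rc = -ENODEV;

	if (!dev)
		return -ENODEV;

	if (dev->driver) {
		device_driver_detach(dev);
		rc = count;
	}

	put_device(dev);		/* release the lookup reference */
	return rc;
}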

View File

@@ -57,30 +57,31 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
unsigned long flags;
unsigned mux;
int ret = -EINVAL;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
mux = dma_spec->args[0];
if (mux >= dmamux->dma_master_requests) {
dev_err(&pdev->dev, "invalid mux number: %d\n",
dma_spec->args[0]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
dev_err(&pdev->dev, "invalid dma mux value: %d\n",
dma_spec->args[1]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@@ -89,7 +90,8 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
mux, mux, dmamux->muxes[mux].value);
of_node_put(dma_spec->np);
return ERR_PTR(-EBUSY);
ret = -EBUSY;
goto err_put_pdev;
}
dmamux->muxes[mux].busy = true;
@@ -106,7 +108,14 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
dmamux->muxes[mux].value, mux);
put_device(&pdev->dev);
return &dmamux->muxes[mux];
err_put_pdev:
put_device(&pdev->dev);
return ERR_PTR(ret);
}
static int lpc18xx_dmamux_probe(struct platform_device *pdev)

View File

@@ -95,11 +95,12 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
unsigned long flags;
struct lpc32xx_dmamux *mux = NULL;
int ret = -EINVAL;
int i;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
@@ -111,20 +112,20 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
if (!mux) {
dev_err(&pdev->dev, "invalid mux request number: %d\n",
dma_spec->args[0]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
if (dma_spec->args[2] > 1) {
dev_err(&pdev->dev, "invalid dma mux value: %d\n",
dma_spec->args[1]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@@ -133,7 +134,8 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_err(dev, "dma request signal %d busy, routed to %s\n",
mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
of_node_put(dma_spec->np);
return ERR_PTR(-EBUSY);
ret = -EBUSY;
goto err_put_pdev;
}
mux->busy = true;
@@ -148,7 +150,14 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_dbg(dev, "dma request signal %d routed to %s\n",
mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
put_device(&pdev->dev);
return mux;
err_put_pdev:
put_device(&pdev->dev);
return ERR_PTR(ret);
}
static int lpc32xx_dmamux_probe(struct platform_device *pdev)

View File

@@ -152,8 +152,8 @@ struct mmp_pdma_phy {
*
* Controller Configuration:
* @run_bits: Control bits in DCSR register for channel start/stop
* @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
* settings, or explicit mask like DMA_BIT_MASK(32/64)
* @dma_width: DMA addressing width in bits (32 or 64). Determines the
* DMA mask capability of the controller hardware.
*/
struct mmp_pdma_ops {
/* Hardware Register Operations */
@@ -173,7 +173,7 @@ struct mmp_pdma_ops {
/* Controller Configuration */
u32 run_bits;
u64 dma_mask;
u32 dma_width;
};
struct mmp_pdma_device {
@@ -928,6 +928,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
{
struct mmp_pdma_desc_sw *sw;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
unsigned long flags;
u64 curr;
u32 residue = 0;
bool passed = false;
@@ -945,6 +946,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
else
curr = pdev->ops->read_src_addr(chan->phy);
spin_lock_irqsave(&chan->desc_lock, flags);
list_for_each_entry(sw, &chan->chain_running, node) {
u64 start, end;
u32 len;
@@ -989,6 +992,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
continue;
if (sw->async_tx.cookie == cookie) {
spin_unlock_irqrestore(&chan->desc_lock, flags);
return residue;
} else {
residue = 0;
@@ -996,6 +1000,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
}
}
spin_unlock_irqrestore(&chan->desc_lock, flags);
/* We should only get here in case of cyclic transactions */
return residue;
}
@@ -1172,7 +1178,7 @@ static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
.get_desc_src_addr = get_desc_src_addr_32,
.get_desc_dst_addr = get_desc_dst_addr_32,
.run_bits = (DCSR_RUN),
.dma_mask = 0, /* let OF/platform set DMA mask */
.dma_width = 32,
};
static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
@@ -1185,7 +1191,7 @@ static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
.get_desc_src_addr = get_desc_src_addr_64,
.get_desc_dst_addr = get_desc_dst_addr_64,
.run_bits = (DCSR_RUN | DCSR_LPAEEN),
.dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
.dma_width = 64,
};
static const struct of_device_id mmp_pdma_dt_ids[] = {
@@ -1314,13 +1320,9 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
/* Set DMA mask based on ops->dma_mask, or OF/platform */
if (pdev->ops->dma_mask)
dma_set_mask(pdev->dev, pdev->ops->dma_mask);
else if (pdev->dev->coherent_dma_mask)
dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
else
dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
/* Set DMA mask based on controller hardware capabilities */
dma_set_mask_and_coherent(pdev->dev,
DMA_BIT_MASK(pdev->ops->dma_width));
ret = dma_async_device_register(&pdev->device);
if (ret) {

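The mmp_pdma race fix concerns the residue walk: chan->chain_running is also modified from the completion path, so traversing it needs desc_lock, and the early return for the matching cookie has to drop the lock on its way out. A stripped-down sketch of the locking shape (the driver's actual residue arithmetic is more involved):

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_pdma_desc {
	struct list_head node;
	dma_cookie_t cookie;
	u32 len;
};

struct example_pdma_chan {
	spinlock_t desc_lock;		/* protects chain_running */
	struct list_head chain_running;
};

static u32 example_residue(struct example_pdma_chan *chan, dma_cookie_t cookie)
{
	struct example_pdma_desc *sw;
	unsigned long flags;
	u32 residue = 0;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(sw, &chan->chain_running, node) {
		residue += sw->len;
		if (sw->cookie == cookie) {
			/* the early exit must drop the lock too */
			spin_unlock_irqrestore(&chan->desc_lock, flags);
			return residue;
		}
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
	return residue;
}
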
View File

@@ -1605,14 +1605,16 @@ static int
gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
{
struct gchan *gchan = to_gchan(chan);
void *new_config;
if (!config->peripheral_config)
return -EINVAL;
gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
if (!gchan->config)
new_config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
if (!new_config)
return -ENOMEM;
gchan->config = new_config;
memcpy(gchan->config, config->peripheral_config, config->peripheral_size);
return 0;
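
The gpi fix is the standard krealloc() idiom: writing the result straight back into gchan->config loses the old buffer when the allocation fails, so the result goes into a temporary and only replaces the stored pointer on success. A minimal sketch of the pattern (the helper name is made up):

#include <linux/slab.h>
#include <linux/string.h>

/* Grow (or first allocate) a config buffer without leaking it on failure. */
static int example_set_config(void **cur, const void *src, size_t len)
{
	void *tmp;

	tmp = krealloc(*cur, len, GFP_NOWAIT);
	if (!tmp)
		return -ENOMEM;		/* *cur is untouched and still owned */

	*cur = tmp;
	memcpy(*cur, src, len);
	return 0;
}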

View File

@@ -557,11 +557,16 @@ rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static int rz_dmac_terminate_all(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
struct rz_lmdesc *lmdesc = channel->lmdesc.base;
unsigned long flags;
unsigned int i;
LIST_HEAD(head);
rz_dmac_disable_hw(channel);
spin_lock_irqsave(&channel->vc.lock, flags);
for (i = 0; i < DMAC_NR_LMDESC; i++)
lmdesc[i].header = 0;
list_splice_tail_init(&channel->ld_active, &channel->ld_free);
list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
vchan_get_all_descriptors(&channel->vc, &head);
@@ -854,6 +859,13 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
return 0;
}
static void rz_dmac_put_device(void *_dev)
{
struct device *dev = _dev;
put_device(dev);
}
static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -876,6 +888,10 @@ static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
return -ENODEV;
}
ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
if (ret)
return ret;
dmac_index = args.args[0];
if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
@@ -1055,8 +1071,6 @@ static void rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_device_put(dmac->icu.pdev);
}
static const struct of_device_id of_rz_dmac_match[] = {

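The rz-dmac probe fix ties the reference taken on the ICU platform device to a devm action, so the put_device() happens automatically both when a later probe step fails and when the driver is unbound, replacing the explicit platform_device_put() in remove. A sketch of that pattern, assuming the ICU device is looked up by OF node (names are illustrative):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void example_put_device(void *data)
{
	put_device(data);
}

static int example_probe_get_icu(struct device *dev, struct device_node *icu_np)
{
	struct platform_device *icu_pdev = of_find_device_by_node(icu_np);

	if (!icu_pdev)
		return -ENODEV;

	/*
	 * Drop the reference automatically when this device is unbound,
	 * or immediately if registering the action fails.
	 */
	return devm_add_action_or_reset(dev, example_put_device,
					&icu_pdev->dev);
}
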
View File

@@ -90,23 +90,25 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
struct stm32_dmamux *mux;
u32 i, min, max;
int ret;
int ret = -EINVAL;
unsigned long flags;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
if (dma_spec->args[0] > dmamux->dmamux_requests) {
dev_err(&pdev->dev, "invalid mux request number: %d\n",
dma_spec->args[0]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
if (!mux) {
ret = -ENOMEM;
goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
@@ -116,7 +118,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
spin_unlock_irqrestore(&dmamux->lock, flags);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
ret = -ENOMEM;
goto error_chan_id;
goto err_free_mux;
}
set_bit(mux->chan_id, dmamux->dma_inuse);
spin_unlock_irqrestore(&dmamux->lock, flags);
@@ -133,8 +135,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
ret = -EINVAL;
goto error;
goto err_clear_inuse;
}
/* Set dma request */
@@ -142,7 +143,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
spin_unlock_irqrestore(&dmamux->lock, flags);
goto error;
goto err_put_dma_spec_np;
}
spin_unlock_irqrestore(&dmamux->lock, flags);
@@ -160,13 +161,19 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
mux->request, mux->master, mux->chan_id);
put_device(&pdev->dev);
return mux;
error:
err_put_dma_spec_np:
of_node_put(dma_spec->np);
err_clear_inuse:
clear_bit(mux->chan_id, dmamux->dma_inuse);
error_chan_id:
err_free_mux:
kfree(mux);
err_put_pdev:
put_device(&pdev->dev);
return ERR_PTR(ret);
}

View File

@@ -429,10 +429,17 @@ static void tegra_adma_stop(struct tegra_adma_chan *tdc)
return;
}
kfree(tdc->desc);
vchan_terminate_vdesc(&tdc->desc->vd);
tdc->desc = NULL;
}
static void tegra_adma_synchronize(struct dma_chan *dc)
{
struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
vchan_synchronize(&tdc->vc);
}
static void tegra_adma_start(struct tegra_adma_chan *tdc)
{
struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
@@ -1155,6 +1162,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->dma_dev.device_config = tegra_adma_slave_config;
tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
tdma->dma_dev.device_synchronize = tegra_adma_synchronize;
tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
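
The tegra-adma use-after-free came from kfree()ing the in-flight descriptor in the stop path while it could still be referenced; with virt-dma the in-flight descriptor is instead handed to vchan_terminate_vdesc() and reclaimed later from the new device_synchronize() callback via vchan_synchronize(). A hedged sketch of that pairing, using the in-tree drivers/dma/virt-dma.h helpers but illustrative types:

#include <linux/container_of.h>
#include <linux/dmaengine.h>

#include "virt-dma.h"	/* in-tree helper header under drivers/dma/ */

struct example_adma_desc {
	struct virt_dma_desc vd;
	/* ... hardware-specific fields ... */
};

struct example_adma_chan {
	struct virt_dma_chan vc;
	struct example_adma_desc *desc;	/* descriptor currently on hardware */
};

/* Caller holds tdc->vc.lock and has already stopped the hardware. */
static void example_adma_stop(struct example_adma_chan *tdc)
{
	if (!tdc->desc)
		return;

	/* Defer freeing to vchan_synchronize() instead of kfree()ing here. */
	vchan_terminate_vdesc(&tdc->desc->vd);
	tdc->desc = NULL;
}

static void example_adma_synchronize(struct dma_chan *dc)
{
	struct example_adma_chan *tdc =
		container_of(dc, struct example_adma_chan, vc.chan);

	/* Waits for callbacks and frees the descriptors terminated above. */
	vchan_synchronize(&tdc->vc);
}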

View File

@@ -79,34 +79,35 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
{
struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
struct ti_am335x_xbar_map *map;
struct ti_am335x_xbar_map *map = ERR_PTR(-EINVAL);
if (dma_spec->args_count != 3)
return ERR_PTR(-EINVAL);
goto out_put_pdev;
if (dma_spec->args[2] >= xbar->xbar_events) {
dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
dma_spec->args[2]);
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
if (dma_spec->args[0] >= xbar->dma_requests) {
dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
dma_spec->args[0]);
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
of_node_put(dma_spec->np);
return ERR_PTR(-ENOMEM);
map = ERR_PTR(-ENOMEM);
goto out_put_pdev;
}
map->dma_line = (u16)dma_spec->args[0];
@@ -120,6 +121,9 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
out_put_pdev:
put_device(&pdev->dev);
return map;
}
@@ -241,28 +245,26 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
{
struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
struct ti_dra7_xbar_map *map;
struct ti_dra7_xbar_map *map = ERR_PTR(-EINVAL);
if (dma_spec->args[0] >= xbar->xbar_requests) {
dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
dma_spec->args[0]);
put_device(&pdev->dev);
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
put_device(&pdev->dev);
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
of_node_put(dma_spec->np);
put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
map = ERR_PTR(-ENOMEM);
goto out_put_pdev;
}
mutex_lock(&xbar->mutex);
@@ -273,8 +275,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
of_node_put(dma_spec->np);
put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
map = ERR_PTR(-ENOMEM);
goto out_put_pdev;
}
set_bit(map->xbar_out, xbar->dma_inuse);
mutex_unlock(&xbar->mutex);
@@ -288,6 +290,9 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
out_put_pdev:
put_device(&pdev->dev);
return map;
}

View File

@@ -42,9 +42,9 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
ud = platform_get_drvdata(pdev);
put_device(&pdev->dev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}

View File

@@ -1808,6 +1808,8 @@ static int omap_dma_probe(struct platform_device *pdev)
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
rc);
if (od->ll123_supported)
dma_pool_destroy(od->desc_pool);
omap_dma_free(od);
return rc;
}
@@ -1823,6 +1825,8 @@ static int omap_dma_probe(struct platform_device *pdev)
if (rc) {
pr_warn("OMAP-DMA: failed to register DMA controller\n");
dma_async_device_unregister(&od->ddev);
if (od->ll123_supported)
dma_pool_destroy(od->desc_pool);
omap_dma_free(od);
}
}

View File

@@ -9,6 +9,7 @@
/* The length of register space exposed to host */
#define XDMA_REG_SPACE_LEN 65536
#define XDMA_MAX_REG_OFFSET (XDMA_REG_SPACE_LEN - 4)
/*
* maximum number of DMA channels for each direction:

View File

@@ -38,7 +38,7 @@ static const struct regmap_config xdma_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = XDMA_REG_SPACE_LEN,
.max_register = XDMA_MAX_REG_OFFSET,
};
/**

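The regmap change is an off-by-one in kind rather than value: .max_register is the highest valid register offset, not the size of the register window, so a 64 KiB space of 4-byte registers ends at the length minus 4. Roughly (names are placeholders):

#include <linux/regmap.h>

#define EXAMPLE_REG_SPACE_LEN	65536
/* Highest addressable register offset, not the size of the window. */
#define EXAMPLE_MAX_REG_OFFSET	(EXAMPLE_REG_SPACE_LEN - 4)

static const struct regmap_config example_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= EXAMPLE_MAX_REG_OFFSET,
};
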
View File

@@ -131,6 +131,7 @@
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
#define XILINX_DMA_DFAULT_ADDRWIDTH 0x20
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -3159,7 +3160,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
u32 num_frames, addr_width, len_width;
u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width;
int i, err;
/* Allocate and initialize the DMA engine structure */
@@ -3235,7 +3236,9 @@ static int xilinx_dma_probe(struct platform_device *pdev)
err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
if (err < 0)
dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
dev_warn(xdev->dev,
"missing xlnx,addrwidth property, using default value %d\n",
XILINX_DMA_DFAULT_ADDRWIDTH);
if (addr_width > 32)
xdev->ext_addr = true;
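
The xilinx_dma fix leans on the fact that of_property_read_u32() leaves its output argument untouched when the property is missing, so the variable must carry a usable default before the call instead of being consumed uninitialized afterwards. A small sketch of the pattern (macro and helper names are made up):

#include <linux/of.h>
#include <linux/printk.h>

#define EXAMPLE_DEFAULT_ADDRWIDTH	32

static u32 example_get_addrwidth(struct device_node *np)
{
	/* Pre-load the default; the helper does not write it on failure. */
	u32 addr_width = EXAMPLE_DEFAULT_ADDRWIDTH;

	if (of_property_read_u32(np, "xlnx,addrwidth", &addr_width))
		pr_warn("missing xlnx,addrwidth, using default %u\n",
			EXAMPLE_DEFAULT_ADDRWIDTH);

	return addr_width;
}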