diff options
-rw-r--r--  drivers/dma/dw/core.c    |  6
-rw-r--r--  drivers/dma/imx-sdma.c   | 69
-rw-r--r--  drivers/dma/ti/cppi41.c  | 16
3 files changed, 62 insertions(+), 29 deletions(-)
| diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index d0c3e50b39fb..1fc488e90f36 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1059,12 +1059,12 @@ static void dwc_issue_pending(struct dma_chan *chan)  /*   * Program FIFO size of channels.   * - * By default full FIFO (1024 bytes) is assigned to channel 0. Here we + * By default full FIFO (512 bytes) is assigned to channel 0. Here we   * slice FIFO on equal parts between channels.   */  static void idma32_fifo_partition(struct dw_dma *dw)  { -	u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) | +	u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |  		    IDMA32C_FP_UPDATE;  	u64 fifo_partition = 0; @@ -1077,7 +1077,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)  	/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */  	fifo_partition |= value << 32; -	/* Program FIFO Partition registers - 128 bytes for each channel */ +	/* Program FIFO Partition registers - 64 bytes per channel */  	idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);  	idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);  } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b4ec2d20e661..cb1b44d78a1f 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -24,7 +24,6 @@  #include <linux/spinlock.h>  #include <linux/device.h>  #include <linux/dma-mapping.h> -#include <linux/dmapool.h>  #include <linux/firmware.h>  #include <linux/slab.h>  #include <linux/platform_device.h> @@ -33,6 +32,7 @@  #include <linux/of_address.h>  #include <linux/of_device.h>  #include <linux/of_dma.h> +#include <linux/workqueue.h>  #include <asm/irq.h>  #include <linux/platform_data/dma-imx-sdma.h> @@ -376,7 +376,7 @@ struct sdma_channel {  	u32				shp_addr, per_addr;  	enum dma_status			status;  	struct imx_dma_data		data; -	struct dma_pool			*bd_pool; +	struct work_struct		terminate_worker;  };  #define IMX_DMA_SG_LOOP		BIT(0) @@ -1027,31 +1027,49 @@ static int 
sdma_disable_channel(struct dma_chan *chan)  	return 0;  } - -static int sdma_disable_channel_with_delay(struct dma_chan *chan) +static void sdma_channel_terminate_work(struct work_struct *work)  { -	struct sdma_channel *sdmac = to_sdma_chan(chan); +	struct sdma_channel *sdmac = container_of(work, struct sdma_channel, +						  terminate_worker);  	unsigned long flags;  	LIST_HEAD(head); -	sdma_disable_channel(chan); -	spin_lock_irqsave(&sdmac->vc.lock, flags); -	vchan_get_all_descriptors(&sdmac->vc, &head); -	sdmac->desc = NULL; -	spin_unlock_irqrestore(&sdmac->vc.lock, flags); -	vchan_dma_desc_free_list(&sdmac->vc, &head); -  	/*  	 * According to NXP R&D team a delay of one BD SDMA cost time  	 * (maximum is 1ms) should be added after disable of the channel  	 * bit, to ensure SDMA core has really been stopped after SDMA  	 * clients call .device_terminate_all.  	 */ -	mdelay(1); +	usleep_range(1000, 2000); + +	spin_lock_irqsave(&sdmac->vc.lock, flags); +	vchan_get_all_descriptors(&sdmac->vc, &head); +	sdmac->desc = NULL; +	spin_unlock_irqrestore(&sdmac->vc.lock, flags); +	vchan_dma_desc_free_list(&sdmac->vc, &head); +} + +static int sdma_disable_channel_async(struct dma_chan *chan) +{ +	struct sdma_channel *sdmac = to_sdma_chan(chan); + +	sdma_disable_channel(chan); + +	if (sdmac->desc) +		schedule_work(&sdmac->terminate_worker);  	return 0;  } +static void sdma_channel_synchronize(struct dma_chan *chan) +{ +	struct sdma_channel *sdmac = to_sdma_chan(chan); + +	vchan_synchronize(&sdmac->vc); + +	flush_work(&sdmac->terminate_worker); +} +  static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)  {  	struct sdma_engine *sdma = sdmac->sdma; @@ -1192,10 +1210,11 @@ out:  static int sdma_alloc_bd(struct sdma_desc *desc)  { +	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);  	int ret = 0; -	desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT, -				  &desc->bd_phys); +	desc->bd = dma_zalloc_coherent(NULL, bd_size, 
&desc->bd_phys, +					GFP_NOWAIT);  	if (!desc->bd) {  		ret = -ENOMEM;  		goto out; @@ -1206,7 +1225,9 @@ out:  static void sdma_free_bd(struct sdma_desc *desc)  { -	dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys); +	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); + +	dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);  }  static void sdma_desc_free(struct virt_dma_desc *vd) @@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)  	if (ret)  		goto disable_clk_ahb; -	sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev, -				sizeof(struct sdma_buffer_descriptor), -				32, 0); -  	return 0;  disable_clk_ahb: @@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)  	struct sdma_channel *sdmac = to_sdma_chan(chan);  	struct sdma_engine *sdma = sdmac->sdma; -	sdma_disable_channel_with_delay(chan); +	sdma_disable_channel_async(chan); + +	sdma_channel_synchronize(chan);  	if (sdmac->event_id0)  		sdma_event_disable(sdmac, sdmac->event_id0); @@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)  	clk_disable(sdma->clk_ipg);  	clk_disable(sdma->clk_ahb); - -	dma_pool_destroy(sdmac->bd_pool); -	sdmac->bd_pool = NULL;  }  static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, @@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev)  		sdmac->channel = i;  		sdmac->vc.desc_free = sdma_desc_free; +		INIT_WORK(&sdmac->terminate_worker, +				sdma_channel_terminate_work);  		/*  		 * Add the channel to the DMAC list. Do not add channel 0 though  		 * because we need it internally in the SDMA driver. 
This also means @@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev)  	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;  	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;  	sdma->dma_device.device_config = sdma_config; -	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; +	sdma->dma_device.device_terminate_all = sdma_disable_channel_async; +	sdma->dma_device.device_synchronize = sdma_channel_synchronize;  	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;  	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;  	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 1497da367710..e507ec36c0d3 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)  	desc_phys = lower_32_bits(c->desc_phys);  	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); -	if (!cdd->chan_busy[desc_num]) +	if (!cdd->chan_busy[desc_num]) { +		struct cppi41_channel *cc, *_ct; + +		/* +		 * channels might still be in the pending list if +		 * cppi41_dma_issue_pending() is called after +		 * cppi41_runtime_suspend() is called +		 */ +		list_for_each_entry_safe(cc, _ct, &cdd->pending, node) { +			if (cc != c) +				continue; +			list_del(&cc->node); +			break; +		}  		return 0; +	}  	ret = cppi41_tear_down_chan(c);  	if (ret)
