diff options
-rw-r--r--  kernel/dma/mapping.c | 11 +++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index cda127027e48..67da08fa6723 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -918,7 +918,7 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
  * the system, else %false.  Lack of addressing bits is the prime reason for
  * bounce buffering, but might not be the only one.
  */
-bool dma_addressing_limited(struct device *dev)
+static bool __dma_addressing_limited(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -930,6 +930,15 @@ bool dma_addressing_limited(struct device *dev)
 		return false;
 	return !dma_direct_all_ram_mapped(dev);
 }
+
+bool dma_addressing_limited(struct device *dev)
+{
+	if (!__dma_addressing_limited(dev))
+		return false;
+
+	dev_dbg(dev, "device is DMA addressing limited\n");
+	return true;
+}
 EXPORT_SYMBOL_GPL(dma_addressing_limited);
 
 size_t dma_max_mapping_size(struct device *dev)
