| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2011-03-24 20:50:06 +0000 | 
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2011-03-30 10:44:00 +1100 | 
| commit | 6090912c4abcfc6c81b156cf2bb4cda23ae6e847 (patch) | |
| tree | 9bddd5b697883f706a53ef0413181845bb735250 | |
| parent | 15d260b36facc1aa769fb39b0efc41f4c8c44729 (diff) | |
powerpc: Implement dma_mmap_coherent()
This is used by ALSA to mmap buffers allocated with dma_alloc_coherent()
into userspace. We need a special variant to handle machines with
non-coherent DMA, as those buffers have "special" virtual addresses and
require non-cacheable mappings.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
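
For context, here is a minimal sketch of how a driver could use the new helper from its mmap file operation. None of this is part of the patch: the `mydrv` names, the buffer size, and the char-device plumbing are illustrative assumptions; only `dma_alloc_coherent()`, `dma_mmap_coherent()` and `ARCH_HAS_DMA_MMAP_COHERENT` come from the patch itself.

```c
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define MYDRV_BUF_SIZE	(64 * 1024)	/* illustrative size */

static struct device *mydrv_dev;	/* set up at probe time */
static void *mydrv_cpu_addr;		/* kernel virtual address of the buffer */
static dma_addr_t mydrv_dma_handle;	/* bus address for the device */

/* Allocate the shared buffer once, e.g. from the driver's probe routine. */
static int mydrv_alloc_buffer(void)
{
	mydrv_cpu_addr = dma_alloc_coherent(mydrv_dev, MYDRV_BUF_SIZE,
					    &mydrv_dma_handle, GFP_KERNEL);
	return mydrv_cpu_addr ? 0 : -ENOMEM;
}

/* mmap file operation: hand the coherent buffer to userspace. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef ARCH_HAS_DMA_MMAP_COHERENT
	/* The arch helper chooses the right pgprot (non-cacheable on
	 * non-coherent machines) and the right PFN for the mapping. */
	return dma_mmap_coherent(mydrv_dev, vma, mydrv_cpu_addr,
				 mydrv_dma_handle, MYDRV_BUF_SIZE);
#else
	return -ENXIO;
#endif
}
```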
| -rw-r--r-- | arch/powerpc/include/asm/dma-mapping.h | 6 |
| -rw-r--r-- | arch/powerpc/kernel/dma.c | 18 |
| -rw-r--r-- | arch/powerpc/mm/dma-noncoherent.c | 20 |

3 files changed, 44 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 6d2416a85709..dd70fac57ec8 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
 				 size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
@@ -198,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+			     void *, dma_addr_t, size_t);
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
+
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index cf02cad62d9a..d238c082c3c5 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -179,3 +179,21 @@ static int __init dma_init(void)
        return 0;
 }
 fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 757c0bed9a91..b42f76c4948d 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
 #endif
 }
 EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+	/* This should always be populated, so we don't test every
+	 * level. If that fails, we'll have a nice crash which
+	 * will be as good as a BUG_ON()
+	 */
+	pgd_t *pgd = pgd_offset_k(cpu_addr);
+	pud_t *pud = pud_offset(pgd, cpu_addr);
+	pmd_t *pmd = pmd_offset(pud, cpu_addr);
+	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+	if (pte_none(*ptep) || !pte_present(*ptep))
+		return 0;
+	return pte_pfn(*ptep);
+}
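
For completeness, a hedged sketch of the userspace side this enables: once a driver exposes such a mapping through its mmap handler (as in the hypothetical `mydrv` sketch above), an application can map the DMA buffer directly through the device node. The `/dev/mydrv` path and the 64 KiB size are assumptions tied to that sketch, not anything defined by this patch.

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 64 * 1024;		/* must match the driver's buffer */
	int fd = open("/dev/mydrv", O_RDWR);	/* hypothetical device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* On non-coherent machines this mapping ends up non-cacheable,
	 * so CPU stores are visible to the device without extra syncs. */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	memset(buf, 0, len);	/* touch the shared DMA buffer directly */

	munmap(buf, len);
	close(fd);
	return 0;
}
```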
